From 48f68714db3fa14b258d3a2f18a55b7162efb9b7 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 10 Nov 2025 16:33:36 -0800 Subject: [PATCH 01/94] sync from container_agent repo --- .../agentserver/agentframework/agent_framework.py | 4 ++-- ...ent_framework_output_non_streaming_converter.py | 2 +- .../agent_framework_output_streaming_converter.py | 1 + .../azure/ai/agentserver/core/constants.py | 3 +-- .../azure/ai/agentserver/core/logger.py | 8 +++++--- .../azure/ai/agentserver/core/server/base.py | 8 ++++---- .../core/server/common/agent_run_context.py | 6 +++--- .../common/id_generator/foundry_id_generator.py | 2 +- .../response_content_part_event_generator.py | 7 ++++--- .../response_event_generator.py | 14 +++++++------- .../response_output_text_event_generator.py | 4 ++-- 11 files changed, 31 insertions(+), 28 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 7177b522d2a9..34270cceadb9 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -14,7 +14,7 @@ from azure.ai.agentserver.core import AgentRunContext, FoundryCBAgent from azure.ai.agentserver.core.constants import Constants as AdapterConstants -from azure.ai.agentserver.core.logger import get_logger +from azure.ai.agentserver.core.logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger from azure.ai.agentserver.core.models import ( CreateResponse, Response as OpenAIResponse, @@ -77,7 +77,7 @@ def _resolve_stream_timeout(self, request_body: CreateResponse) -> float: def init_tracing(self): exporter = os.environ.get(AdapterConstants.OTEL_EXPORTER_ENDPOINT) - app_insights_conn_str = 
os.environ.get(AdapterConstants.APPLICATION_INSIGHTS_CONNECTION_STRING) + app_insights_conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME) project_endpoint = os.environ.get(AdapterConstants.AZURE_AI_PROJECT_ENDPOINT) if project_endpoint: diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py index 805a5eeb9dec..823846f3ca7e 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py @@ -169,7 +169,7 @@ def _append_function_result_content(self, content: FunctionResultContent, sink: result = [raw] elif isinstance(raw, list): for item in raw: - result.append(self._coerce_result_text(item)) # type: ignore + result.append(self._coerce_result_text(item)) # type: ignore call_id = getattr(content, "call_id", None) or "" func_out_id = self._context.id_generator.generate_function_output_id() sink.append( diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index d9bc3199efb5..4e3d12d4563e 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -545,6 +545,7 @@ def build_response(self, status: str) -> 
OpenAIResponse: "id": self._response_id, "status": status, "created_at": self._response_created_at, + "conversation": self._context.get_conversation_object(), } if status == "completed" and self._completed_output_items: response_data["output"] = self._completed_output_items diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py index a13f23aa261e..33fcb0139fea 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py @@ -3,7 +3,6 @@ # --------------------------------------------------------- class Constants: # well-known environment variables - APPLICATION_INSIGHTS_CONNECTION_STRING = "_AGENT_RUNTIME_APP_INSIGHTS_CONNECTION_STRING" AZURE_AI_PROJECT_ENDPOINT = "AZURE_AI_PROJECT_ENDPOINT" AGENT_ID = "AGENT_ID" AGENT_NAME = "AGENT_NAME" @@ -11,4 +10,4 @@ class Constants: OTEL_EXPORTER_ENDPOINT = "OTEL_EXPORTER_ENDPOINT" AGENT_LOG_LEVEL = "AGENT_LOG_LEVEL" AGENT_DEBUG_ERRORS = "AGENT_DEBUG_ERRORS" - ENABLE_APPLICATION_INSIGHTS_LOGGER = "ENABLE_APPLICATION_INSIGHTS_LOGGER" + ENABLE_APPLICATION_INSIGHTS_LOGGER = "AGENT_APP_INSIGHTS_ENABLED" diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py index f062398c0d3b..319e02da7e98 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py @@ -28,6 +28,8 @@ request_context = contextvars.ContextVar("request_context", default=None) +APPINSIGHT_CONNSTR_ENV_NAME = "APPLICATIONINSIGHTS_CONNECTION_STRING" + def get_dimensions(): env_values = {name: value for name, value in vars(Constants).items() if not name.startswith("_")} @@ -58,9 +60,9 @@ def 
get_project_endpoint(): def get_application_insights_connstr(): try: - conn_str = os.environ.get(Constants.APPLICATION_INSIGHTS_CONNECTION_STRING) + conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME) if not conn_str: - print("environment variable APPLICATION_INSIGHTS_CONNECTION_STRING not set.") + print(f"environment variable {APPINSIGHT_CONNSTR_ENV_NAME} not set.") project_endpoint = get_project_endpoint() if project_endpoint: # try to get the project connected application insights @@ -72,7 +74,7 @@ def get_application_insights_connstr(): if not conn_str: print(f"no connected application insights found for project:{project_endpoint}") else: - os.environ[Constants.APPLICATION_INSIGHTS_CONNECTION_STRING] = conn_str + os.environ[APPINSIGHT_CONNSTR_ENV_NAME] = conn_str return conn_str except Exception as e: print(f"failed to get application insights with error: {e}") diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 8915aadb172b..9463c4002e08 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -21,7 +21,7 @@ from starlette.types import ASGIApp from ..constants import Constants -from ..logger import get_logger, request_context +from ..logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger, request_context from ..models import ( Response as OpenAIResponse, ResponseStreamEvent, @@ -93,7 +93,7 @@ async def runs_endpoint(request): kind=trace.SpanKind.SERVER, ): try: - logger.info("Start processing CreateResponse request:") + logger.info("Start processing CreateResponse request.") context_carrier = {} TraceContextTextMapPropagator().inject(context_carrier) @@ -126,7 +126,7 @@ def gen(): yield "data: [DONE]\n\n" error_sent = True finally: - logger.info("End of processing CreateResponse request:") + 
logger.info("End of processing CreateResponse request.") otel_context.detach(token) if not error_sent: yield "data: [DONE]\n\n" @@ -261,7 +261,7 @@ def run(self, port: int = int(os.environ.get("DEFAULT_AD_PORT", 8088))) -> None: def init_tracing(self): exporter = os.environ.get(Constants.OTEL_EXPORTER_ENDPOINT) - app_insights_conn_str = os.environ.get(Constants.APPLICATION_INSIGHTS_CONNECTION_STRING) + app_insights_conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME) if exporter or app_insights_conn_str: from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import TracerProvider diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py index 6fae56f0027d..2703f66f6ff2 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py @@ -46,7 +46,7 @@ def stream(self) -> bool: def get_agent_id_object(self) -> AgentId: agent = self.request.get("agent") if not agent: - return None # type: ignore + return None # type: ignore return AgentId( { "type": agent.type, @@ -57,7 +57,7 @@ def get_agent_id_object(self) -> AgentId: def get_conversation_object(self) -> ResponseConversation1: if not self._conversation_id: - return None # type: ignore + return None # type: ignore return ResponseConversation1(id=self._conversation_id) @@ -72,5 +72,5 @@ def _deserialize_create_response(payload: dict) -> CreateResponse: def _deserialize_agent_reference(payload: dict) -> AgentReference: if not payload: - return None # type: ignore + return None # type: ignore return AgentReference(**payload) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py 
b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py index 910a7c481daa..b1c4e1ac55fd 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py @@ -88,7 +88,7 @@ def _new_id( infix = infix or "" prefix_part = f"{prefix}{delimiter}" if prefix else "" - return f"{prefix_part}{entropy}{infix}{pkey}" + return f"{prefix_part}{infix}{pkey}{entropy}" @staticmethod def _secure_entropy(string_length: int) -> str: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py index fe141887a2b2..4823de4411ae 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py @@ -63,7 +63,7 @@ def try_process_message( return is_processed, next_processor, events - def on_start( # mypy: ignore[override] + def on_start( # mypy: ignore[override] self, event, run_details, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: if self.started: @@ -81,8 +81,9 @@ def on_start( # mypy: ignore[override] return True, [start_event] - def on_end(self, message, context, stream_state: StreamEventState - ) -> List[project_models.ResponseStreamEvent]: # mypy: ignore[override] + def on_end( + self, message, context, stream_state: StreamEventState + ) -> List[project_models.ResponseStreamEvent]: 
# mypy: ignore[override] aggregated_content = self.item_content_helper.create_item_content() done_event = project_models.ResponseContentPartDoneEvent( item_id=self.item_id, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py index ee19ca74f4bb..2bea925ef2ed 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py @@ -33,11 +33,11 @@ def __init__(self, logger, parent): self.parent = parent # parent generator def try_process_message( - self, - message: AnyMessage, # mypy: ignore[valid-type] - context: AgentRunContext, - stream_state: StreamEventState - ): # mypy: ignore[empty-body] + self, + message: AnyMessage, # mypy: ignore[valid-type] + context: AgentRunContext, + stream_state: StreamEventState, + ): # mypy: ignore[empty-body] """ Try to process the incoming message. @@ -63,8 +63,8 @@ def on_start(self) -> tuple[bool, List[project_models.ResponseStreamEvent]]: return False, [] def on_end( - self, message: AnyMessage, context: AgentRunContext, stream_state: StreamEventState - ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: + self, message: AnyMessage, context: AgentRunContext, stream_state: StreamEventState + ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: """ Generate the ending events for this layer. TODO: handle different end conditions, e.g. normal end, error end, etc. 
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py index b6be81ec7cb2..c65eda157bbd 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py @@ -74,7 +74,7 @@ def process( self.aggregated_content += item stream_state.sequence_number += 1 res.append(chunk_event) - return True, self, res # mypy: ignore[return-value] + return True, self, res # mypy: ignore[return-value] return False, self, [] def has_finish_reason(self, message) -> bool: @@ -92,7 +92,7 @@ def should_end(self, message) -> bool: return True return False - def on_end( # mypy: ignore[override] + def on_end( # mypy: ignore[override] self, message, context: AgentRunContext, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: if not self.started: From aef1c478de6ebaccb7d5f4a6bc3321c3add3c1f4 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 10 Nov 2025 17:16:05 -0800 Subject: [PATCH 02/94] sync error format --- .../azure/ai/agentserver/core/server/base.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 9463c4002e08..c3f001245133 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -105,7 +105,7 @@ async def 
runs_endpoint(request): try: first_event = next(resp) except Exception as e: # noqa: BLE001 - err_msg = str(e) if DEBUG_ERRORS else "Internal error" + err_msg = _format_error(e) logger.error("Generator initialization failed: %s\n%s", e, traceback.format_exc()) return JSONResponse({"error": err_msg}, status_code=500) @@ -119,7 +119,7 @@ def gen(): for event in resp: yield _event_to_sse_chunk(event) except Exception as e: # noqa: BLE001 - err_msg = str(e) if DEBUG_ERRORS else "Internal error" + err_msg = _format_error(e) logger.error("Error in non-async generator: %s\n%s", e, traceback.format_exc()) payload = {"error": err_msg} yield f"event: error\ndata: {json.dumps(payload)}\n\n" @@ -143,7 +143,7 @@ def empty_gen(): return StreamingResponse(empty_gen(), media_type="text/event-stream") except Exception as e: # noqa: BLE001 - err_msg = str(e) if DEBUG_ERRORS else "Internal error" + err_msg = _format_error(e) logger.error("Async generator initialization failed: %s\n%s", e, traceback.format_exc()) return JSONResponse({"error": err_msg}, status_code=500) @@ -157,7 +157,7 @@ async def gen_async(): async for event in resp: yield _event_to_sse_chunk(event) except Exception as e: # noqa: BLE001 - err_msg = str(e) if DEBUG_ERRORS else "Internal error" + err_msg = _format_error(e) logger.error("Error in async generator: %s\n%s", e, traceback.format_exc()) payload = {"error": err_msg} yield f"event: error\ndata: {json.dumps(payload)}\n\n" @@ -311,5 +311,14 @@ def _event_to_sse_chunk(event: ResponseStreamEvent) -> str: return f"data: {event_data}\n\n" +def _format_error(exc: Exception) -> str: + message = str(exc) + if message: + return message + if DEBUG_ERRORS: + return repr(exc) + return "Internal error" + + def _to_response(result: Union[Response, dict]) -> Response: return result if isinstance(result, Response) else JSONResponse(result) From 43a9c44334ee0bc2cbc2a72571e2d1645707c243 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 10 Nov 2025 17:29:01 -0800 Subject: [PATCH 
03/94] updated version and changelog --- .../azure-ai-agentserver-agentframework/CHANGELOG.md | 6 ++++++ .../azure/ai/agentserver/agentframework/_version.py | 2 +- sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md | 6 ++++++ .../azure/ai/agentserver/core/_version.py | 2 +- sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md | 6 ++++++ .../azure/ai/agentserver/langgraph/_version.py | 2 +- 6 files changed, 21 insertions(+), 3 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index cfcf2445e256..b1d0e7aa3c6c 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -1,5 +1,11 @@ # Release History + +## 1.0.0b2 (2025-11-10) + +Fixed some bugs + + ## 1.0.0b1 (2025-11-07) ### Features Added diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py index be71c81bd282..bbcd28b4aa67 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b1" +VERSION = "1.0.0b2" diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index cfcf2445e256..b1d0e7aa3c6c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -1,5 +1,11 @@ # Release History + +## 1.0.0b2 (2025-11-10) + +Fixed some bugs + + ## 1.0.0b1 (2025-11-07) ### Features Added diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py index be71c81bd282..bbcd28b4aa67 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b1" +VERSION = "1.0.0b2" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index cfcf2445e256..b1d0e7aa3c6c 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -1,5 +1,11 @@ # Release History + +## 1.0.0b2 (2025-11-10) + +Fixed some bugs + + ## 1.0.0b1 (2025-11-07) ### Features Added diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py index be71c81bd282..bbcd28b4aa67 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b1" +VERSION = "1.0.0b2" From f33ec9f46bf1e531dfa75e02c8a27ca2658f7d8b Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 10 Nov 2025 18:16:37 -0800 Subject: [PATCH 04/94] refined changelog --- .../azure-ai-agentserver-agentframework/CHANGELOG.md | 8 +++++++- sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md | 8 +++++++- .../azure-ai-agentserver-langgraph/CHANGELOG.md | 8 +++++++- 3 files changed, 21 insertions(+), 3 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index b1d0e7aa3c6c..5b10716fd6ae 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -3,7 +3,13 @@ ## 1.0.0b2 (2025-11-10) -Fixed some bugs +### Bugs Fixed + +- Fixed Id generator format. + +- Improved stream mode error messsage. + +- Updated application insights related configuration environment variables. ## 1.0.0b1 (2025-11-07) diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index b1d0e7aa3c6c..5b10716fd6ae 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -3,7 +3,13 @@ ## 1.0.0b2 (2025-11-10) -Fixed some bugs +### Bugs Fixed + +- Fixed Id generator format. + +- Improved stream mode error messsage. + +- Updated application insights related configuration environment variables. 
## 1.0.0b1 (2025-11-07) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index b1d0e7aa3c6c..5b10716fd6ae 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -3,7 +3,13 @@ ## 1.0.0b2 (2025-11-10) -Fixed some bugs +### Bugs Fixed + +- Fixed Id generator format. + +- Improved stream mode error messsage. + +- Updated application insights related configuration environment variables. ## 1.0.0b1 (2025-11-07) From 18d38c45af08b9494b2b14a05f03a2b2e31f57af Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 10 Nov 2025 19:10:17 -0800 Subject: [PATCH 05/94] fix build --- .../azure/ai/agentserver/agentframework/agent_framework.py | 2 +- .../azure-ai-agentserver-agentframework/pyproject.toml | 3 +++ sdk/agentserver/azure-ai-agentserver-core/pyproject.toml | 3 +++ sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml | 3 +++ 4 files changed, 10 insertions(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 34270cceadb9..1997b22cc800 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation +# pylint: disable=logging-fstring-interpolation,no-name-in-module from __future__ import annotations import asyncio # pylint: disable=do-not-import-asyncio diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml index 814d1d6d1a1e..052d36d10c7d 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml @@ -30,6 +30,9 @@ dependencies = [ requires = ["setuptools>=69", "wheel"] build-backend = "setuptools.build_meta" +[project.urls] +repository = "https://github.com/Azure/azure-sdk-for-python" + [tool.setuptools.packages.find] exclude = [ "tests*", diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index f574360722bb..a55490a960be 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -35,6 +35,9 @@ dependencies = [ requires = ["setuptools>=69", "wheel"] build-backend = "setuptools.build_meta" +[project.urls] +repository = "https://github.com/Azure/azure-sdk-for-python" + [tool.setuptools.packages.find] exclude = [ "tests*", diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml index 5552ff8233d2..a5140068e12d 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml @@ -31,6 +31,9 @@ dependencies = [ requires = ["setuptools>=69", "wheel"] build-backend = "setuptools.build_meta" +[project.urls] +repository = "https://github.com/Azure/azure-sdk-for-python" + [tool.setuptools.packages.find] exclude = [ "tests*", From 
963b06d9737f514d5144e8caf4fe5474d60ef91f Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Tue, 11 Nov 2025 12:06:56 -0800 Subject: [PATCH 06/94] update id generator --- .../core/server/common/id_generator/foundry_id_generator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py index b1c4e1ac55fd..1082242cbf51 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py @@ -133,4 +133,4 @@ def _extract_partition_id( if len(segment) < string_length + partition_key_length: raise ValueError(f"Id '{id_str}' does not contain a valid id.") - return segment[-partition_key_length:] + return segment[:partition_key_length] From b9703266a7174a572f7b89996e1c2086d631e479 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Tue, 11 Nov 2025 12:09:05 -0800 Subject: [PATCH 07/94] fix agentframework trace init --- .../ai/agentserver/agentframework/agent_framework.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 1997b22cc800..38eea41e2afb 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -80,12 +80,7 @@ def init_tracing(self): app_insights_conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME) project_endpoint = 
os.environ.get(AdapterConstants.AZURE_AI_PROJECT_ENDPOINT) - if project_endpoint: - project_client = AIProjectClient(endpoint=project_endpoint, credential=DefaultAzureCredential()) - agent_client = AzureAIAgentClient(project_client=project_client) - agent_client.setup_azure_ai_observability() - elif exporter or app_insights_conn_str: - os.environ["WORKFLOW_ENABLE_OTEL"] = "true" + if exporter or app_insights_conn_str: from agent_framework.observability import setup_observability setup_observability( @@ -93,6 +88,10 @@ def init_tracing(self): otlp_endpoint=exporter, applicationinsights_connection_string=app_insights_conn_str, ) + elif project_endpoint: + project_client = AIProjectClient(endpoint=project_endpoint, credential=DefaultAzureCredential()) + agent_client = AzureAIAgentClient(project_client=project_client) + agent_client.setup_azure_ai_observability() self.tracer = trace.get_tracer(__name__) async def agent_run( From 6fb2f0398ef97b4aa41f62ae586302569f18b40b Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Tue, 11 Nov 2025 12:11:38 -0800 Subject: [PATCH 08/94] update version and changelog --- .../azure-ai-agentserver-agentframework/CHANGELOG.md | 9 +++++++++ .../azure/ai/agentserver/agentframework/_version.py | 2 +- sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md | 9 +++++++++ .../azure/ai/agentserver/core/_version.py | 2 +- .../azure-ai-agentserver-langgraph/CHANGELOG.md | 9 +++++++++ .../azure/ai/agentserver/langgraph/_version.py | 2 +- 6 files changed, 30 insertions(+), 3 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index 5b10716fd6ae..c22ea4418361 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -1,6 +1,15 @@ # Release History +## 1.0.0b3 (2025-11-11) + +### Bugs Fixed + +- Fixed Id generator format. 
+ +- Fixed trace initialization for agent-framework. + + ## 1.0.0b2 (2025-11-10) ### Bugs Fixed diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py index bbcd28b4aa67..c43fdbc2e239 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b2" +VERSION = "1.0.0b3" diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index 5b10716fd6ae..c22ea4418361 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -1,6 +1,15 @@ # Release History +## 1.0.0b3 (2025-11-11) + +### Bugs Fixed + +- Fixed Id generator format. + +- Fixed trace initialization for agent-framework. + + ## 1.0.0b2 (2025-11-10) ### Bugs Fixed diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py index bbcd28b4aa67..c43fdbc2e239 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b2" +VERSION = "1.0.0b3" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index 5b10716fd6ae..c22ea4418361 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -1,6 +1,15 @@ # Release History +## 1.0.0b3 (2025-11-11) + +### Bugs Fixed + +- Fixed Id generator format. + +- Fixed trace initialization for agent-framework. + + ## 1.0.0b2 (2025-11-10) ### Bugs Fixed diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py index bbcd28b4aa67..dc203fe30c70 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b2" +VERSION = "1.0.0b3" \ No newline at end of file From 5a01655b9c561970ac5106ef14046b319c7b8609 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Tue, 11 Nov 2025 12:54:59 -0800 Subject: [PATCH 09/94] fix pylint --- .../azure/ai/agentserver/langgraph/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py index dc203fe30c70..c43fdbc2e239 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b3" \ No newline at end of file +VERSION = "1.0.0b3" From f30779b952b55dd1bb8ab724ef6ae8a1a361e465 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Wed, 12 Nov 2025 22:41:54 -0800 Subject: [PATCH 10/94] pin azure-ai-agents and azure-ai-projects version --- sdk/agentserver/azure-ai-agentserver-core/pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index a55490a960be..1c6c37e19e23 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -20,8 +20,8 @@ keywords = ["azure", "azure sdk"] dependencies = [ "azure-monitor-opentelemetry>=1.5.0", - "azure-ai-projects", - "azure-ai-agents>=1.2.0b5", + "azure-ai-projects==1.1.0b4", + "azure-ai-agents==1.2.0b6", "azure-core>=1.35.0", "azure-identity", "openai>=1.80.0", From 
2891cab9346c8b830466b7609afd796b3931c378 Mon Sep 17 00:00:00 2001 From: Ganesh Bheemarasetty <1634042+ganeshyb@users.noreply.github.com> Date: Wed, 12 Nov 2025 23:25:55 -0800 Subject: [PATCH 11/94] Feature Agent Server support tools (#43961) * Tool Client V1 Version * Langraph integration * Updates fixing langgraph adapter * fix build * fix cspel * fix cspell * Add ToolClient integration with Agent Framework and examples for dynamic agent creation * Made changes to return tools instead of toolclient * Address comments --------- Co-authored-by: Lu Sun --- .../ai/agentserver/agentframework/__init__.py | 14 +- .../agentframework/agent_framework.py | 223 +++-- .../agentserver/agentframework/tool_client.py | 164 ++++ .../samples/tool_client_example/README.md | 113 +++ .../agent_factory_example.py | 109 +++ .../tool_client_example/requirements.txt | 4 + .../agentserver/core/client/tools/__init__.py | 13 + .../agentserver/core/client/tools/_client.py | 224 +++++ .../core/client/tools/_configuration.py | 88 ++ .../core/client/tools/_exceptions.py | 49 ++ .../core/client/tools/_model_base.py | 168 ++++ .../core/client/tools/_utils/_model_base.py | 792 ++++++++++++++++++ .../core/client/tools/aio/__init__.py | 13 + .../core/client/tools/aio/_client.py | 226 +++++ .../core/client/tools/aio/_configuration.py | 88 ++ .../tools/aio/operations/_operations.py | 184 ++++ .../client/tools/operations/_operations.py | 543 ++++++++++++ .../core/models/_create_response.py | 1 + .../azure/ai/agentserver/core/server/base.py | 50 +- .../core/server/common/agent_run_context.py | 21 +- .../azure-ai-agentserver-core/cspell.json | 4 +- .../custom_mock_agent_with_tools_test.py | 108 +++ .../ai/agentserver/langgraph/__init__.py | 11 +- .../ai/agentserver/langgraph/langgraph.py | 191 ++++- .../ai/agentserver/langgraph/tool_client.py | 211 +++++ .../cspell.json | 3 +- .../graph_factory_example.py | 128 +++ .../use_tool_client_example.py | 109 +++ 28 files changed, 3763 insertions(+), 89 
deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/README.md create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/agent_factory_example.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/requirements.txt create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_configuration.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_exceptions.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_model_base.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_with_tools_test.py create mode 100644 
sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/graph_factory_example.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py index af980a34799f..aa03a264339c 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py @@ -3,14 +3,22 @@ # --------------------------------------------------------- __path__ = __import__("pkgutil").extend_path(__path__, __name__) +from typing import TYPE_CHECKING, Optional, Any + from ._version import VERSION +from .agent_framework import AgentFrameworkCBAgent + +if TYPE_CHECKING: # pragma: no cover + from azure.core.credentials_async import AsyncTokenCredential -def from_agent_framework(agent): +def from_agent_framework(agent, credentials: Optional["AsyncTokenCredential"] = None, **kwargs: Any) -> "AgentFrameworkCBAgent": from .agent_framework import AgentFrameworkCBAgent - return AgentFrameworkCBAgent(agent) + return AgentFrameworkCBAgent(agent, credentials=credentials, **kwargs) + +from .tool_client import ToolClient -__all__ = ["from_agent_framework"] +__all__ = ["from_agent_framework", "ToolClient"] __version__ = VERSION diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 38eea41e2afb..50cb09fd66f7 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -6,7 +6,8 @@ import asyncio # pylint: disable=do-not-import-asyncio import os -from typing import Any, AsyncGenerator, Union +from typing import TYPE_CHECKING, Any, AsyncGenerator, Awaitable, Optional, Protocol, Union, List +import inspect from agent_framework import AgentProtocol from agent_framework.azure import AzureAIAgentClient # pylint: disable=no-name-in-module @@ -27,12 +28,35 @@ from .models.agent_framework_output_non_streaming_converter import ( AgentFrameworkOutputNonStreamingConverter, ) +from agent_framework import AIFunction from .models.agent_framework_output_streaming_converter import AgentFrameworkOutputStreamingConverter from .models.constants import Constants +from .tool_client import ToolClient + +if TYPE_CHECKING: + from azure.core.credentials_async import AsyncTokenCredential logger = get_logger() +class AgentFactory(Protocol): + """Protocol for agent factory functions. + + An agent factory is a callable that takes a ToolClient and returns + an AgentProtocol, either synchronously or asynchronously. + """ + + def __call__(self, tools: List[AIFunction]) -> Union[AgentProtocol, Awaitable[AgentProtocol]]: + """Create an AgentProtocol using the provided ToolClient. + + :param tools: The list of AIFunction tools available to the agent. + :type tools: List[AIFunction] + :return: An Agent Framework agent, or an awaitable that resolves to one. + :rtype: Union[AgentProtocol, Awaitable[AgentProtocol]] + """ + ... + + class AgentFrameworkCBAgent(FoundryCBAgent): """ Adapter class for integrating Agent Framework agents with the FoundryCB agent interface. @@ -50,10 +74,33 @@ class AgentFrameworkCBAgent(FoundryCBAgent): - Supports both streaming and non-streaming responses based on the `stream` flag. 
""" - def __init__(self, agent: AgentProtocol): - super().__init__() - self.agent = agent - logger.info(f"Initialized AgentFrameworkCBAgent with agent: {type(agent).__name__}") + def __init__(self, agent: Union[AgentProtocol, AgentFactory], credentials: "Optional[AsyncTokenCredential]" = None, **kwargs: Any): + """Initialize the AgentFrameworkCBAgent with an AgentProtocol or a factory function. + + :param agent: The Agent Framework agent to adapt, or a callable that takes ToolClient and returns AgentProtocol (sync or async). + :type agent: Union[AgentProtocol, AgentFactory] + :param credentials: Azure credentials for authentication. + :type credentials: Optional[AsyncTokenCredential] + """ + super().__init__(credentials=credentials, **kwargs) + self._agent_or_factory: Union[AgentProtocol, AgentFactory] = agent + self._resolved_agent: "Optional[AgentProtocol]" = None + + # If agent is already instantiated, use it directly + if isinstance(agent, AgentProtocol): + self._resolved_agent = agent + logger.info(f"Initialized AgentFrameworkCBAgent with agent: {type(agent).__name__}") + else: + logger.info("Initialized AgentFrameworkCBAgent with agent factory") + + @property + def agent(self) -> "Optional[AgentProtocol]": + """Get the resolved agent. This property provides backward compatibility. + + :return: The resolved AgentProtocol if available, None otherwise. + :rtype: Optional[AgentProtocol] + """ + return self._resolved_agent def _resolve_stream_timeout(self, request_body: CreateResponse) -> float: """Resolve idle timeout for streaming updates. @@ -75,6 +122,49 @@ def _resolve_stream_timeout(self, request_body: CreateResponse) -> float: env_val = os.getenv(Constants.AGENTS_ADAPTER_STREAM_TIMEOUT_S) return float(env_val) if env_val is not None else float(Constants.DEFAULT_STREAM_TIMEOUT_S) + async def _resolve_agent(self, context: AgentRunContext): + """Resolve the agent if it's a factory function (for single-use/first-time resolution). 
+ Creates a ToolClient and calls the factory function with it. + This is used for the initial resolution. + """ + if callable(self._agent_or_factory): + logger.debug("Resolving agent from factory function") + + # Create ToolClient with credentials + tool_client = self.get_tool_client(tools=context.get_tools(), user_info=context.get_user_info()) + tool_client_wrapper = ToolClient(tool_client) + tools = await tool_client_wrapper.list_tools() + + result = self._agent_or_factory(tools) + if inspect.iscoroutine(result): + self._resolved_agent = await result + else: + self._resolved_agent = result + + logger.debug("Agent resolved successfully") + else: + # Should not reach here, but just in case + self._resolved_agent = self._agent_or_factory + + async def _resolve_agent_for_request(self, context: AgentRunContext): + + logger.debug("Resolving fresh agent from factory function for request") + + # Create ToolClient with credentials + tool_client = self.get_tool_client(tools=context.get_tools(), user_info=context.get_user_info()) + tool_client_wrapper = ToolClient(tool_client) + tools = await tool_client_wrapper.list_tools() + + import inspect + result = self._agent_or_factory(tools) + if inspect.iscoroutine(result): + agent = await result + else: + agent = result + + logger.debug("Fresh agent resolved successfully for request") + return agent, tool_client_wrapper + def init_tracing(self): exporter = os.environ.get(AdapterConstants.OTEL_EXPORTER_ENDPOINT) app_insights_conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME) @@ -100,53 +190,82 @@ async def agent_run( OpenAIResponse, AsyncGenerator[ResponseStreamEvent, Any], ]: - logger.info(f"Starting agent_run with stream={context.stream}") - request_input = context.request.get("input") - - input_converter = AgentFrameworkInputConverter() - message = input_converter.transform_input(request_input) - logger.debug(f"Transformed input message type: {type(message)}") - - # Use split converters - if context.stream: - 
logger.info("Running agent in streaming mode") - streaming_converter = AgentFrameworkOutputStreamingConverter(context) - - async def stream_updates(): - update_count = 0 - timeout_s = self._resolve_stream_timeout(context.request) - logger.info("Starting streaming with idle-timeout=%.2fs", timeout_s) - for ev in streaming_converter.initial_events(): - yield ev - - # Iterate with per-update timeout; terminate if idle too long - aiter = self.agent.run_stream(message).__aiter__() - while True: + # Resolve agent - always resolve if it's a factory function to get fresh agent each time + # For factories, get a new agent instance per request to avoid concurrency issues + tool_client = None + try: + if callable(self._agent_or_factory): + agent, tool_client = await self._resolve_agent_for_request(context) + elif self._resolved_agent is None: + await self._resolve_agent(context) + agent = self._resolved_agent + else: + agent = self._resolved_agent + + logger.info(f"Starting agent_run with stream={context.stream}") + request_input = context.request.get("input") + + input_converter = AgentFrameworkInputConverter() + message = input_converter.transform_input(request_input) + logger.debug(f"Transformed input message type: {type(message)}") + + # Use split converters + if context.stream: + logger.info("Running agent in streaming mode") + streaming_converter = AgentFrameworkOutputStreamingConverter(context) + + async def stream_updates(): try: - update = await asyncio.wait_for(aiter.__anext__(), timeout=timeout_s) - except StopAsyncIteration: - logger.debug("Agent streaming iterator finished (StopAsyncIteration)") - break - except asyncio.TimeoutError: - logger.warning("Streaming idle timeout reached (%.1fs); terminating stream.", timeout_s) + update_count = 0 + timeout_s = self._resolve_stream_timeout(context.request) + logger.info("Starting streaming with idle-timeout=%.2fs", timeout_s) + for ev in streaming_converter.initial_events(): + yield ev + + # Iterate with per-update 
timeout; terminate if idle too long + aiter = agent.run_stream(message).__aiter__() + while True: + try: + update = await asyncio.wait_for(aiter.__anext__(), timeout=timeout_s) + except StopAsyncIteration: + logger.debug("Agent streaming iterator finished (StopAsyncIteration)") + break + except asyncio.TimeoutError: + logger.warning("Streaming idle timeout reached (%.1fs); terminating stream.", timeout_s) + for ev in streaming_converter.completion_events(): + yield ev + return + update_count += 1 + transformed = streaming_converter.transform_output_for_streaming(update) + for event in transformed: + yield event for ev in streaming_converter.completion_events(): yield ev - return - update_count += 1 - transformed = streaming_converter.transform_output_for_streaming(update) - for event in transformed: - yield event - for ev in streaming_converter.completion_events(): - yield ev - logger.info("Streaming completed with %d updates", update_count) - - return stream_updates() - - # Non-streaming path - logger.info("Running agent in non-streaming mode") - non_streaming_converter = AgentFrameworkOutputNonStreamingConverter(context) - result = await self.agent.run(message) - logger.debug(f"Agent run completed, result type: {type(result)}") - transformed_result = non_streaming_converter.transform_output_for_response(result) - logger.info("Agent run and transformation completed successfully") - return transformed_result + logger.info("Streaming completed with %d updates", update_count) + finally: + # Close tool_client if it was created for this request + if tool_client is not None: + try: + await tool_client.close() + logger.debug("Closed tool_client after streaming completed") + except Exception as e: + logger.warning(f"Error closing tool_client in stream: {e}") + + return stream_updates() + + # Non-streaming path + logger.info("Running agent in non-streaming mode") + non_streaming_converter = AgentFrameworkOutputNonStreamingConverter(context) + result = await 
agent.run(message) + logger.debug(f"Agent run completed, result type: {type(result)}") + transformed_result = non_streaming_converter.transform_output_for_response(result) + logger.info("Agent run and transformation completed successfully") + return transformed_result + finally: + # Close tool_client if it was created for this request (non-streaming only, streaming handles in generator) + if not context.stream and tool_client is not None: + try: + await tool_client.close() + logger.debug("Closed tool_client after request processing") + except Exception as e: + logger.warning(f"Error closing tool_client: {e}") diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py new file mode 100644 index 000000000000..e06df0df3026 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py @@ -0,0 +1,164 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Tool client for integrating AzureAIToolClient with Agent Framework.""" + +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional +from agent_framework import AIFunction +from pydantic import BaseModel, Field, create_model + +if TYPE_CHECKING: + from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient, FoundryTool + +class ToolClient: + """Client that integrates AzureAIToolClient with Agent Framework. + + This class provides methods to list tools from AzureAIToolClient and invoke them + in a format compatible with Agent Framework agents. + + :param tool_client: The AzureAIToolClient instance to use for tool operations. + :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient + + .. admonition:: Example: + + .. 
code-block:: python + + from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient + from azure.ai.agentserver.agentframework import ToolClient + from azure.identity.aio import DefaultAzureCredential + + async with DefaultAzureCredential() as credential: + tool_client = AzureAIToolClient( + endpoint="https://", + credential=credential + ) + + client = ToolClient(tool_client) + + # List tools as Agent Framework tool definitions + tools = await client.list_tools() + + # Invoke a tool directly + result = await client.invoke_tool( + tool_name="my_tool", + tool_input={"param": "value"} + ) + + :meta private: + """ + + def __init__(self, tool_client: "AzureAIToolClient") -> None: + """Initialize the ToolClient. + + :param tool_client: The AzureAIToolClient instance to use for tool operations. + :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient + """ + self._tool_client = tool_client + self._aifunction_cache: List[AIFunction] = None + + async def list_tools(self) -> List[AIFunction]: + """List all available tools as Agent Framework tool definitions. + + Retrieves tools from AzureAIToolClient and returns them in a format + compatible with Agent Framework. + + :return: List of tool definitions. + :rtype: List[AIFunction] + :raises ~azure.core.exceptions.HttpResponseError: + Raised for HTTP communication failures. + + .. admonition:: Example: + + .. 
code-block:: python + + client = ToolClient(tool_client) + tools = await client.list_tools() + """ + # Get tools from AzureAIToolClient + if self._aifunction_cache is not None: + return self._aifunction_cache + + azure_tools = await self._tool_client.list_tools() + self._aifunction_cache = [] + + # Convert to Agent Framework tool definitions + for azure_tool in azure_tools: + ai_function_tool = self._convert_to_agent_framework_tool(azure_tool) + self._aifunction_cache.append(ai_function_tool) + + return self._aifunction_cache + + def _convert_to_agent_framework_tool(self, azure_tool: "FoundryTool") -> AIFunction: + """Convert an AzureAITool to an Agent Framework AI Function + + :param azure_tool: The AzureAITool to convert. + :type azure_tool: ~azure.ai.agentserver.core.client.tools.aio.FoundryTool + :return: An AI Function Tool. + :rtype: AIFunction + """ + # Get the input schema from the tool descriptor + input_schema = azure_tool.input_schema or {} + + # Create a Pydantic model from the input schema + properties = input_schema.get("properties", {}) + required_fields = set(input_schema.get("required", [])) + + # Build field definitions for the Pydantic model + field_definitions: Dict[str, Any] = {} + for field_name, field_info in properties.items(): + field_type = self._json_schema_type_to_python(field_info.get("type", "string")) + field_description = field_info.get("description", "") + is_required = field_name in required_fields + + if is_required: + field_definitions[field_name] = (field_type, Field(description=field_description)) + else: + field_definitions[field_name] = (Optional[field_type], Field(default=None, description=field_description)) + + # Create the Pydantic model dynamically + input_model = create_model( + f"{azure_tool.name}_input", + **field_definitions + ) + + # Create a wrapper function that calls the Azure tool + async def tool_func(**kwargs: Any) -> Any: + """Dynamically generated function to invoke the Azure AI tool.""" + return await 
self.invoke_tool(azure_tool.name, kwargs) + + # Create and return the AIFunction + return AIFunction( + name=azure_tool.name, + description=azure_tool.description or "No description available", + func=tool_func, + input_model=input_model + ) + + def _json_schema_type_to_python(self, json_type: str) -> type: + """Convert JSON schema type to Python type. + + :param json_type: The JSON schema type string. + :type json_type: str + :return: The corresponding Python type. + :rtype: type + """ + type_map = { + "string": str, + "number": float, + "integer": int, + "boolean": bool, + "array": list, + "object": dict, + } + return type_map.get(json_type, str) + + async def close(self) -> None: + await self._tool_client.close() + + async def __aenter__(self) -> "ToolClient": + """Async context manager entry.""" + return self + + async def __aexit__(self, *exc_details: Any) -> None: + """Async context manager exit.""" + await self.close() diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/README.md b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/README.md new file mode 100644 index 000000000000..019e388975ff --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/README.md @@ -0,0 +1,113 @@ +# Tool Client Example + +This example demonstrates how to use the `ToolClient` with Agent Framework to dynamically access tools from Azure AI Tool Client. + +## Overview + +The `ToolClient` provides a bridge between Azure AI Tool Client and Agent Framework, allowing agents to access tools configured in your Azure AI project. This example shows how to use a factory function pattern to create agents dynamically with access to tools at runtime. 
+ +## Features + +- **Dynamic Tool Access**: Agents can list and invoke tools from Azure AI Tool Client +- **Factory Pattern**: Create fresh agent instances per request to avoid concurrency issues +- **Tool Integration**: Seamlessly integrate Azure AI tools with Agent Framework agents + +## Prerequisites + +- Python 3.10 or later +- Azure AI project with configured tools +- Azure credentials (DefaultAzureCredential) + +## Setup + +1. Install dependencies: +```bash +pip install -r requirements.txt +``` + +2. Configure environment variables in `.env`: +``` +AZURE_AI_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ +``` + +3. Ensure your Azure AI project has tools configured (e.g., MCP connections) + +## Running the Example + +```bash +python agent_factory_example.py +``` + +## How It Works + +1. **Factory Function**: The example creates a factory function that: + - Receives a `ToolClient` instance + - Lists available tools from Azure AI Tool Client + - Creates an Agent Framework agent with those tools + - Returns the agent instance + +2. **Dynamic Agent Creation**: The factory is called for each request, ensuring: + - Fresh agent instances per request + - Latest tool configurations + - No concurrency issues + +3. 
**Tool Access**: The agent can use tools like: + - MCP (Model Context Protocol) connections + - Function tools + - Other Azure AI configured tools + +## Key Code Patterns + +### Creating a Factory Function + +```python +async def agent_factory(tool_client: ToolClient): + # List tools from Azure AI + tools = await tool_client.list_tools() + + # Create agent with tools + agent = Agent( + name="MyAgent", + model="gpt-4o", + instructions="You are a helpful assistant.", + tools=tools + ) + return agent +``` + +### Using the Factory + +```python +from azure.ai.agentserver.agentframework import from_agent_framework + +adapter = from_agent_framework( + agent_factory, + credentials=credential, + tools=[{"type": "mcp", "project_connection_id": "my-mcp"}] +) +``` + +## Alternative: Direct Agent Usage + +You can also use a pre-created agent instead of a factory: + +```python +agent = Agent( + name="MyAgent", + model="gpt-4o", + instructions="You are a helpful assistant." +) + +adapter = from_agent_framework(agent, credentials=credential) +``` + +## Troubleshooting + +- **No tools found**: Ensure your Azure AI project has tools configured +- **Authentication errors**: Check your Azure credentials and project endpoint +- **Import errors**: Verify all dependencies are installed + +## Learn More + +- [Azure AI Agent Service Documentation](https://learn.microsoft.com/azure/ai-services/agents/) +- [Agent Framework Documentation](https://github.com/microsoft/agent-framework) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/agent_factory_example.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/agent_factory_example.py new file mode 100644 index 000000000000..bc4d6bf8806d --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/agent_factory_example.py @@ -0,0 +1,109 @@ +# Copyright (c) Microsoft. All rights reserved. 
+"""Example showing how to use an agent factory function with ToolClient. + +This sample demonstrates how to pass a factory function to from_agent_framework +that receives a ToolClient and returns an AgentProtocol. This pattern allows +the agent to be created dynamically with access to tools from Azure AI Tool +Client at runtime. +""" + +import asyncio +import os +from typing import List +from dotenv import load_dotenv +from agent_framework import AIFunction +from agent_framework.azure import AzureOpenAIChatClient + +from azure.ai.agentserver.agentframework import from_agent_framework +from azure.identity.aio import DefaultAzureCredential + +load_dotenv() + + +def create_agent_factory(): + """Create a factory function that builds an agent with ToolClient. + + This function returns a factory that takes a ToolClient and returns + an AgentProtocol. The agent is created at runtime for every request, + allowing it to access the latest tool configuration dynamically. + """ + + async def agent_factory(tools: List[AIFunction]) -> AzureOpenAIChatClient: + """Factory function that creates an agent using the provided tools. + + :param tools: The list of AIFunction tools available to the agent. + :type tools: List[AIFunction] + :return: An Agent Framework ChatAgent instance. 
+ :rtype: ChatAgent + """ + # List all available tools from the ToolClient + print("Fetching tools from Azure AI Tool Client via factory...") + print(f"Found {len(tools)} tools:") + for tool in tools: + print(f" - tool: {tool.name}, description: {tool.description}") + + if not tools: + print("\nNo tools found!") + print("Make sure your Azure AI project has tools configured.") + raise ValueError("No tools available to create agent") + + # Create the Agent Framework agent with the tools + print("\nCreating Agent Framework agent with tools from factory...") + agent = AzureOpenAIChatClient(credential=DefaultAzureCredential()).create_agent( + name="ToolClientAgent", + instructions="You are a helpful assistant with access to various tools.", + tools=tools, + ) + + print("Agent created successfully!") + return agent + + return agent_factory + + +async def quickstart(): + """Build and return an AgentFrameworkCBAgent using an agent factory function.""" + + # Get configuration from environment + project_endpoint = os.getenv("AZURE_AI_PROJECT_ENDPOINT") + + if not project_endpoint: + raise ValueError( + "AZURE_AI_PROJECT_ENDPOINT environment variable is required. 
" + "Set it to your Azure AI project endpoint, e.g., " + "https://.services.ai.azure.com/api/projects/" + ) + + # Create Azure credentials + credential = DefaultAzureCredential() + + # Create a factory function that will build the agent at runtime + # The factory will receive a ToolClient when the agent first runs + agent_factory = create_agent_factory() + + tool_connection_id = os.getenv("AZURE_AI_PROJECT_TOOL_CONNECTION_ID") + # Pass the factory function to from_agent_framework instead of a compiled agent + # The agent will be created on every agent run with access to ToolClient + print("Creating Agent Framework adapter with factory function...") + adapter = from_agent_framework( + agent_factory, + credentials=credential, + tools=[{"type": "mcp", "project_connection_id": tool_connection_id}] + ) + + print("Adapter created! Agent will be built on every request.") + return adapter + + +async def main(): # pragma: no cover - sample entrypoint + """Main function to run the agent.""" + adapter = await quickstart() + + if adapter: + print("\nStarting agent server...") + print("The agent factory will be called for every request that arrives.") + await adapter.run_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/requirements.txt b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/requirements.txt new file mode 100644 index 000000000000..79caf276114f --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/requirements.txt @@ -0,0 +1,4 @@ +azure-ai-agentserver-agentframework +azure-identity +python-dotenv +agent-framework diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py new file mode 100644 index 000000000000..8cf7c6b67389 --- /dev/null +++ 
b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py @@ -0,0 +1,13 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +from ._client import AzureAIToolClient, FoundryTool +from ._exceptions import OAuthConsentRequiredError, MCPToolApprovalRequiredError + +__all__ = [ + "AzureAIToolClient", + "FoundryTool", + "OAuthConsentRequiredError", + "MCPToolApprovalRequiredError", +] \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py new file mode 100644 index 000000000000..a7afd935df64 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py @@ -0,0 +1,224 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +from copy import deepcopy +from typing import Any, List, TYPE_CHECKING, Mapping, Union +from azure.core import PipelineClient +from azure.core.pipeline import policies +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.credentials import TokenCredential + +from ._configuration import AzureAIToolClientConfiguration +from .operations._operations import MCPToolsOperations, RemoteToolsOperations +from ._utils._model_base import InvocationPayloadBuilder +from ._model_base import FoundryTool, ToolSource + +class AzureAITool: + """Azure AI tool wrapper for invocation. + + Represents a single tool that can be invoked either via MCP protocol or + Azure AI Tools API. This class provides a convenient interface for tool + invocation and exposes tool metadata. + + :ivar str name: The name of the tool. 
+ :ivar str description: Human-readable description of what the tool does. + :ivar dict metadata: Additional metadata about the tool from the API. + :ivar ~Tool_Client.models.ToolSource source: + The source of the tool (MCP_TOOLS or REMOTE_TOOLS). + + .. admonition:: Example: + + .. literalinclude:: ../samples/simple_example.py + :start-after: [START use_tool] + :end-before: [END use_tool] + :language: python + :dedent: 4 + :caption: Using an AzureAITool instance. + """ + + def __init__(self, client: "AzureAIToolClient", descriptor: FoundryTool) -> None: + """Initialize an Azure AI Tool. + + :param client: Parent client instance for making API calls. + :type client: AzureAIToolClient + :param descriptor: Tool descriptor containing metadata and configuration. + :type descriptor: ~Tool_Client.models.FoundryTool + """ + self._client = client + self._descriptor = descriptor + self.name = descriptor.name + self.description = descriptor.description + self.metadata = dict(descriptor.metadata) + self.source = descriptor.source + + def invoke(self, *args: Any, **kwargs: Any) -> Any: + """Invoke the tool synchronously. + + :param args: Positional arguments to pass to the tool. + :param kwargs: Keyword arguments to pass to the tool. + :return: The result from the tool invocation. + :rtype: Any + """ + payload = InvocationPayloadBuilder.build_payload(args, kwargs, {}) + return self._client._invoke_tool(self._descriptor, payload) + + def __call__(self, *args: Any, **kwargs: Any) -> Any: + return self.invoke(*args, **kwargs) + +class AzureAIToolClient: + """Synchronous client for aggregating tools from Azure AI MCP and Tools APIs. + + This client provides access to tools from both MCP (Model Context Protocol) servers + and Azure AI Tools API endpoints, enabling unified tool discovery and invocation. + + :param str endpoint: + The fully qualified endpoint for the Azure AI Agents service. 
+ Example: "https://.api.azureml.ms" + :param credential: + Credential for authenticating requests to the service. + Use credentials from azure-identity like DefaultAzureCredential. + :type credential: ~azure.core.credentials.TokenCredential + :keyword str agent_name: + Name of the agent to use for tool operations. Default is "$default". + :keyword List[Mapping[str, Any]] tools: + List of tool configurations defining which tools to include. + :keyword Mapping[str, Any] user: + User information for tool invocations (object_id, tenant_id). + :keyword str api_version: + API version to use when communicating with the service. + Default is the latest supported version. + :keyword transport: + Custom transport implementation. Default is RequestsTransport. + :paramtype transport: ~azure.core.pipeline.transport.HttpTransport + + """ + + def __init__( + self, + endpoint: str, + credential: "TokenCredential", + **kwargs: Any, + ) -> None: + """Initialize the synchronous Azure AI Tool Client. + + :param str endpoint: The service endpoint URL. + :param credential: Credentials for authenticating requests. + :type credential: ~azure.core.credentials.TokenCredential + :keyword kwargs: Additional keyword arguments for client configuration. 
+ """ + self._config = AzureAIToolClientConfiguration( + endpoint, + credential, + **kwargs, + ) + + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: PipelineClient = PipelineClient(base_url=endpoint, policies=_policies, **kwargs) + + # Initialize specialized clients with client and config + self._mcp_tools = MCPToolsOperations(client=self._client, config=self._config) + self._remote_tools = RemoteToolsOperations(client=self._client, config=self._config) + + def list_tools(self) -> List[FoundryTool]: + """List all available tools from configured sources. + + Retrieves tools from both MCP servers and Azure AI Tools API endpoints, + returning them as FoundryTool instances ready for invocation. + :return: List of available tools from all configured sources. + :rtype: List[~AzureAITool] + :raises ~exceptions.OAuthConsentRequiredError: + Raised when the service requires user OAuth consent. + :raises ~exceptions.MCPToolApprovalRequiredError: + Raised when tool access requires human approval. + :raises ~azure.core.exceptions.HttpResponseError: + Raised for HTTP communication failures. 
+ + """ + + existing_names: set[str] = set() + + tools: List[FoundryTool] = [] + + # Fetch MCP tools + mcp_tools = self._mcp_tools.list_tools(existing_names) + tools.extend(mcp_tools) + + # Fetch Tools API tools + tools_api_tools = self._remote_tools.resolve_tools(existing_names) + tools.extend(tools_api_tools) + + for tool in tools: + # Capture tool in a closure to avoid shadowing issues + def make_invoker(captured_tool): + return lambda *args, **kwargs: self.invoke_tool(captured_tool, *args, **kwargs) + tool.invoker = make_invoker(tool) + return tools + + def invoke_tool( + self, + tool: Union[str, FoundryTool], + *args: Any, + **kwargs: Any, + ) -> Any: + """Invoke a tool by instance, name, or descriptor. + + :param tool: Tool to invoke, specified as an AzureAITool instance, + tool name string, or FoundryTool. + :type tool: Union[str, ~FoundryTool] + :param args: Positional arguments to pass to the tool + """ + descriptor = self._resolve_tool_descriptor(tool) + payload = InvocationPayloadBuilder.build_payload(args, kwargs, {}) + return self._invoke_tool(descriptor, payload, **kwargs) + + def _resolve_tool_descriptor( + self, tool: Union[AzureAITool, str, FoundryTool] + ) -> FoundryTool: + """Resolve a tool reference to a descriptor.""" + if isinstance(tool, AzureAITool): + return tool._descriptor + if isinstance(tool, FoundryTool): + return tool + if isinstance(tool, str): + # Fetch all tools and find matching descriptor + descriptors = self.list_tools() + for descriptor in descriptors: + if descriptor.name == tool or descriptor.key == tool: + return descriptor + raise KeyError(f"Unknown tool: {tool}") + raise TypeError("Tool must be an AzureAITool, FoundryTool, or registered name/key") + + def _invoke_tool(self, descriptor: FoundryTool, arguments: Mapping[str, Any], **kwargs: Any) -> Any: + """Invoke a tool descriptor.""" + if descriptor.source is ToolSource.MCP_TOOLS: + return self._mcp_tools.invoke_tool(descriptor, arguments) + if descriptor.source is 
ToolSource.REMOTE_TOOLS: + return self._remote_tools.invoke_tool(descriptor, arguments) + raise ValueError(f"Unsupported tool source: {descriptor.source}") + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> "AzureAIToolClient": + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_configuration.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_configuration.py new file mode 100644 index 000000000000..45e2ac178654 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_configuration.py @@ -0,0 +1,88 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +from typing import Any, Mapping, List, Optional, TYPE_CHECKING + +from azure.core.pipeline import policies + +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential + +from ._utils._model_base import ToolConfigurationParser, UserInfo, ToolDefinition + +class AzureAIToolClientConfiguration: + """Configuration for Azure AI Tool Client. + + Manages authentication, endpoint configuration, and policy settings for the + Azure AI Tool Client. This class is used internally by the client and should + not typically be instantiated directly. + + :param str endpoint: + Fully qualified endpoint for the Azure AI Agents service. + :param credential: + Azure TokenCredential for authentication. + :type credential: ~azure.core.credentials.TokenCredential + :keyword str api_version: + API version to use. Default is the latest supported version. + :keyword List[str] credential_scopes: + OAuth2 scopes for token requests. Default is ["https://ai.azure.com/.default"]. 
+ :keyword str agent_name: + Name of the agent. Default is "$default". + :keyword List[Mapping[str, Any]] tools: + List of tool configurations. + :keyword Mapping[str, Any] user: + User information for tool invocations. + """ + + def __init__( + self, + endpoint: str, + credential: "TokenCredential", + **kwargs: Any, + ) -> None: + """Initialize the configuration. + + :param str endpoint: The service endpoint URL. + :param credential: Credentials for authenticating requests. + :type credential: ~azure.core.credentials.TokenCredential + :keyword kwargs: Additional configuration options. + """ + api_version: str = kwargs.pop("api_version", "2025-05-15-preview") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://ai.azure.com/.default"]) + + + # Tool configuration + self.agent_name: str = kwargs.pop("agent_name", "$default") + self.tools: Optional[List[ToolDefinition]] = kwargs.pop("tools", None) + self.user: Optional[UserInfo] = kwargs.pop("user", None) + + # Initialize tool configuration parser + + self.tool_config = ToolConfigurationParser(self.tools) + + self._configure(**kwargs) + + # Warn about unused kwargs + if kwargs: + import warnings + warnings.warn(f"Unused configuration parameters: {list(kwargs.keys())}", UserWarning) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + 
self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.BearerTokenCredentialPolicy( + self.credential, *self.credential_scopes, **kwargs + ) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_exceptions.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_exceptions.py new file mode 100644 index 000000000000..aa00b6b5f4b5 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_exceptions.py @@ -0,0 +1,49 @@ + +from typing import Any, Mapping, Optional + + +class OAuthConsentRequiredError(RuntimeError): + """Raised when the service requires end-user OAuth consent. + + This exception is raised when a tool or service operation requires explicit + OAuth consent from the end user before the operation can proceed. + + :ivar str message: Human-readable guidance returned by the service. + :ivar str consent_url: Link that the end user must visit to provide consent. + :ivar dict payload: Full response payload from the service. + + :param str message: Human-readable guidance returned by the service. + :param str consent_url: Link that the end user must visit to provide the required consent. + :param dict payload: Full response payload supplied by the service. + """ + + def __init__(self, message: str, consent_url: Optional[str], payload: Mapping[str, Any]): + super().__init__(message) + self.message = message + self.consent_url = consent_url + self.payload = dict(payload) + + +class MCPToolApprovalRequiredError(RuntimeError): + """Raised when an MCP tool invocation needs human approval. 
+ + This exception is raised when an MCP (Model Context Protocol) tool requires + explicit human approval before the invocation can proceed, typically for + security or compliance reasons. + + :ivar str message: Human-readable guidance returned by the service. + :ivar dict approval_arguments: + Arguments that must be approved or amended before continuing. + :ivar dict payload: Full response payload from the service. + + :param str message: Human-readable guidance returned by the service. + :param dict approval_arguments: + Arguments that must be approved or amended before continuing. + :param dict payload: Full response payload supplied by the service. + """ + + def __init__(self, message: str, approval_arguments: Mapping[str, Any], payload: Mapping[str, Any]): + super().__init__(message) + self.message = message + self.approval_arguments = dict(approval_arguments) + self.payload = dict(payload) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_model_base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_model_base.py new file mode 100644 index 000000000000..3c7bed8b5db1 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_model_base.py @@ -0,0 +1,168 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +from enum import Enum +import json + +from typing import Any, Awaitable, Callable, List, Mapping, Optional +from dataclasses import dataclass +import asyncio +import inspect + +class ToolSource(str, Enum): + """Identifies the origin of a tool. + + Specifies whether a tool comes from an MCP (Model Context Protocol) server + or from the Azure AI Tools API (remote tools). 
+    """
+
+    MCP_TOOLS = "mcp_tools"
+    REMOTE_TOOLS = "remote_tools"
+
+class ToolDefinition:
+    """Definition of a tool including its parameters.
+
+    :ivar str type: Tool type (e.g., "mcp", "a2", other tools).
+    """
+
+    def __init__(self, type: str, **kwargs: Any) -> None:
+        """Initialize ToolDefinition with type and any additional properties.
+
+        :param str type: Tool type (e.g., "mcp", "a2", other tools).
+        :param kwargs: Any additional properties to set on the tool definition.
+        """
+        self.type = type
+        # Store all additional properties as attributes
+        for key, value in kwargs.items():
+            setattr(self, key, value)
+
+    def __repr__(self) -> str:
+        """Return a detailed string representation of the ToolDefinition."""
+        return json.dumps(self.__dict__, default=str)
+
+    def __str__(self) -> str:
+        """Return a human-readable string representation."""
+        return json.dumps(self.__dict__, default=str)
+
+
+@dataclass
+class FoundryTool:
+    """Lightweight description of a tool that can be invoked.
+
+    Represents metadata and configuration for a single tool, including its
+    name, description, input schema, and source information.
+
+    :ivar str key: Unique identifier for this tool.
+    :ivar str name: Display name of the tool.
+    :ivar str description: Human-readable description of what the tool does.
+    :ivar ~ToolSource source:
+        Origin of the tool (MCP_TOOLS or REMOTE_TOOLS).
+    :ivar dict metadata: Raw metadata from the API response.
+    :ivar dict input_schema:
+        JSON schema describing the tool's input parameters, or None.
+    :ivar ToolDefinition tool_definition:
+        Optional tool definition object, or None.
+    """
+
+    key: str
+    name: str
+    description: str
+    source: ToolSource
+    metadata: Mapping[str, Any]
+    input_schema: Optional[Mapping[str, Any]] = None
+    tool_definition: Optional[ToolDefinition] = None
+    invoker: Optional[Callable[..., Awaitable[Any]]] = None
+
+    def invoke(self, *args: Any, **kwargs: Any) -> Any:
+        """Invoke the tool synchronously.
+ + :param args: Positional arguments to pass to the tool. + :param kwargs: Keyword arguments to pass to the tool. + :return: The result from the tool invocation. + :rtype: Any + """ + + + if not self.invoker: + raise NotImplementedError("No invoker function defined for this tool.") + if inspect.iscoroutinefunction(self.invoker): + # If the invoker is async, check if we're already in an event loop + try: + loop = asyncio.get_running_loop() + # We're in a running loop, can't use asyncio.run() + raise RuntimeError( + "Cannot call invoke() on an async tool from within an async context. " + "Use 'await tool.ainvoke(...)' or 'await tool(...)' instead." + ) + except RuntimeError as e: + if "no running event loop" in str(e).lower(): + # No running loop, safe to use asyncio.run() + return asyncio.run(self.invoker(*args, **kwargs)) + else: + # Re-raise our custom error + raise + else: + return self.invoker(*args, **kwargs) + + async def ainvoke(self, *args: Any, **kwargs: Any) -> Any: + """Invoke the tool asynchronously. + + :param args: Positional arguments to pass to the tool. + :param kwargs: Keyword arguments to pass to the tool. + :return: The result from the tool invocation. + :rtype: Any + """ + + if not self.invoker: + raise NotImplementedError("No invoker function defined for this tool.") + if inspect.iscoroutinefunction(self.invoker): + return await self.invoker(*args, **kwargs) + else: + result = self.invoker(*args, **kwargs) + # If the result is awaitable (e.g., a coroutine), await it + if inspect.iscoroutine(result) or hasattr(result, '__await__'): + return await result + return result + + def __call__(self, *args: Any, **kwargs: Any) -> Any: + + # Check if the invoker is async + if self.invoker and inspect.iscoroutinefunction(self.invoker): + # Return coroutine for async context + return self.ainvoke(*args, **kwargs) + else: + # Use sync invoke + return self.invoke(*args, **kwargs) + + +class UserInfo: + """Represents user information. 
+ + :ivar str objectId: User's object identifier. + :ivar str tenantId: Tenant identifier. + """ + + def __init__(self, objectId: str, tenantId: str, **kwargs: Any) -> None: + """Initialize UserInfo with user details. + + :param str objectId: User's object identifier. + :param str tenantId: Tenant identifier. + :param kwargs: Any additional properties to set on the user. + """ + self.objectId = objectId + self.tenantId = tenantId + # Store all additional properties as attributes + for key, value in kwargs.items(): + setattr(self, key, value) + + def to_dict(self) -> dict: + """Convert to dictionary for JSON serialization.""" + return { + "objectId": self.objectId, + "tenantId": self.tenantId + } + + + + diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py new file mode 100644 index 000000000000..d68c2ae28744 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py @@ -0,0 +1,792 @@ + +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +from dataclasses import dataclass, asdict, is_dataclass +from typing import Any, Dict, Iterable, List, Mapping, MutableMapping, Optional, Set, Tuple + +from .._model_base import ToolDefinition, FoundryTool, ToolSource, UserInfo + + + +class ToolDescriptorBuilder: + """Builds FoundryTool objects from raw tool data.""" + + @staticmethod + def build_descriptors( + raw_tools: Iterable[Mapping[str, Any]], + source: ToolSource, + existing_names: Set[str], + ) -> List[FoundryTool]: + """Build tool descriptors from raw tool data. 
+ + Parameters + ---------- + raw_tools : Iterable[Mapping[str, Any]] + Raw tool data from API (can be dicts or dataclass objects) + source : ToolSource + Source of the tools + existing_names : Set[str] + Set of existing tool names to avoid conflicts + + Returns + ------- + List[FoundryTool] + List of built tool descriptors + """ + descriptors: List[FoundryTool] = [] + for raw in raw_tools: + # Convert dataclass objects to dictionaries + if is_dataclass(raw) and not isinstance(raw, type): + raw = asdict(raw) + + name, description = ToolMetadataExtractor.extract_name_description(raw) + if not name: + continue + + key = ToolMetadataExtractor.derive_tool_key(raw, source) + description = description or "" + resolved_name = NameResolver.ensure_unique_name(name, existing_names) + + descriptor = FoundryTool( + key=key, + name=resolved_name, + description=description, + source=source, + metadata=dict(raw), + input_schema=ToolMetadataExtractor.extract_input_schema(raw), + tool_definition= raw.get("tool_definition") + ) + descriptors.append(descriptor) + existing_names.add(resolved_name) + + return descriptors + + +class ToolMetadataExtractor: + """Extracts metadata from raw tool data.""" + + @staticmethod + def extract_name_description(raw: Mapping[str, Any]) -> Tuple[Optional[str], Optional[str]]: + """Extract name and description from raw tool data. 
+
+        Parameters
+        ----------
+        raw : Mapping[str, Any]
+            Raw tool data
+
+        Returns
+        -------
+        Tuple[Optional[str], Optional[str]]
+            Tuple of (name, description)
+        """
+        name = (
+            raw.get("name")
+            or raw.get("id")
+            or raw.get("tool_name")
+            or raw.get("definition", {}).get("name")
+            or raw.get("tool", {}).get("name")
+        )
+        description = (
+            raw.get("description")
+            or raw.get("long_description")
+            or raw.get("definition", {}).get("description")
+            or raw.get("tool", {}).get("description")
+        )
+        return name, description
+
+    @staticmethod
+    def derive_tool_key(raw: Mapping[str, Any], source: ToolSource) -> str:
+        """Derive unique key for a tool.
+
+        Parameters
+        ----------
+        raw : Mapping[str, Any]
+            Raw tool data
+        source : ToolSource
+            Source of the tool
+
+        Returns
+        -------
+        str
+            Unique tool key
+        """
+        for candidate in (raw.get("id"), raw.get("name"), raw.get("tool_name")):
+            if candidate:
+                return f"{source.value}:{candidate}"
+        return f"{source.value}:{id(raw)}"
+
+    @staticmethod
+    def extract_input_schema(raw: Mapping[str, Any]) -> Optional[Mapping[str, Any]]:
+        """Extract input schema from raw tool data.
+
+        Parameters
+        ----------
+        raw : Mapping[str, Any]
+            Raw tool data
+
+        Returns
+        -------
+        Optional[Mapping[str, Any]]
+            Input schema if found
+        """
+        for key in ("input_schema", "inputSchema", "schema", "parameters"):
+            if key in raw and isinstance(raw[key], Mapping):
+                return raw[key]
+        nested = raw.get("definition") or raw.get("tool")
+        if isinstance(nested, Mapping):
+            return ToolMetadataExtractor.extract_input_schema(nested)
+        return None
+
+    @staticmethod
+    def extract_metadata_schema(raw: Mapping[str, Any]) -> Optional[Mapping[str, Any]]:
+        """Extract metadata schema from raw tool data.
+
+        Parameters
+        ----------
+        raw : Mapping[str, Any]
+            Raw tool data
+
+        Returns
+        -------
+        Optional[Mapping[str, Any]]
+            Metadata mapping if found
+        """
+        for key in ("_meta", "metadata", "meta"):
+            if key in raw and isinstance(raw[key], Mapping):
+                return raw[key]
+        return None
+
+
+class NameResolver:
+    """Resolves tool names to ensure uniqueness."""
+
+    @staticmethod
+    def ensure_unique_name(proposed_name: str, existing_names: Set[str]) -> str:
+        """Ensure a tool name is unique.
+
+        Parameters
+        ----------
+        proposed_name : str
+            Proposed tool name
+        existing_names : Set[str]
+            Set of existing tool names
+
+        Returns
+        -------
+        str
+            Unique tool name
+        """
+        if proposed_name not in existing_names:
+            return proposed_name
+
+        suffix = 1
+        while True:
+            candidate = f"{proposed_name}_{suffix}"
+            if candidate not in existing_names:
+                return candidate
+            suffix += 1
+
+
+class MetadataMapper:
+    """Maps tool metadata from _meta schema to tool configuration."""
+
+    # Default key mapping: meta_schema_key -> output_key
+    # Note: When used with key_overrides, the direction is reversed internally
+    # to support tool_def_key -> meta_schema_key mapping
+    DEFAULT_KEY_MAPPING = {
+        "imagegen_model_deployment_name": "model_deployment_name",
+        "model_deployment_name": "model",
+        "deployment_name": "model",
+    }
+
+    @staticmethod
+    def extract_metadata_config(
+        tool_metadata: Mapping[str, Any],
+        tool_definition: Optional[Mapping[str, Any]] = None,
+        key_overrides: Optional[Mapping[str, str]] = None,
+    ) -> Dict[str, Any]:
+        """Extract metadata configuration from _meta schema and tool definition.
+
+        This method extracts properties defined in the _meta schema and attempts
+        to find matching values in the tool definition. Key overrides allow mapping
+        from tool definition property names to _meta schema property names.
+ + Parameters + ---------- + tool_metadata : Mapping[str, Any] + The _meta schema containing property definitions + tool_definition : Optional[Mapping[str, Any]] + The tool definition containing actual values + key_overrides : Optional[Mapping[str, str]] + Mapping from tool definition keys to _meta schema keys. + Format: {"tool_def_key": "meta_schema_key"} + Example: {"model": "imagegen_model_deployment_name"} + + Returns + ------- + Dict[str, Any] + Dictionary with mapped metadata configuration + + Examples + -------- + >>> meta_schema = { + ... "properties": { + ... "quality": {"type": "string", "default": "auto"}, + ... "model_deployment_name": {"type": "string"} + ... } + ... } + >>> tool_def = {"quality": "high", "model": "gpt-4"} + >>> overrides = {"model": "model_deployment_name"} # tool_def -> meta + >>> MetadataMapper.extract_metadata_config(meta_schema, tool_def, overrides) + {'quality': 'high', 'model_deployment_name': 'gpt-4'} + """ + result: Dict[str, Any] = {} + + # Build reverse mapping: tool_definition_key -> meta_property_name + # Start with default mappings (also reversed) + reverse_default_mapping = {v: k for k, v in MetadataMapper.DEFAULT_KEY_MAPPING.items()} + + # Add user overrides (these are already tool_def -> meta format) + tool_to_meta_mapping = dict(reverse_default_mapping) + if key_overrides: + tool_to_meta_mapping.update(key_overrides) + + # Extract properties from _meta schema + properties = tool_metadata.get("properties", {}) + if not isinstance(properties, Mapping): + return result + + for meta_prop_name, prop_schema in properties.items(): + if not isinstance(prop_schema, Mapping): + continue + + is_required = meta_prop_name in tool_metadata.get("required", []) + + # Try to find value in tool definition + value = None + value_from_definition = False + + if tool_definition: + # First check if tool definition has this exact key + if meta_prop_name in tool_definition: + value = tool_definition[meta_prop_name] + value_from_definition = 
True + else: + # Check if any tool definition key maps to this meta property + for tool_key, mapped_meta_key in tool_to_meta_mapping.items(): + if mapped_meta_key == meta_prop_name and tool_key in tool_definition: + value = tool_definition[tool_key] + value_from_definition = True + break + + # If no value from definition, check for default (only use if required) + if value is None and is_required and "default" in prop_schema: + value = prop_schema["default"] + + # Only add if: + # 1. Value is from tool definition, OR + # 2. Value is required and has a default + if value is not None and (value_from_definition or is_required): + result[meta_prop_name] = value + + return result + + @staticmethod + def prepare_metadata_dict( + tool_metadata_raw: Mapping[str, Any], + tool_definition: Optional[Mapping[str, Any]] = None, + key_overrides: Optional[Mapping[str, str]] = None, + ) -> Dict[str, Any]: + """Prepare a _meta dictionary from tool metadata and definition. + + This is a convenience method that extracts the _meta schema from + raw tool metadata and maps it to configuration values. + + Parameters + ---------- + tool_metadata_raw : Mapping[str, Any] + Raw tool metadata containing _meta or similar fields + tool_definition : Optional[Mapping[str, Any]] + The tool definition containing actual values + key_overrides : Optional[Mapping[str, str]] + Mapping from tool definition keys to _meta schema keys. 
+ Format: {"tool_def_key": "meta_schema_key"} + + Returns + ------- + Dict[str, Any] + Dictionary with mapped metadata configuration + """ + # Extract _meta schema using existing utility + meta_schema = ToolMetadataExtractor.extract_metadata_schema(tool_metadata_raw) + if not meta_schema: + return {} + + return MetadataMapper.extract_metadata_config( + meta_schema, + tool_definition, + key_overrides + ) + + +class InvocationPayloadBuilder: + """Builds invocation payloads for tool calls.""" + + @staticmethod + def build_payload( + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + configuration: Dict[str, Any], + ) -> Dict[str, Any]: + """Build invocation payload from args and kwargs. + + Parameters + ---------- + args : Tuple[Any, ...] + Positional arguments + kwargs : Dict[str, Any] + Keyword arguments + configuration : Dict[str, Any] + Tool configuration defaults + + Returns + ------- + Dict[str, Any] + Complete invocation payload + """ + user_arguments = InvocationPayloadBuilder._normalize_input(args, kwargs) + merged = dict(configuration) + merged.update(user_arguments) + return merged + + @staticmethod + def _normalize_input( + args: Tuple[Any, ...], + kwargs: Dict[str, Any] + ) -> Dict[str, Any]: + """Normalize invocation input to a dictionary. + + Parameters + ---------- + args : Tuple[Any, ...] 
+ Positional arguments + kwargs : Dict[str, Any] + Keyword arguments + + Returns + ------- + Dict[str, Any] + Normalized input dictionary + + Raises + ------ + ValueError + If mixing positional and keyword arguments or providing multiple positional args + """ + if args and kwargs: + raise ValueError("Mixing positional and keyword arguments is not supported") + + if args: + if len(args) > 1: + raise ValueError("Multiple positional arguments are not supported") + candidate = next(iter(args)) + if candidate is None: + return {} + if isinstance(candidate, Mapping): + return dict(candidate) + return {"input": candidate} + + if kwargs: + return dict(kwargs) + + return {} + + +@dataclass +class ToolProperty: + """Represents a single property/parameter in a tool's schema. + + :ivar str type: JSON schema type (e.g., "string", "object", "array"). + :ivar Optional[str] description: Human-readable description of the property. + :ivar Optional[Mapping[str, Any]] properties: Nested properties for object types. + :ivar Any default: Default value for the property. + :ivar List[str] required: List of required nested properties. + """ + + type: str + description: Optional[str] = None + properties: Optional[Mapping[str, Any]] = None + default: Any = None + required: Optional[List[str]] = None + +@dataclass +class ToolParameters: + """Represents the parameters schema for a tool. + + :ivar str type: JSON schema type, typically "object". + :ivar Mapping[str, ToolProperty] properties: Dictionary of parameter properties. + :ivar List[str] required: List of required parameter names. + """ + + type: str + properties: Mapping[str, ToolProperty] + required: Optional[List[str]] = None + +@dataclass +class ToolManifest: + """Represents a tool manifest with metadata and parameters. + + :ivar str name: Unique name of the tool. + :ivar str description: Detailed description of the tool's functionality. + :ivar ToolParameters parameters: Schema defining the tool's input parameters. 
+ """ + + name: str + description: str + parameters: ToolParameters + +@dataclass +class RemoteServer: + """Represents remote server configuration for a tool. + + :ivar str projectConnectionId: Identifier for the project connection. + :ivar str protocol: Communication protocol (e.g., "mcp"). + """ + + projectConnectionId: str + protocol: str + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary for JSON serialization.""" + return { + "projectConnectionId": self.projectConnectionId, + "protocol": self.protocol + } + +@dataclass +class EnrichedToolEntry(ToolManifest): + """Enriched tool representation with input schema. + + :ivar str name: Name of the tool. + :ivar str description: Description of the tool. + """ + remoteServer: RemoteServer + projectConnectionId: str + protocol: str + inputSchema: Optional[Mapping[str, Any]] = None + tool_definition: Optional[ToolDefinition] = None + +@dataclass +class ToolEntry: + """Represents a single tool entry in the API response. + + :ivar RemoteServer remoteServer: Configuration for the remote server. + :ivar List[ToolManifest] manifest: List of tool manifests provided by this entry. + """ + + remoteServer: RemoteServer + manifest: List[ToolManifest] + +@dataclass +class ToolsResponse: + """Root response model for the tools API. + + :ivar List[ToolEntry] tools: List of tool entries from the API. + """ + + tools: List[ToolEntry] + enriched_tools: List[EnrichedToolEntry] + + @classmethod + def from_dict(cls, data: Mapping[str, Any], tool_definitions: List[ToolDefinition]) -> "ToolsResponse": + """Create a ToolsResponse from a dictionary. + + :param Mapping[str, Any] data: Dictionary representation of the API response. + :return: Parsed ToolsResponse instance. 
+ :rtype: ToolsResponse + """ + tool_defintions_map = {f"{td.type.lower()}_{td.project_connection_id.lower()}": td for td in tool_definitions} + + def tool_definition_lookup(remote_server: RemoteServer) -> Optional[ToolDefinition]: + return tool_defintions_map.get(f"{remote_server.protocol.lower()}_{remote_server.projectConnectionId.lower()}") + + + tools = [] + flattend_tools = [] + for tool_data in data.get("tools", []): + remote_server = RemoteServer( + projectConnectionId=tool_data["remoteServer"]["projectConnectionId"], + protocol=tool_data["remoteServer"]["protocol"] + ) + + manifests = [] + for manifest_data in tool_data.get("manifest", []): + params_data = manifest_data.get("parameters", {}) + properties = {} + + for prop_name, prop_data in params_data.get("properties", {}).items(): + properties[prop_name] = ToolProperty( + type=prop_data.get("type"), + description=prop_data.get("description"), + properties=prop_data.get("properties"), + default=prop_data.get("default"), + required=prop_data.get("required") + ) + + parameters = ToolParameters( + type=params_data.get("type", "object"), + properties=properties, + required=params_data.get("required") + ) + manifest = ToolManifest( + name=manifest_data["name"], + description=manifest_data["description"], + parameters=parameters + ) + manifests.append(manifest) + tool_definition = tool_definition_lookup(remote_server) + flattend_tools.append(EnrichedToolEntry( + projectConnectionId=remote_server.projectConnectionId, + protocol=remote_server.protocol, + name=manifest.name, + description=manifest.description, + parameters=parameters, + remoteServer=remote_server, + inputSchema=parameters, + tool_definition=tool_definition + )) + + tools.append(ToolEntry( + remoteServer=remote_server, + manifest=manifests + )) + + return cls(tools=tools, enriched_tools=flattend_tools) + +class ResolveToolsRequest: + """Represents a request containing remote servers and user information. 
+
+    :ivar List[RemoteServer] remoteservers: List of remote server configurations.
+    :ivar UserInfo user: User information.
+    """
+
+    def __init__(self, remoteservers: List[RemoteServer], user: UserInfo) -> None:
+        """Initialize ResolveToolsRequest with servers and user info.
+
+        :param List[RemoteServer] remoteservers: List of remote server configurations.
+        :param UserInfo user: User information.
+        """
+        self.remoteservers = remoteservers
+        self.user: UserInfo = user
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to dictionary for JSON serialization."""
+        result = {
+            "remoteservers": [rs.to_dict() for rs in self.remoteservers]
+        }
+        if self.user:
+            # Handle both UserInfo objects and dictionaries
+            if isinstance(self.user, dict):
+                # Validate required fields for dict
+                if self.user.get("objectId") and self.user.get("tenantId"):
+                    result["user"] = {
+                        "objectId": self.user["objectId"],
+                        "tenantId": self.user["tenantId"]
+                    }
+            elif hasattr(self.user, "objectId") and hasattr(self.user, "tenantId"):
+                # UserInfo object
+                if self.user.objectId and self.user.tenantId:
+                    result["user"] = {
+                        "objectId": self.user.objectId,
+                        "tenantId": self.user.tenantId
+                    }
+        return result
+
+
+class ToolConfigurationParser:
+    """Parses and processes tool configuration.
+
+    This class handles parsing and categorizing tool configurations into
+    remote tools (MCP/A2A) and named MCP tools.
+
+    :param Optional[List[Any]] tools_definitions:
+        List of tool configurations to parse. Can be None.
+    """
+
+    def __init__(self, tools_definitions: Optional[List[Any]] = None):
+        """Initialize the parser.
+
+        :param tools_definitions: List of tool configurations (can be dicts or ToolDefinition objects), or None.
+ :type tools_definitions: Optional[List[Any]] + """ + # Convert dictionaries to ToolDefinition objects if needed + self._tools_definitions = [] + for tool_def in (tools_definitions or []): + if isinstance(tool_def, dict): + # Convert dict to ToolDefinition + tool_type = tool_def.get("type") + if tool_type: + self._tools_definitions.append(ToolDefinition(type=tool_type, **{k: v for k, v in tool_def.items() if k != "type"})) + elif isinstance(tool_def, ToolDefinition): + self._tools_definitions.append(tool_def) + + self._remote_tools: List[ToolDefinition] = [] + self._named_mcp_tools: List[ToolDefinition] = [] + self._parse_tools_config() + + def _parse_tools_config(self) -> None: + """Parse tools configuration into categorized lists. + + Separates tool configurations into remote tools (MCP/A2A types) and + named MCP tools based on the 'type' field in each configuration. + """ + for tool_definition in self._tools_definitions: + tool_type = tool_definition.type.lower() + if tool_type in ["mcp", "a2a"]: + self._remote_tools.append(tool_definition) + else: + self._named_mcp_tools.append(tool_definition) + +def to_remote_server(tool_definition: ToolDefinition) -> RemoteServer: + """Convert ToolDefinition to RemoteServer. + + :param ToolDefinition tool_definition: + Tool definition to convert. + :return: Converted RemoteServer instance. + :rtype: RemoteServer + """ + return RemoteServer( + projectConnectionId=tool_definition.project_connection_id, + protocol=tool_definition.type.lower() + ) + + +@dataclass +class MCPToolSchema: + """Represents the input schema for an MCP tool. + + :ivar str type: JSON schema type, typically "object". + :ivar Mapping[str, Any] properties: Dictionary of parameter properties. + :ivar List[str] required: List of required parameter names. + """ + + type: str + properties: Mapping[str, Any] + required: Optional[List[str]] = None + + +@dataclass +class MCPToolMetadata: + """Represents the _meta field for an MCP tool. 
+ + :ivar str type: JSON schema type, typically "object". + :ivar Mapping[str, Any] properties: Dictionary of metadata properties. + :ivar List[str] required: List of required metadata parameter names. + """ + + type: str + properties: Mapping[str, Any] + required: Optional[List[str]] = None + + +@dataclass +class MCPTool: + """Represents a single MCP tool from the tools/list response. + + :ivar str name: Unique name of the tool. + :ivar str title: Display title of the tool. + :ivar str description: Detailed description of the tool's functionality. + :ivar MCPToolSchema inputSchema: Schema defining the tool's input parameters. + :ivar Optional[MCPToolMetadata] _meta: Optional metadata schema for the tool. + """ + + name: str + title: str + description: str + inputSchema: MCPToolSchema + _meta: Optional[MCPToolMetadata] = None + +@dataclass +class EnrichedMCPTool(MCPTool): + """Represents an enriched MCP tool with additional metadata. + + :ivar ToolDefinition tool_definition: Associated tool definition. + """ + tool_definition: Optional[ToolDefinition] = None + +@dataclass +class MCPToolsListResult: + """Represents the result field of an MCP tools/list response. + + :ivar List[MCPTool] tools: List of available MCP tools. + """ + + tools: List[MCPTool] + + +@dataclass +class MCPToolsListResponse: + """Root response model for the MCP tools/list JSON-RPC response. + + :ivar str jsonrpc: JSON-RPC protocol version (e.g., "2.0"). + :ivar int id: Request identifier. + :ivar MCPToolsListResult result: Result containing the list of tools. + """ + + jsonrpc: str + id: int + result: MCPToolsListResult + + @classmethod + def from_dict(cls, data: Mapping[str, Any], tool_definitions: List[ToolDefinition]) -> "MCPToolsListResponse": + """Create an MCPToolsListResponse from a dictionary. + + :param Mapping[str, Any] data: Dictionary representation of the JSON-RPC response. + :return: Parsed MCPToolsListResponse instance. 
+ :rtype: MCPToolsListResponse + """ + result_data = data.get("result", {}) + tools_list = [] + tool_definitions_map = {f"{td.type.lower()}": td for td in tool_definitions} + + for tool_data in result_data.get("tools", []): + # Parse inputSchema + input_schema_data = tool_data.get("inputSchema", {}) + input_schema = MCPToolSchema( + type=input_schema_data.get("type", "object"), + properties=input_schema_data.get("properties", {}), + required=input_schema_data.get("required") + ) + + # Parse _meta if present + meta = None + meta_data = tool_data.get("_meta") + if meta_data: + meta = MCPToolMetadata( + type=meta_data.get("type", "object"), + properties=meta_data.get("properties", {}), + required=meta_data.get("required") + ) + + # Create MCPTool + mcp_tool = EnrichedMCPTool( + name=tool_data["name"], + title=tool_data.get("title", tool_data["name"]), + description=tool_data.get("description", ""), + inputSchema=input_schema, + _meta=meta, + tool_definition=tool_definitions_map.get(tool_data["name"].lower()) + ) + + tools_list.append(mcp_tool) + + # Create result + result = MCPToolsListResult(tools=tools_list) + + return cls( + jsonrpc=data.get("jsonrpc", "2.0"), + id=data.get("id", 0), + result=result + ) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py new file mode 100644 index 000000000000..c0abe5b29bb9 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py @@ -0,0 +1,13 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- + +from ._client import AzureAIToolClient, FoundryTool +from .._exceptions import OAuthConsentRequiredError, MCPToolApprovalRequiredError + +__all__ = [ + "AzureAIToolClient", + "FoundryTool", + "OAuthConsentRequiredError", + "MCPToolApprovalRequiredError", +] \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py new file mode 100644 index 000000000000..93f550448b5a --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py @@ -0,0 +1,226 @@ + +from typing import Any, List, Mapping, Union, TYPE_CHECKING + +from azure.core import AsyncPipelineClient +from azure.core.pipeline import policies + +from ._configuration import AzureAIToolClientConfiguration +from .._utils._model_base import InvocationPayloadBuilder +from .._model_base import FoundryTool, ToolSource + +from .operations._operations import MCPToolsOperations, RemoteToolsOperations + +if TYPE_CHECKING: + from azure.core.credentials_async import AsyncTokenCredential + +class AzureAITool: + """Azure AI tool wrapper for invocation. + + Represents a single tool that can be invoked either via MCP protocol or + Azure AI Tools API. This class provides a convenient interface for tool + invocation and exposes tool metadata. + + :ivar str name: The name of the tool. + :ivar str description: Human-readable description of what the tool does. + :ivar dict metadata: Additional metadata about the tool from the API. + :ivar ~Tool_Client.models.ToolSource source: + The source of the tool (MCP_TOOLS or REMOTE_TOOLS). + + .. admonition:: Example: + + .. literalinclude:: ../samples/simple_example.py + :start-after: [START use_tool] + :end-before: [END use_tool] + :language: python + :dedent: 4 + :caption: Using an AzureAITool instance. 
+ """ + + def __init__(self, client: "AzureAIToolClient", descriptor: FoundryTool) -> None: + """Initialize an Azure AI Tool. + + :param client: Parent client instance for making API calls. + :type client: AzureAIToolClient + :param descriptor: Tool descriptor containing metadata and configuration. + :type descriptor: ~Tool_Client.models.FoundryTool + """ + self._client = client + self._descriptor = descriptor + self.name = descriptor.name + self.description = descriptor.description + self.metadata = dict(descriptor.metadata) + self.source = descriptor.source + + async def invoke(self, *args: Any, **kwargs: Any) -> Any: + """Invoke the tool asynchronously. + + :param args: Positional arguments to pass to the tool. + :param kwargs: Keyword arguments to pass to the tool. + :return: The result from the tool invocation. + :rtype: Any + """ + payload = InvocationPayloadBuilder.build_payload(args, kwargs, {}) + return await self._client._invoke_tool(self._descriptor, payload) + + async def __call__(self, *args: Any, **kwargs: Any) -> Any: + return await self.invoke(*args, **kwargs) + +class AzureAIToolClient: + """Asynchronous client for aggregating tools from Azure AI MCP and Tools APIs. + + This client provides access to tools from both MCP (Model Context Protocol) servers + and Azure AI Tools API endpoints, enabling unified tool discovery and invocation. + + :param str endpoint: + The fully qualified endpoint for the Azure AI Agents service. + Example: "https://.api.azureml.ms" + :param credential: + Credential for authenticating requests to the service. + Use credentials from azure-identity like DefaultAzureCredential. + :type credential: ~azure.core.credentials.TokenCredential + :keyword str agent_name: + Name of the agent to use for tool operations. Default is "$default". + :keyword List[Mapping[str, Any]] tools: + List of tool configurations defining which tools to include. 
+ :keyword Mapping[str, Any] user: + User information for tool invocations (object_id, tenant_id). + :keyword str api_version: + API version to use when communicating with the service. + Default is the latest supported version. + :keyword transport: + Custom transport implementation. Default is RequestsTransport. + :paramtype transport: ~azure.core.pipeline.transport.HttpTransport + + """ + + def __init__( + self, + endpoint: str, + credential: "AsyncTokenCredential", + **kwargs: Any, + ) -> None: + """Initialize the asynchronous Azure AI Tool Client. + + :param str endpoint: The service endpoint URL. + :param credential: Credentials for authenticating requests. + :type credential: ~azure.core.credentials.TokenCredential + :keyword kwargs: Additional keyword arguments for client configuration. + """ + self._config = AzureAIToolClientConfiguration( + endpoint, + credential, + **kwargs, + ) + + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=endpoint, policies=_policies, **kwargs) + + # Initialize specialized clients with client and config + self._mcp_tools = MCPToolsOperations(client=self._client, config=self._config) + self._remote_tools = RemoteToolsOperations(client=self._client, config=self._config) + + async def list_tools(self) -> List[FoundryTool]: + """List all available tools from configured sources. 
+ + Retrieves tools from both MCP servers and Azure AI Tools API endpoints, + returning them as AzureAITool instances ready for invocation. + :return: List of available tools from all configured sources. + :rtype: List[~AzureAITool] + :raises ~Tool_Client.exceptions.OAuthConsentRequiredError: + Raised when the service requires user OAuth consent. + :raises ~Tool_Client.exceptions.MCPToolApprovalRequiredError: + Raised when tool access requires human approval. + :raises ~azure.core.exceptions.HttpResponseError: + Raised for HTTP communication failures. + + """ + + existing_names: set[str] = set() + + tools: List[FoundryTool] = [] + + # Fetch MCP tools + mcp_tools = await self._mcp_tools.list_tools(existing_names) + tools.extend(mcp_tools) + # Fetch Tools API tools + tools_api_tools = await self._remote_tools.resolve_tools(existing_names) + tools.extend(tools_api_tools) + + for tool in tools: + # Capture tool in a closure to avoid shadowing issues + def make_invoker(captured_tool): + async def _invoker(*args, **kwargs): + return await self.invoke_tool(captured_tool, *args, **kwargs) + return _invoker + tool.invoker = make_invoker(tool) + + return tools + + async def invoke_tool( + self, + tool: Union[AzureAITool, str, FoundryTool], + *args: Any, + **kwargs: Any, + ) -> Any: + """Invoke a tool by instance, name, or descriptor. + + :param tool: Tool to invoke, specified as an AzureAITool instance, + tool name string, or FoundryTool. 
+ :type tool: Union[~AzureAITool, str, ~Tool_Client.models.FoundryTool] + :param args: Positional arguments to pass to the tool + """ + descriptor = await self._resolve_tool_descriptor(tool) + payload = InvocationPayloadBuilder.build_payload(args, kwargs, {}) + return await self._invoke_tool(descriptor, payload, **kwargs) + + async def _resolve_tool_descriptor( + self, tool: Union[AzureAITool, str, FoundryTool] + ) -> FoundryTool: + """Resolve a tool reference to a descriptor.""" + if isinstance(tool, AzureAITool): + return tool._descriptor + if isinstance(tool, FoundryTool): + return tool + if isinstance(tool, str): + # Fetch all tools and find matching descriptor + descriptors = await self.list_tools() + for descriptor in descriptors: + if descriptor.name == tool or descriptor.key == tool: + return descriptor + raise KeyError(f"Unknown tool: {tool}") + raise TypeError("Tool must be an AsyncAzureAITool, FoundryTool, or registered name/key") + + async def _invoke_tool(self, descriptor: FoundryTool, arguments: Mapping[str, Any], **kwargs: Any) -> Any: + """Invoke a tool descriptor.""" + if descriptor.source is ToolSource.MCP_TOOLS: + return await self._mcp_tools.invoke_tool(descriptor, arguments) + if descriptor.source is ToolSource.REMOTE_TOOLS: + return await self._remote_tools.invoke_tool(descriptor, arguments) + raise ValueError(f"Unsupported tool source: {descriptor.source}") + + async def close(self) -> None: + """Close the underlying HTTP pipeline.""" + await self._client.close() + + async def __aenter__(self) -> "AzureAIToolClient": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py new file mode 100644 index 
000000000000..79b819863399 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py @@ -0,0 +1,88 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +from typing import Any, Mapping, List, Optional, TYPE_CHECKING + +from azure.core.pipeline import policies + +if TYPE_CHECKING: + from azure.core.credentials_async import AsyncTokenCredential + +from .._utils._model_base import ToolConfigurationParser + +class AzureAIToolClientConfiguration: + """Configuration for Azure AI Tool Client. + + Manages authentication, endpoint configuration, and policy settings for the + Azure AI Tool Client. This class is used internally by the client and should + not typically be instantiated directly. + + :param str endpoint: + Fully qualified endpoint for the Azure AI Agents service. + :param credential: + Azure TokenCredential for authentication. + :type credential: ~azure.core.credentials.TokenCredential + :keyword str api_version: + API version to use. Default is the latest supported version. + :keyword List[str] credential_scopes: + OAuth2 scopes for token requests. Default is ["https://ai.azure.com/.default"]. + :keyword str agent_name: + Name of the agent. Default is "$default". + :keyword List[Mapping[str, Any]] tools: + List of tool configurations. + :keyword Mapping[str, Any] user: + User information for tool invocations. + """ + + def __init__( + self, + endpoint: str, + credential: "AsyncTokenCredential", + **kwargs: Any, + ) -> None: + """Initialize the configuration. + + :param str endpoint: The service endpoint URL. + :param credential: Credentials for authenticating requests. + :type credential: ~azure.core.credentials.TokenCredential + :keyword kwargs: Additional configuration options. 
+ """ + api_version: str = kwargs.pop("api_version", "2025-05-15-preview") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://ai.azure.com/.default"]) + + + # Tool configuration + self.agent_name: str = kwargs.pop("agent_name", "$default") + self.tools: Optional[List[Mapping[str, Any]]] = kwargs.pop("tools", None) + self.user: Optional[Mapping[str, Any]] = kwargs.pop("user", None) + + # Initialize tool configuration parser + + self.tool_config = ToolConfigurationParser(self.tools) + + self._configure(**kwargs) + + # Warn about unused kwargs + if kwargs: + import warnings + warnings.warn(f"Unused configuration parameters: {list(kwargs.keys())}", UserWarning) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy( + self.credential, *self.credential_scopes, **kwargs + ) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py 
b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py new file mode 100644 index 000000000000..f99646d5fb8b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py @@ -0,0 +1,184 @@ + + +import json +from typing import Any, Dict, List, Mapping, MutableMapping + +from azure.core import AsyncPipelineClient +from ..._exceptions import OAuthConsentRequiredError +from .._configuration import AzureAIToolClientConfiguration + +from ...operations._operations import ( + build_remotetools_invoke_tool_request, + build_remotetools_resolve_tools_request, + prepare_remotetools_invoke_tool_request_content, + prepare_remotetools_resolve_tools_request_content, + build_mcptools_list_tools_request, + prepare_mcptools_list_tools_request_content, + build_mcptools_invoke_tool_request, + prepare_mcptools_invoke_tool_request_content, + API_VERSION, + MCP_ENDPOINT_PATH, + TOOL_PROPERTY_OVERRIDES, + DEFAULT_ERROR_MAP, + MCP_HEADERS, + REMOTE_TOOLS_HEADERS, + prepare_request_headers, + prepare_error_map, + handle_response_error, + build_list_tools_request, + process_list_tools_response, + build_invoke_mcp_tool_request, + build_resolve_tools_request, + process_resolve_tools_response, + build_invoke_remote_tool_request, + process_invoke_remote_tool_response, +) +from ..._model_base import FoundryTool, ToolSource, UserInfo + +from ..._utils._model_base import ToolsResponse, ToolDescriptorBuilder, ToolConfigurationParser, ResolveToolsRequest +from ..._utils._model_base import to_remote_server, MCPToolsListResponse, MetadataMapper + +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.pipeline import PipelineResponse + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) + +class MCPToolsOperations: + + def __init__(self, 
*args, **kwargs) -> None: + """Initialize MCP client. + + Parameters + ---------- + client : AsyncPipelineClient + Azure AsyncPipelineClient for HTTP requests + config : AzureAIToolClientConfiguration + Configuration object + """ + input_args = list(args) + self._client : AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config : AzureAIToolClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + + if self._client is None or self._config is None: + raise ValueError("Both 'client' and 'config' must be provided") + + self._endpoint_path = MCP_ENDPOINT_PATH + self._api_version = API_VERSION + + async def list_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool]: + """List MCP tools. + + :return: List of tool descriptors from MCP server. + :rtype: List[FoundryTool] + """ + _request, error_map, remaining_kwargs = build_list_tools_request(self._api_version, kwargs) + + path_format_arguments = {"endpoint": self._config.endpoint} + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + pipeline_response: PipelineResponse = await self._client._pipeline.run(_request, **remaining_kwargs) + response = pipeline_response.http_response + + handle_response_error(response, error_map) + return process_list_tools_response(response, self._config.tool_config._named_mcp_tools, existing_names) + + async def invoke_tool( + self, + tool: FoundryTool, + arguments: Mapping[str, Any], + **kwargs: Any + ) -> Any: + """Invoke an MCP tool. + + :param tool: Tool descriptor for the tool to invoke. + :type tool: FoundryTool + :param arguments: Input arguments for the tool. + :type arguments: Mapping[str, Any] + :return: Result of the tool invocation. 
+ :rtype: Any + """ + _request, error_map = build_invoke_mcp_tool_request(self._api_version, tool, arguments) + + path_format_arguments = {"endpoint": self._config.endpoint} + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + pipeline_response: PipelineResponse = await self._client._pipeline.run(_request, **kwargs) + response = pipeline_response.http_response + + handle_response_error(response, error_map) + return response.json().get("result") + +class RemoteToolsOperations: + def __init__(self, *args, **kwargs) -> None: + """Initialize Tools API client. + + :param client: Azure PipelineClient for HTTP requests. + :type client: ~azure.core.PipelineClient + :param config: Configuration object. + :type config: ~Tool_Client.models.AzureAIToolClientConfiguration + :raises ValueError: If required parameters are not provided. + """ + input_args = list(args) + self._client : AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config : AzureAIToolClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + + if self._client is None or self._config is None: + raise ValueError("Both 'client' and 'config' must be provided") + + + # Apply agent name substitution to endpoint paths + self.agent = self._config.agent_name.strip() if self._config.agent_name and self._config.agent_name.strip() else "$default" + self._api_version = API_VERSION + + async def resolve_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool]: + """Resolve remote tools from Azure AI Tools API. + + :return: List of tool descriptors from Tools API. 
+ :rtype: List[FoundryTool] + """ + result = build_resolve_tools_request(self.agent, self._api_version, self._config.tool_config, self._config.user, kwargs) + if result[0] is None: + return [] + + _request, error_map, remaining_kwargs = result + + path_format_arguments = {"endpoint": self._config.endpoint} + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + pipeline_response: PipelineResponse = await self._client._pipeline.run(_request, **remaining_kwargs) + response = pipeline_response.http_response + + handle_response_error(response, error_map) + return process_resolve_tools_response(response, self._config.tool_config._remote_tools, existing_names) + + async def invoke_tool( + self, + tool: FoundryTool, + arguments: Mapping[str, Any], + ) -> Any: + """Invoke a remote tool. + + :param tool: Tool descriptor to invoke. + :type tool: FoundryTool + :param arguments: Input arguments for the tool. + :type arguments: Mapping[str, Any] + :return: Result of the tool invocation. 
+ :rtype: Any + """ + _request, error_map = build_invoke_remote_tool_request(self.agent, self._api_version, tool, self._config.user, arguments) + + path_format_arguments = {"endpoint": self._config.endpoint} + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + pipeline_response: PipelineResponse = await self._client._pipeline.run(_request) + response = pipeline_response.http_response + + handle_response_error(response, error_map) + return process_invoke_remote_tool_response(response) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py new file mode 100644 index 000000000000..e05e1e84e708 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py @@ -0,0 +1,543 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- + +import json +import logging +from typing import Any, Dict, List, Mapping, MutableMapping, Optional, Tuple, Union +from azure.core import PipelineClient +from .._configuration import AzureAIToolClientConfiguration +from .._model_base import FoundryTool, ToolSource, UserInfo + +from .._utils._model_base import ToolsResponse, ToolDescriptorBuilder, ToolConfigurationParser, ResolveToolsRequest +from .._utils._model_base import to_remote_server, MCPToolsListResponse, MetadataMapper +from azure.core.pipeline import PipelineResponse +from azure.core.rest import HttpRequest, HttpResponse + +from .._exceptions import OAuthConsentRequiredError + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) + +logger = logging.getLogger(__name__) + +# Shared constants +API_VERSION = "2025-11-15-preview" +MCP_ENDPOINT_PATH = "/mcp_tools" + +# Tool-specific property key overrides +# Format: {"tool_name": {"tool_def_key": "meta_schema_key"}} +TOOL_PROPERTY_OVERRIDES: Dict[str, Dict[str, str]] = { + "image_generation": { + "model": "imagegen_model_deployment_name" + }, + # Add more tool-specific mappings as needed +} + +# Shared error map +DEFAULT_ERROR_MAP: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, +} + +# Shared header configurations +MCP_HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json,text/event-stream", + "Connection": "keep-alive", + "Cache-Control": "no-cache", +} + +REMOTE_TOOLS_HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + +# Helper functions for request/response processing +def prepare_request_headers(base_headers: Dict[str, str], custom_headers: Mapping[str, str] = None) -> Dict[str, str]: + """Prepare request headers by 
merging base and custom headers. + + :param base_headers: Base headers to use + :param custom_headers: Custom headers to merge + :return: Merged headers dictionary + """ + headers = base_headers.copy() + if custom_headers: + headers.update(custom_headers) + return headers + +def prepare_error_map(custom_error_map: Mapping[int, Any] = None) -> MutableMapping: + """Prepare error map by merging default and custom error mappings. + + :param custom_error_map: Custom error mappings to merge + :return: Merged error map + """ + error_map = DEFAULT_ERROR_MAP.copy() + if custom_error_map: + error_map.update(custom_error_map) + return error_map + +def format_and_execute_request( + client: PipelineClient, + request: HttpRequest, + endpoint: str, + **kwargs: Any +) -> HttpResponse: + """Format request URL and execute pipeline. + + :param client: Pipeline client + :param request: HTTP request to execute + :param endpoint: Endpoint URL for formatting + :return: HTTP response + """ + path_format_arguments = {"endpoint": endpoint} + request.url = client.format_url(request.url, **path_format_arguments) + pipeline_response: PipelineResponse = client._pipeline.run(request, **kwargs) + return pipeline_response.http_response + +def handle_response_error(response: HttpResponse, error_map: MutableMapping) -> None: + """Handle HTTP response errors. + + :param response: HTTP response to check + :param error_map: Error map for status code mapping + :raises HttpResponseError: If response status is not 200 + """ + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + +def process_list_tools_response( + response: HttpResponse, + named_mcp_tools: Any, + existing_names: set +) -> List[FoundryTool]: + """Process list_tools response and build descriptors. 
+ + :param response: HTTP response with MCP tools + :param named_mcp_tools: Named MCP tools configuration + :param existing_names: Set of existing tool names + :return: List of tool descriptors + """ + mcp_response = MCPToolsListResponse.from_dict(response.json(), named_mcp_tools) + raw_tools = mcp_response.result.tools + return ToolDescriptorBuilder.build_descriptors( + raw_tools, + ToolSource.MCP_TOOLS, + existing_names, + ) + +def process_resolve_tools_response( + response: HttpResponse, + remote_tools: Any, + existing_names: set +) -> List[FoundryTool]: + """Process resolve_tools response and build descriptors. + + :param response: HTTP response with remote tools + :param remote_tools: Remote tools configuration + :param existing_names: Set of existing tool names + :return: List of tool descriptors + """ + toolResponse = ToolsResponse.from_dict(response.json(), remote_tools) + return ToolDescriptorBuilder.build_descriptors( + toolResponse.enriched_tools, + ToolSource.REMOTE_TOOLS, + existing_names, + ) + +def build_list_tools_request( + api_version: str, + kwargs: Dict[str, Any] +) -> Tuple[HttpRequest, MutableMapping, Dict[str, str]]: + """Build request for listing MCP tools. 
+
+    :param api_version: API version
+    :param kwargs: Additional arguments (headers, params, error_map)
+    :return: Tuple of (request, error_map, remaining kwargs)
+    """
+    error_map = prepare_error_map(kwargs.pop("error_map", None))
+    _headers = prepare_request_headers(MCP_HEADERS, kwargs.pop("headers", None))
+    _params = kwargs.pop("params", {}) or {}
+
+    _content = prepare_mcptools_list_tools_request_content()
+    content = json.dumps(_content)
+    _request = build_mcptools_list_tools_request(api_version=api_version, headers=_headers, params=_params, content=content)
+
+    return _request, error_map, kwargs
+
+def build_invoke_mcp_tool_request(
+    api_version: str,
+    tool: FoundryTool,
+    arguments: Mapping[str, Any],
+    **kwargs: Any
+) -> Tuple[HttpRequest, MutableMapping]:
+    """Build request for invoking MCP tool.
+
+    :param api_version: API version
+    :param tool: Tool descriptor
+    :param arguments: Tool arguments
+    :return: Tuple of (request, error_map)
+    """
+    error_map = prepare_error_map()
+    _headers = prepare_request_headers(MCP_HEADERS)
+    _params = {}
+
+    _content = prepare_mcptools_invoke_tool_request_content(tool, arguments, TOOL_PROPERTY_OVERRIDES)
+    logger.info("Invoking MCP tool: %s with arguments: %s", tool.name, dict(arguments))
+    content = json.dumps(_content)
+    _request = build_mcptools_invoke_tool_request(api_version=api_version, headers=_headers, params=_params, content=content)
+
+    return _request, error_map
+
+def build_resolve_tools_request(
+    agent_name: str,
+    api_version: str,
+    tool_config: ToolConfigurationParser,
+    user: UserInfo,
+    kwargs: Dict[str, Any]
+) -> Union[Tuple[HttpRequest, MutableMapping, Dict[str, Any]], Tuple[None, None, None]]:
+    """Build request for resolving remote tools. 
+ + :param agent_name: Agent name + :param api_version: API version + :param tool_config: Tool configuration + :param user: User info + :param kwargs: Additional arguments + :return: Tuple of (request, error_map, remaining_kwargs) or (None, None, None) + """ + error_map = prepare_error_map(kwargs.pop("error_map", None)) + _headers = prepare_request_headers(REMOTE_TOOLS_HEADERS, kwargs.pop("headers", None)) + _params = kwargs.pop("params", {}) or {} + + _content = prepare_remotetools_resolve_tools_request_content(tool_config, user) + if _content is None: + return None, None, None + + content = json.dumps(_content.to_dict()) + _request = build_remotetools_resolve_tools_request(agent_name, api_version=api_version, headers=_headers, params=_params, content=content) + + return _request, error_map, kwargs + +def build_invoke_remote_tool_request( + agent_name: str, + api_version: str, + tool: FoundryTool, + user: UserInfo, + arguments: Mapping[str, Any] +) -> Tuple[HttpRequest, MutableMapping]: + """Build request for invoking remote tool. + + :param agent_name: Agent name + :param api_version: API version + :param tool: Tool descriptor + :param user: User info + :param arguments: Tool arguments + :return: Tuple of (request, error_map) + """ + error_map = prepare_error_map() + _headers = prepare_request_headers(REMOTE_TOOLS_HEADERS) + _params = {} + + _content = prepare_remotetools_invoke_tool_request_content(tool, user, arguments) + content = json.dumps(_content) + _request = build_remotetools_invoke_tool_request(agent_name, api_version=api_version, headers=_headers, params=_params, content=content) + + return _request, error_map + +def process_invoke_remote_tool_response(response: HttpResponse) -> Any: + """Process remote tool invocation response. 
+ + :param response: HTTP response + :return: Tool result + :raises OAuthConsentRequiredError: If OAuth consent is required + """ + payload = response.json() + response_type = payload.get("type") + result = payload.get("toolResult") + + if response_type == "OAuthConsentRequired": + raise OAuthConsentRequiredError(result.get("message"), consent_url=result.get("consentUrl"), payload=payload) + return result + +class MCPToolsOperations: + + def __init__(self, *args, **kwargs) -> None: + """Initialize MCP client. + + Parameters + ---------- + client : PipelineClient + Azure PipelineClient for HTTP requests + config : AzureAIToolClientConfiguration + Configuration object + """ + input_args = list(args) + self._client : PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config : AzureAIToolClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + + if self._client is None or self._config is None: + raise ValueError("Both 'client' and 'config' must be provided") + + self._endpoint_path = MCP_ENDPOINT_PATH + self._api_version = API_VERSION + + def list_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool]: + """List MCP tools. + + :return: List of tool descriptors from MCP server. + :rtype: List[FoundryTool] + """ + _request, error_map, remaining_kwargs = build_list_tools_request(self._api_version, kwargs) + response = format_and_execute_request(self._client, _request, self._config.endpoint, **remaining_kwargs) + handle_response_error(response, error_map) + return process_list_tools_response(response, self._config.tool_config._named_mcp_tools, existing_names) + + def invoke_tool( + self, + tool: FoundryTool, + arguments: Mapping[str, Any], + **kwargs: Any + ) -> Any: + """Invoke an MCP tool. + + :param tool: Tool descriptor for the tool to invoke. + :type tool: FoundryTool + :param arguments: Input arguments for the tool. + :type arguments: Mapping[str, Any] + :return: Result of the tool invocation. 
+ :rtype: Any + """ + _request, error_map = build_invoke_mcp_tool_request(self._api_version, tool, arguments) + response = format_and_execute_request(self._client, _request, self._config.endpoint, **kwargs) + handle_response_error(response, error_map) + return response.json().get("result") + +def prepare_mcptools_list_tools_request_content() -> Any: + return { + "jsonrpc": "2.0", + "id": 1, + "method": "tools/list", + "params": {} + } + +def build_mcptools_list_tools_request( + api_version: str, + headers: Mapping[str, str] = None, + params: Mapping[str, str] = None, + **kwargs: Any + ) -> HttpRequest: + """Build the HTTP request for listing MCP tools. + + :param api_version: API version to use. + :type api_version: str + :param headers: Additional headers for the request. + :type headers: Mapping[str, str], optional + :param params: Query parameters for the request. + :type params: Mapping[str, str], optional + :return: Constructed HttpRequest object. + :rtype: ~azure.core.rest.HttpRequest + """ + _headers = headers or {} + _params = params or {} + _params["api-version"] = api_version + + _url = f"/mcp_tools" + return HttpRequest(method="POST", url=_url, headers=_headers, params=_params, **kwargs) + +def prepare_mcptools_invoke_tool_request_content(tool: FoundryTool, arguments: Mapping[str, Any], tool_overrides: Dict[str, Dict[str, str]]) -> Any: + + params = { + "name": tool.name, + "arguments": dict(arguments), + } + + if tool.tool_definition: + + key_overrides = tool_overrides.get(tool.name, {}) + meta_config = MetadataMapper.prepare_metadata_dict( + tool.metadata, + tool.tool_definition.__dict__ if hasattr(tool.tool_definition, '__dict__') else tool.tool_definition, + key_overrides + ) + if meta_config: + params["_meta"] = meta_config + logger.info("Prepared MCP tool invocation params: %s", params) + payload = { + "jsonrpc": "2.0", + "id": 2, + "method": "tools/call", + "params": params + } + return payload + +def build_mcptools_invoke_tool_request( + 
api_version: str, + headers: Mapping[str, str] = None, + params: Mapping[str, str] = None, + **kwargs: Any +) -> HttpRequest: + """Build the HTTP request for invoking an MCP tool. + + :param api_version: API version to use. + :type api_version: str + :param headers: Additional headers for the request. + :type headers: Mapping[str, str], optional + :param params: Query parameters for the request. + :type params: Mapping[str, str], optional + :return: Constructed HttpRequest object. + :rtype: ~azure.core.rest.HttpRequest + """ + _headers = headers or {} + _params = params or {} + _params["api-version"] = api_version + + _url = f"/mcp_tools" + return HttpRequest(method="POST", url=_url, headers=_headers, params=_params, **kwargs) + +class RemoteToolsOperations: + def __init__(self, *args, **kwargs) -> None: + """Initialize Tools API client. + + :param client: Azure PipelineClient for HTTP requests. + :type client: ~azure.core.PipelineClient + :param config: Configuration object. + :type config: ~Tool_Client.models.AzureAIToolClientConfiguration + :raises ValueError: If required parameters are not provided. + """ + input_args = list(args) + self._client : PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config : AzureAIToolClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + + if self._client is None or self._config is None: + raise ValueError("Both 'client' and 'config' must be provided") + + + # Apply agent name substitution to endpoint paths + self.agent = self._config.agent_name.strip() if self._config.agent_name and self._config.agent_name.strip() else "$default" + self._api_version = API_VERSION + + def resolve_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool]: + """Resolve remote tools from Azure AI Tools API. + + :return: List of tool descriptors from Tools API. 
+ :rtype: List[FoundryTool] + """ + result = build_resolve_tools_request(self.agent, self._api_version, self._config.tool_config, self._config.user, kwargs) + if result[0] is None: + return [] + + _request, error_map, remaining_kwargs = result + response = format_and_execute_request(self._client, _request, self._config.endpoint, **remaining_kwargs) + handle_response_error(response, error_map) + return process_resolve_tools_response(response, self._config.tool_config._remote_tools, existing_names) + + def invoke_tool( + self, + tool: FoundryTool, + arguments: Mapping[str, Any], + ) -> Any: + """Invoke a remote tool. + + :param tool: Tool descriptor to invoke. + :type tool: FoundryTool + :param arguments: Input arguments for the tool. + :type arguments: Mapping[str, Any] + :return: Result of the tool invocation. + :rtype: Any + """ + _request, error_map = build_invoke_remote_tool_request(self.agent, self._api_version, tool, self._config.user, arguments) + response = format_and_execute_request(self._client, _request, self._config.endpoint) + handle_response_error(response, error_map) + return process_invoke_remote_tool_response(response) + +def prepare_remotetools_invoke_tool_request_content(tool: FoundryTool, user: UserInfo, arguments: Mapping[str, Any]) -> Any: + payload = { + "toolName": tool.name, + "arguments": dict(arguments), + "remoteServer": to_remote_server(tool.tool_definition).to_dict(), + } + if user: + # Handle both UserInfo objects and dictionaries + if isinstance(user, dict): + if user.get("objectId") and user.get("tenantId"): + payload["user"] = { + "objectId": user["objectId"], + "tenantId": user["tenantId"], + } + elif hasattr(user, "objectId") and hasattr(user, "tenantId"): + if user.objectId and user.tenantId: + payload["user"] = { + "objectId": user.objectId, + "tenantId": user.tenantId, + } + return payload + +def build_remotetools_invoke_tool_request( + agent_name: str, + api_version: str, + headers: Mapping[str, str] = None, + params: 
Mapping[str, str] = None, + **kwargs: Any + ) -> HttpRequest: + """Build the HTTP request for invoking a remote tool. + + :param api_version: API version to use. + :type api_version: str + :param headers: Additional headers for the request. + :type headers: Mapping[str, str], optional + :param params: Query parameters for the request. + :type params: Mapping[str, str], optional + :return: Constructed HttpRequest object. + :rtype: ~azure.core.rest.HttpRequest + """ + _headers = headers or {} + _params = params or {} + _params["api-version"] = api_version + + _url = f"/agents/{agent_name}/tools/invoke" + return HttpRequest(method="POST", url=_url, headers=_headers, params=_params, **kwargs) + + +def prepare_remotetools_resolve_tools_request_content(tool_config: ToolConfigurationParser, user: UserInfo = None) -> ResolveToolsRequest: + resolve_tools_request: ResolveToolsRequest = None + if tool_config._remote_tools: + remote_servers = [] + for remote_tool in tool_config._remote_tools: + remote_servers.append(to_remote_server(remote_tool)) + resolve_tools_request = ResolveToolsRequest(remote_servers, user=user) + + return resolve_tools_request + +def build_remotetools_resolve_tools_request( + agent_name: str, + api_version: str, + headers: Mapping[str, str] = None, + params: Mapping[str, str] = None, + **kwargs: Any + ) -> HttpRequest: + """Build the HTTP request for resolving remote tools. + + :param api_version: API version to use. + :type api_version: str + :param headers: Additional headers for the request. + :type headers: Mapping[str, str], optional + :param params: Query parameters for the request. + :type params: Mapping[str, str], optional + :return: Constructed HttpRequest object. 
+ :rtype: ~azure.core.rest.HttpRequest + """ + _headers = headers or {} + _params = params or {} + _params["api-version"] = api_version + + _url = f"/agents/{agent_name}/tools/resolve" + return HttpRequest(method="POST", url=_url, headers=_headers, params=_params, **kwargs) + \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py index a38f55408c7f..820d54c6cea0 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py @@ -10,3 +10,4 @@ class CreateResponse(response_create_params.ResponseCreateParamsBase, total=False): # type: ignore agent: Optional[_azure_ai_projects_models.AgentReference] stream: Optional[bool] + tools: Optional[list[_azure_ai_projects_models.Tool]] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index c3f001245133..cd0d7ed75896 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -7,7 +7,7 @@ import os import traceback from abc import abstractmethod -from typing import Any, AsyncGenerator, Generator, Union +from typing import Any, AsyncGenerator, Generator, Optional, Union import uvicorn from opentelemetry import context as otel_context, trace @@ -28,24 +28,31 @@ ) from .common.agent_run_context import AgentRunContext +from ..client.tools.aio._client import AzureAIToolClient +from ..client.tools._utils._model_base import ToolDefinition, UserInfo + logger = get_logger() DEBUG_ERRORS = os.environ.get(Constants.AGENT_DEBUG_ERRORS, "false").lower() == "true" class 
AgentRunContextMiddleware(BaseHTTPMiddleware): - def __init__(self, app: ASGIApp): + def __init__(self, app: ASGIApp, agent: Optional['FoundryCBAgent'] = None): super().__init__(app) + self.agent = agent async def dispatch(self, request: Request, call_next): + user_info = {} if request.url.path in ("/runs", "/responses"): try: + user_info = self.set_user_info_to_context_var(request) self.set_request_id_to_context_var(request) payload = await request.json() except Exception as e: logger.error(f"Invalid JSON payload: {e}") return JSONResponse({"error": f"Invalid JSON payload: {e}"}, status_code=400) try: - request.state.agent_run_context = AgentRunContext(payload) + agent_tools = self.agent.tools if self.agent else [] + request.state.agent_run_context = AgentRunContext(payload, user_info=user_info, agent_tools=agent_tools) self.set_run_context_to_context_var(request.state.agent_run_context) except Exception as e: logger.error(f"Context build failed: {e}.", exc_info=True) @@ -80,9 +87,32 @@ def set_run_context_to_context_var(self, run_context): ctx.update(res) request_context.set(ctx) + def set_user_info_to_context_var(self, request): + user_info: UserInfo = {} + try: + object_id_header = request.headers.get("x-aml-oid", None) + tenant_id_header = request.headers.get("x-aml-tenant-id", None) + + if object_id_header: + user_info["object_id"] = object_id_header + if tenant_id_header: + user_info["tenant_id"] = tenant_id_header + + except Exception as e: + logger.error(f"Failed to parse X-User-Info header: {e}", exc_info=True) + if user_info: + ctx = request_context.get() or {} + for key, value in user_info.items(): + ctx[f"azure.ai.agentserver.user.{key}"] = str(value) + request_context.set(ctx) + return user_info + class FoundryCBAgent: - def __init__(self): + def __init__(self, credentials: Optional["AsyncTokenCredential"] = None, **kwargs: Any) -> None: + self.credentials = credentials + self.tools = kwargs.get("tools", []) + async def runs_endpoint(request): # Set 
up tracing context and span context = request.state.agent_run_context @@ -200,7 +230,7 @@ async def readiness_endpoint(request): allow_methods=["*"], allow_headers=["*"], ) - self.app.add_middleware(AgentRunContextMiddleware) + self.app.add_middleware(AgentRunContextMiddleware, agent=self) @self.app.on_event("startup") async def attach_appinsights_logger(): @@ -303,7 +333,17 @@ def setup_otlp_exporter(self, endpoint, provider): provider.add_span_processor(processor) logger.info(f"Tracing setup with OTLP exporter: {endpoint}") + def get_tool_client(self, tools: Optional[list[ToolDefinition]], user_info: Optional[UserInfo]) -> AzureAIToolClient: + if not self.credentials: + raise ValueError("Credentials are required to create Tool Client.") + return AzureAIToolClient( + endpoint=os.getenv(Constants.AZURE_AI_PROJECT_ENDPOINT), + credential=self.credentials, + tools = tools, + user = user_info, + ) + def _event_to_sse_chunk(event: ResponseStreamEvent) -> str: event_data = json.dumps(event.as_dict()) if event.type: diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py index 2703f66f6ff2..89def295ef0c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py @@ -1,23 +1,27 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- +from typing import Any from ...logger import get_logger from ...models import CreateResponse from ...models.projects import AgentId, AgentReference, ResponseConversation1 from .id_generator.foundry_id_generator import FoundryIdGenerator from .id_generator.id_generator import IdGenerator +from ...client.tools.aio._client import AzureAIToolClient logger = get_logger() class AgentRunContext: - def __init__(self, payload: dict): + def __init__(self, payload: dict, **kwargs: Any) -> None: self._raw_payload = payload self._request = _deserialize_create_response(payload) self._id_generator = FoundryIdGenerator.from_request(payload) self._response_id = self._id_generator.response_id self._conversation_id = self._id_generator.conversation_id self._stream = self.request.get("stream", False) + self._user_info = kwargs.get("user_info", {}) + self._agent_tools = kwargs.get("agent_tools", []) @property def raw_payload(self) -> dict: @@ -60,13 +64,26 @@ def get_conversation_object(self) -> ResponseConversation1: return None # type: ignore return ResponseConversation1(id=self._conversation_id) - + def get_tools(self) -> list: + # request tools take precedence over agent tools + request_tools = self.request.get("tools", []) + if not request_tools: + return self._agent_tools + + return request_tools + + def get_user_info(self) -> dict: + return self._user_info def _deserialize_create_response(payload: dict) -> CreateResponse: _deserialized = CreateResponse(**payload) raw_agent_reference = payload.get("agent") if raw_agent_reference: _deserialized["agent"] = _deserialize_agent_reference(raw_agent_reference) + + tools = payload.get("tools") + if tools: + _deserialized["tools"] = [tool for tool in tools] return _deserialized diff --git a/sdk/agentserver/azure-ai-agentserver-core/cspell.json b/sdk/agentserver/azure-ai-agentserver-core/cspell.json index 126cadc0625c..17fb91b1e58f 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-core/cspell.json +++ b/sdk/agentserver/azure-ai-agentserver-core/cspell.json @@ -16,7 +16,9 @@ "GETFL", "DETFL", "SETFL", - "Planifica" + "Planifica", + "mcptools", + "ainvoke" ], "ignorePaths": [ "*.csv", diff --git a/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_with_tools_test.py b/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_with_tools_test.py new file mode 100644 index 000000000000..52648465e151 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_with_tools_test.py @@ -0,0 +1,108 @@ +# mypy: ignore-errors +import datetime + +from azure.ai.agentserver.core import AgentRunContext, FoundryCBAgent +from azure.ai.agentserver.core.models import Response as OpenAIResponse +from azure.ai.agentserver.core.models.projects import ( + ItemContentOutputText, + ResponseCompletedEvent, + ResponseCreatedEvent, + ResponseOutputItemAddedEvent, + ResponsesAssistantMessageItemResource, + ResponseTextDeltaEvent, + ResponseTextDoneEvent, +) + +from azure.identity import DefaultAzureCredential + +def stream_events(text: str, context: AgentRunContext): + item_id = context.id_generator.generate_message_id() + + assembled = "" + yield ResponseCreatedEvent(response=OpenAIResponse(output=[])) + yield ResponseOutputItemAddedEvent( + output_index=0, + item=ResponsesAssistantMessageItemResource( + id=item_id, + status="in_progress", + content=[ + ItemContentOutputText( + text="", + annotations=[], + ) + ], + ), + ) + for i, token in enumerate(text.split(" ")): + piece = token if i == len(text.split(" ")) - 1 else token + " " + assembled += piece + yield ResponseTextDeltaEvent(output_index=0, content_index=0, delta=piece) + # Done with text + yield ResponseTextDoneEvent(output_index=0, content_index=0, text=assembled) + yield ResponseCompletedEvent( + response=OpenAIResponse( + metadata={}, + temperature=0.0, + 
top_p=0.0, + user="me", + id=context.response_id, + created_at=datetime.datetime.now(), + output=[ + ResponsesAssistantMessageItemResource( + id=item_id, + status="completed", + content=[ + ItemContentOutputText( + text=assembled, + annotations=[], + ) + ], + ) + ], + ) + ) + + +async def agent_run(context: AgentRunContext): + agent = context.request.get("agent") + print(f"agent:{agent}") + + if context.stream: + return stream_events( + "I am mock agent with no intelligence in stream mode.", context + ) + + tool = await my_agent.get_tool_client().list_tools() + tool_list = [t.name for t in tool] + # Build assistant output content + output_content = [ + ItemContentOutputText( + text="I am mock agent with no intelligence with tools " + str(tool_list), + annotations=[], + ) + ] + my_agent.get_tool_client() # just to illustrate we can access tool client from context + response = OpenAIResponse( + metadata={}, + temperature=0.0, + top_p=0.0, + user="me", + id=context.response_id, + created_at=datetime.datetime.now(), + output=[ + ResponsesAssistantMessageItemResource( + id=context.id_generator.generate_message_id(), + status="completed", + content=output_content, + ) + ], + ) + return response + +credentials = DefaultAzureCredential() + +my_agent = FoundryCBAgent(credentials=credentials) +my_agent.agent_run = agent_run + +if __name__ == "__main__": + my_agent.run() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py index ed2e0d4d493a..06cfe3bd8489 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py @@ -3,19 +3,22 @@ # --------------------------------------------------------- __path__ = __import__("pkgutil").extend_path(__path__, __name__) -from typing import TYPE_CHECKING, Optional 
+from typing import TYPE_CHECKING, Optional, Any from ._version import VERSION if TYPE_CHECKING: # pragma: no cover from . import models + from azure.core.credentials_async import AsyncTokenCredential -def from_langgraph(agent, state_converter: Optional["models.LanggraphStateConverter"] = None): +def from_langgraph(agent, credentials: Optional["AsyncTokenCredential"] = None, state_converter: Optional["models.LanggraphStateConverter"] = None, **kwargs: Any) -> "LangGraphAdapter": from .langgraph import LangGraphAdapter - return LangGraphAdapter(agent, state_converter=state_converter) + return LangGraphAdapter(agent, credentials=credentials, state_converter=state_converter, **kwargs) +from .tool_client import ToolClient -__all__ = ["from_langgraph"] + +__all__ = ["from_langgraph", "ToolClient"] __version__ = VERSION diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index 0d2b60bac248..27b302e29a18 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -5,7 +5,7 @@ # mypy: disable-error-code="assignment,arg-type" import os import re -from typing import Optional +from typing import TYPE_CHECKING, Any, Awaitable, Protocol, Union, Optional, List from langchain_core.runnables import RunnableConfig from langgraph.graph.state import CompiledStateGraph @@ -20,42 +20,177 @@ LanggraphStateConverter, ) from .models.utils import is_state_schema_valid +from .tool_client import ToolClient +from langchain_core.tools import StructuredTool + +if TYPE_CHECKING: + from azure.core.credentials_async import AsyncTokenCredential + logger = get_logger() +class GraphFactory(Protocol): + """Protocol for graph factory functions. 
+ + A graph factory is a callable that takes a ToolClient and returns + a CompiledStateGraph, either synchronously or asynchronously. + """ + + def __call__(self, tools: List[StructuredTool]) -> Union[CompiledStateGraph, Awaitable[CompiledStateGraph]]: + """Create a CompiledStateGraph using the provided ToolClient. + + :param tools: The list of StructuredTool instances. + :type tools: List[StructuredTool] + :return: A compiled LangGraph state graph, or an awaitable that resolves to one. + :rtype: Union[CompiledStateGraph, Awaitable[CompiledStateGraph]] + """ + ... + + class LangGraphAdapter(FoundryCBAgent): """ Adapter for LangGraph Agent. """ - def __init__(self, graph: CompiledStateGraph, state_converter: Optional[LanggraphStateConverter] = None): + def __init__(self, graph: Union[CompiledStateGraph, GraphFactory], credentials: "Optional[AsyncTokenCredential]" = None, state_converter: "Optional[LanggraphStateConverter]" = None, **kwargs: Any) -> None: """ - Initialize the LangGraphAdapter with a CompiledStateGraph. + Initialize the LangGraphAdapter with a CompiledStateGraph or a function that returns one. - :param graph: The LangGraph StateGraph to adapt. - :type graph: CompiledStateGraph + :param graph: The LangGraph StateGraph to adapt, or a callable that takes ToolClient and returns CompiledStateGraph (sync or async). + :type graph: Union[CompiledStateGraph, GraphFactory] + :param credentials: Azure credentials for authentication. + :type credentials: Optional[AsyncTokenCredential] :param state_converter: custom state converter. Required if graph state is not MessagesState. 
:type state_converter: Optional[LanggraphStateConverter] """ - super().__init__() - self.graph = graph + super().__init__(credentials=credentials, **kwargs) + self._graph_or_factory: Union[CompiledStateGraph, GraphFactory] = graph + self._resolved_graph: "Optional[CompiledStateGraph]" = None self.azure_ai_tracer = None - if not state_converter: - if is_state_schema_valid(self.graph.builder.state_schema): - self.state_converter = LanggraphMessageStateConverter() + + # If graph is already compiled, validate and set up state converter + if isinstance(graph, CompiledStateGraph): + self._resolved_graph = graph + if not state_converter: + if is_state_schema_valid(self._resolved_graph.builder.state_schema): + self.state_converter = LanggraphMessageStateConverter() + else: + raise ValueError("state_converter is required for non-MessagesState graph.") else: - raise ValueError("state_converter is required for non-MessagesState graph.") + self.state_converter = state_converter else: + # Defer validation until graph is resolved self.state_converter = state_converter + @property + def graph(self) -> "Optional[CompiledStateGraph]": + """ + Get the resolved graph. This property provides backward compatibility. + + :return: The resolved CompiledStateGraph if available, None otherwise. 
+ :rtype: Optional[CompiledStateGraph] + """ + return self._resolved_graph + async def agent_run(self, context: AgentRunContext): - input_data = self.state_converter.request_to_state(context) - logger.debug(f"Converted input data: {input_data}") - if not context.stream: - response = await self.agent_run_non_stream(input_data, context) - return response - return self.agent_run_astream(input_data, context) + # Resolve graph - always resolve if it's a factory function to get fresh graph each time + # For factories, get a new graph instance per request to avoid concurrency issues + tool_client = None + try: + if callable(self._graph_or_factory): + graph, tool_client = await self._resolve_graph_for_request(context) + elif self._resolved_graph is None: + await self._resolve_graph(context) + graph = self._resolved_graph + else: + graph = self._resolved_graph + + input_data = self.state_converter.request_to_state(context) + logger.debug(f"Converted input data: {input_data}") + if not context.stream: + response = await self.agent_run_non_stream(input_data, context, graph) + return response + return self.agent_run_astream(input_data, context, graph, tool_client) + finally: + # Close tool_client if it was created for this request + if tool_client is not None: + try: + await tool_client.close() + logger.debug("Closed tool_client after request processing") + except Exception as e: + logger.warning(f"Error closing tool_client: {e}") + + async def _resolve_graph(self, context: AgentRunContext): + """ + Resolve the graph if it's a factory function (for single-use/first-time resolution). + Creates a ToolClient and calls the factory function with it. + This is used for the initial resolution to set up state_converter. 
+ """ + if callable(self._graph_or_factory): + logger.debug("Resolving graph from factory function") + + + # Create ToolClient with credentials + tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) + tool_client_wrapper = ToolClient(tool_client) + tools = await tool_client_wrapper.list_tools() + # Call the factory function with ToolClient + # Support both sync and async factories + import inspect + result = self._graph_or_factory(tools) + if inspect.iscoroutine(result): + self._resolved_graph = await result + else: + self._resolved_graph = result + + # Validate and set up state converter if not already set from initialization + if not self.state_converter: + if is_state_schema_valid(self._resolved_graph.builder.state_schema): + self.state_converter = LanggraphMessageStateConverter() + else: + raise ValueError("state_converter is required for non-MessagesState graph.") + + logger.debug("Graph resolved successfully") + else: + # Should not reach here, but just in case + self._resolved_graph = self._graph_or_factory + + async def _resolve_graph_for_request(self, context: AgentRunContext): + """ + Resolve a fresh graph instance for a single request to avoid concurrency issues. + Creates a ToolClient and calls the factory function with it. + This method returns a new graph instance and the tool_client for cleanup. + + :param context: The context for the agent run. + :type context: AgentRunContext + :return: A tuple of (compiled graph instance, tool_client wrapper). 
+ :rtype: tuple[CompiledStateGraph, ToolClient] + """ + logger.debug("Resolving fresh graph from factory function for request") + + # Create ToolClient with credentials + tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) + tool_client_wrapper = ToolClient(tool_client) + tools = await tool_client_wrapper.list_tools() + # Call the factory function with ToolClient + # Support both sync and async factories + import inspect + result = self._graph_or_factory(tools) + if inspect.iscoroutine(result): + graph = await result + else: + graph = result + + # Ensure state converter is set up (use existing one or create new) + if not self.state_converter: + if is_state_schema_valid(graph.builder.state_schema): + self.state_converter = LanggraphMessageStateConverter() + else: + raise ValueError("state_converter is required for non-MessagesState graph.") + + logger.debug("Fresh graph resolved successfully for request") + return graph, tool_client_wrapper def init_tracing_internal(self, exporter_endpoint=None, app_insights_conn_str=None): # set env vars for langsmith @@ -85,7 +220,7 @@ def get_trace_attributes(self): attrs["service.namespace"] = "azure.ai.agentserver.langgraph" return attrs - async def agent_run_non_stream(self, input_data: dict, context: AgentRunContext): + async def agent_run_non_stream(self, input_data: dict, context: AgentRunContext, graph: CompiledStateGraph): """ Run the agent with non-streaming response. @@ -93,6 +228,8 @@ async def agent_run_non_stream(self, input_data: dict, context: AgentRunContext) :type input_data: dict :param context: The context for the agent run. :type context: AgentRunContext + :param graph: The compiled graph instance to use for this request. + :type graph: CompiledStateGraph :return: The response of the agent run. 
:rtype: dict @@ -101,14 +238,14 @@ async def agent_run_non_stream(self, input_data: dict, context: AgentRunContext) try: config = self.create_runnable_config(context) stream_mode = self.state_converter.get_stream_mode(context) - result = await self.graph.ainvoke(input_data, config=config, stream_mode=stream_mode) + result = await graph.ainvoke(input_data, config=config, stream_mode=stream_mode) output = self.state_converter.state_to_response(result, context) return output except Exception as e: logger.error(f"Error during agent run: {e}") raise e - async def agent_run_astream(self, input_data: dict, context: AgentRunContext): + async def agent_run_astream(self, input_data: dict, context: AgentRunContext, graph: CompiledStateGraph, tool_client: "Optional[ToolClient]" = None): """ Run the agent with streaming response. @@ -116,6 +253,10 @@ async def agent_run_astream(self, input_data: dict, context: AgentRunContext): :type input_data: dict :param context: The context for the agent run. :type context: AgentRunContext + :param graph: The compiled graph instance to use for this request. + :type graph: CompiledStateGraph + :param tool_client: Optional ToolClient to close after streaming completes. + :type tool_client: Optional[ToolClient] :return: An async generator yielding the response stream events. 
:rtype: AsyncGenerator[dict] @@ -124,12 +265,20 @@ async def agent_run_astream(self, input_data: dict, context: AgentRunContext): logger.info(f"Starting streaming agent run {context.response_id}") config = self.create_runnable_config(context) stream_mode = self.state_converter.get_stream_mode(context) - stream = self.graph.astream(input=input_data, config=config, stream_mode=stream_mode) + stream = graph.astream(input=input_data, config=config, stream_mode=stream_mode) async for result in self.state_converter.state_to_response_stream(stream, context): yield result except Exception as e: logger.error(f"Error during streaming agent run: {e}") raise e + finally: + # Close tool_client if provided + if tool_client is not None: + try: + await tool_client._tool_client.close() + logger.debug("Closed tool_client after streaming completed") + except Exception as e: + logger.warning(f"Error closing tool_client in stream: {e}") def create_runnable_config(self, context: AgentRunContext) -> RunnableConfig: """ diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py new file mode 100644 index 000000000000..5a5b75c13a03 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py @@ -0,0 +1,211 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Tool client for integrating AzureAIToolClient with LangGraph.""" + +from typing import TYPE_CHECKING, Any, Dict, List, Optional + +from langchain_core.tools import StructuredTool +from pydantic import BaseModel, Field, create_model + +if TYPE_CHECKING: + from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient, FoundryTool + + +class ToolClient: + """Client that integrates AzureAIToolClient with LangGraph. + + This class provides methods to list tools from AzureAIToolClient and convert them + to LangChain BaseTool format, as well as invoke tools in a format compatible with + LangGraph's create_react_agent and StateGraph. + + :param tool_client: The AzureAIToolClient instance to use for tool operations. + :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient + + .. admonition:: Example: + + .. code-block:: python + + from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient + from azure.ai.agentserver.langgraph import ToolClient + from azure.identity.aio import DefaultAzureCredential + + async with DefaultAzureCredential() as credential: + tool_client = AzureAIToolClient( + endpoint="https://", + credential=credential + ) + + client = ToolClient(tool_client) + + # List tools as LangChain BaseTool instances + tools = await client.list_tools() + + # Use with create_react_agent + from langgraph.prebuilt import create_react_agent + from langchain_openai import AzureChatOpenAI + + model = AzureChatOpenAI(model="gpt-4o") + agent = create_react_agent(model, tools) + + # Invoke a tool directly + result = await client.invoke_tool( + tool_name="my_tool", + tool_input={"param": "value"} + ) + + :meta private: + """ + + def __init__(self, tool_client: "AzureAIToolClient") -> None: + """Initialize the ToolClient. + + :param tool_client: The AzureAIToolClient instance to use for tool operations. 
+ :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient + """ + self._tool_client = tool_client + self._langchain_tools_cache: List[StructuredTool] = None + + async def list_tools(self) -> List[StructuredTool]: + """List all available tools as LangChain BaseTool instances. + + Retrieves tools from AzureAIToolClient and converts them to LangChain + StructuredTool instances that can be used with LangGraph's create_react_agent + or StateGraph. + + :return: List of LangChain StructuredTool instances. + :rtype: List[~langchain_core.tools.StructuredTool] + :raises ~azure.core.exceptions.HttpResponseError: + Raised for HTTP communication failures. + + .. admonition:: Example: + + .. code-block:: python + + client = ToolClient(tool_client) + tools = await client.list_tools() + + # Use with create_react_agent + agent = create_react_agent(model, tools) + """ + # Get tools from AzureAIToolClient + if self._langchain_tools_cache is not None: + return self._langchain_tools_cache + + azure_tools = await self._tool_client.list_tools() + self._langchain_tools_cache = [] + # Convert to LangChain StructuredTool instances + for azure_tool in azure_tools: + langchain_tool = self._convert_to_langchain_tool(azure_tool) + self._langchain_tools_cache.append(langchain_tool) + + return self._langchain_tools_cache + + def _convert_to_langchain_tool(self, azure_tool: "FoundryTool") -> StructuredTool: + """Convert an AzureAITool to a LangChain StructuredTool. + + :param azure_tool: The AzureAITool to convert. + :type azure_tool: ~azure.ai.agentserver.core.client.tools.aio.AzureAITool + :return: A LangChain StructuredTool instance. 
+ :rtype: ~langchain_core.tools.StructuredTool + """ + # Get the input schema from the tool descriptor + input_schema = azure_tool.input_schema or {} + + # Create a Pydantic model for the tool's input schema + args_schema = self._create_pydantic_model( + tool_name=azure_tool.name, + schema=input_schema + ) + + # Create an async function that invokes the tool + async def tool_func(**kwargs: Any) -> str: + """Invoke the Azure AI tool.""" + result = await azure_tool(**kwargs) + # Convert result to string for LangChain compatibility + if isinstance(result, dict): + import json + return json.dumps(result) + return str(result) + + # Create a StructuredTool with the async function + structured_tool = StructuredTool( + name=azure_tool.name, + description=azure_tool.description or "No description available", + coroutine=tool_func, + args_schema=args_schema, + ) + + return structured_tool + + def _create_pydantic_model( + self, + tool_name: str, + schema: Dict[str, Any] + ) -> type[BaseModel]: + """Create a Pydantic model from a JSON schema. + + :param tool_name: Name of the tool (used for model name). + :type tool_name: str + :param schema: JSON schema for the tool's input parameters. + :type schema: Dict[str, Any] + :return: A Pydantic model class. 
+ :rtype: type[BaseModel] + """ + # Get properties from schema + properties = schema.get("properties", {}) + required_fields = schema.get("required", []) + + # Build field definitions for Pydantic model + field_definitions = {} + for prop_name, prop_schema in properties.items(): + prop_type = self._json_type_to_python_type(prop_schema.get("type", "string")) + prop_description = prop_schema.get("description", "") + + # Determine if field is required + is_required = prop_name in required_fields + + if is_required: + field_definitions[prop_name] = ( + prop_type, + Field(..., description=prop_description) + ) + else: + field_definitions[prop_name] = ( + Optional[prop_type], + Field(None, description=prop_description) + ) + + # Create the model dynamically + model_name = f"{tool_name.replace('-', '_').replace(' ', '_').title()}Input" + return create_model(model_name, **field_definitions) + + def _json_type_to_python_type(self, json_type: str) -> type: + """Convert JSON schema type to Python type. + + :param json_type: JSON schema type string. + :type json_type: str + :return: Corresponding Python type. 
+ :rtype: type + """ + type_mapping = { + "string": str, + "integer": int, + "number": float, + "boolean": bool, + "array": list, + "object": dict, + } + return type_mapping.get(json_type, str) + + async def close(self) -> None: + await self._tool_client.close() + + async def __aenter__(self) -> "ToolClient": + """Async context manager entry.""" + return self + + async def __aexit__(self, *exc_details: Any) -> None: + """Async context manager exit.""" + # The tool_client lifecycle is managed externally + pass diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/cspell.json b/sdk/agentserver/azure-ai-agentserver-langgraph/cspell.json index 470408fb66cc..1ea68a37f8d5 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/cspell.json +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/cspell.json @@ -5,7 +5,8 @@ "mslearn", "envtemplate", "ainvoke", - "asetup" + "asetup", + "mcptools" ], "ignorePaths": [ "*.csv", diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/graph_factory_example.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/graph_factory_example.py new file mode 100644 index 000000000000..4b95f4d98801 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/graph_factory_example.py @@ -0,0 +1,128 @@ +# Copyright (c) Microsoft. All rights reserved. +"""Example showing how to use a graph factory function with ToolClient. + +This sample demonstrates how to pass a factory function to LangGraphAdapter +that receives a ToolClient and returns a CompiledStateGraph. This pattern +allows the graph to be created dynamically with access to tools from +Azure AI Tool Client at runtime. 
+""" + +import asyncio +import os +from typing import List +from dotenv import load_dotenv +from importlib.metadata import version +from langchain_openai import AzureChatOpenAI +from langgraph.checkpoint.memory import MemorySaver +from langgraph.graph.state import CompiledStateGraph +from langchain_core.tools import StructuredTool + +from azure.ai.agentserver.langgraph import from_langgraph +from azure.identity.aio import DefaultAzureCredential + +load_dotenv() + + +def create_agent(model, tools, checkpointer): + """Create a LangGraph agent based on the version.""" + # for different langgraph versions + langgraph_version = version("langgraph") + if langgraph_version < "1.0.0": + from langgraph.prebuilt import create_react_agent + + return create_react_agent(model, tools, checkpointer=checkpointer) + else: + from langchain.agents import create_agent + + return create_agent(model, tools, checkpointer=checkpointer) + + +def create_graph_factory(): + """Create a factory function that builds a graph with ToolClient. + + This function returns a factory that takes a ToolClient and returns + a CompiledStateGraph. The graph is created at runtime for every request, + allowing it to access the latest tool configuration dynamically. + """ + + async def graph_factory(tools: List[StructuredTool]) -> CompiledStateGraph: + """Factory function that creates a graph using the provided tools. + + :param tools: The list of StructuredTool instances. + :type tools: List[StructuredTool] + :return: A compiled LangGraph state graph. 
+ :rtype: CompiledStateGraph + """ + # Get configuration from environment + deployment_name = os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", "gpt-4o") + + # List all available tools from the ToolClient + print(f"Found {len(tools)} tools:") + for tool in tools: + print(f" - {tool.name}: {tool.description}") + + if not tools: + print("\nNo tools found!") + print("Make sure your Azure AI project has tools configured.") + raise ValueError("No tools available to create agent") + + # Create the language model + model = AzureChatOpenAI(model=deployment_name) + + # Create a memory checkpointer for conversation history + memory = MemorySaver() + + # Create the LangGraph agent with the tools + print("\nCreating LangGraph agent with tools from factory...") + agent = create_agent(model, tools, memory) + + print("Agent created successfully!") + return agent + + return graph_factory + + +async def quickstart(): + """Build and return a LangGraphAdapter using a graph factory function.""" + + # Get configuration from environment + project_endpoint = os.getenv("AZURE_AI_PROJECT_ENDPOINT") + + if not project_endpoint: + raise ValueError( + "AZURE_AI_PROJECT_ENDPOINT environment variable is required. 
" + "Set it to your Azure AI project endpoint, e.g., " + "https://.services.ai.azure.com/api/projects/" + ) + + # Create Azure credentials + credential = DefaultAzureCredential() + + # Create a factory function that will build the graph at runtime + # The factory will receive a ToolClient when the agent first runs + graph_factory = create_graph_factory() + + # Pass the factory function to from_langgraph instead of a compiled graph + # The graph will be created on every agent run with access to ToolClient + print("Creating LangGraph adapter with factory function...") + # Get project connection ID from environment + tool_connection_id = os.getenv("AZURE_AI_PROJECT_TOOL_CONNECTION_ID") + + adapter = from_langgraph(graph_factory, credentials=credential, tools=[{"type": "mcp", "project_connection_id": tool_connection_id}]) + + print("Adapter created! Graph will be built on every request.") + return adapter + + +async def main(): # pragma: no cover - sample entrypoint + """Main function to run the agent.""" + adapter = await quickstart() + + if adapter: + print("\nStarting agent server...") + print("The graph factory will be called for every request that arrives.") + await adapter.run_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py new file mode 100644 index 000000000000..f77a0b31b1d5 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py @@ -0,0 +1,109 @@ +# Copyright (c) Microsoft. All rights reserved. +"""Enhanced MCP example using ToolClient with AzureAIToolClient. + +This sample demonstrates how to use the ToolClient to integrate Azure AI +Tool Client (which supports both MCP tools and Azure AI Tools API) with +LangGraph's create_react_agent. 
+""" + +import asyncio +import os + +from dotenv import load_dotenv +from importlib.metadata import version +from langchain_openai import AzureChatOpenAI +from langgraph.checkpoint.memory import MemorySaver + +from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient +from azure.ai.agentserver.langgraph import ToolClient, from_langgraph +from azure.identity.aio import DefaultAzureCredential + +load_dotenv() + + +def create_agent(model, tools, checkpointer): + """Create a LangGraph agent based on available imports.""" + try: + from langgraph.prebuilt import create_react_agent + return create_react_agent(model, tools, checkpointer=checkpointer) + except ImportError: + from langchain.agents import create_agent + return create_agent(model, tools, checkpointer=checkpointer) + + +async def quickstart(): + """Build and return a LangGraph agent wired to Azure AI Tool Client.""" + + # Get configuration from environment + project_endpoint = os.getenv("AZURE_AI_PROJECT_ENDPOINT") + deployment_name = os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", "gpt-4o") + + if not project_endpoint: + raise ValueError( + "AZURE_AI_PROJECT_ENDPOINT environment variable is required. 
" + "Set it to your Azure AI project endpoint, e.g., " + "https://.services.ai.azure.com/api/projects/" + ) + + # Create Azure credentials + credential = DefaultAzureCredential() + tool_definitions = [ + { + "type": "mcp", + "project_connection_id": "" + }, + { + "type": "code_interpreter", + } + ] + # Create the AzureAIToolClient + # This client supports both MCP tools and Azure AI Tools API + tool_client = AzureAIToolClient( + endpoint=project_endpoint, + credential=credential, + tools=tool_definitions + ) + + # Create the ToolClient + client = ToolClient(tool_client) + + # List all available tools and convert to LangChain format + print("Fetching tools from Azure AI Tool Client...") + tools = await client.list_tools() + print(f"Found {len(tools)} tools:") + for tool in tools: + print(f" - {tool.name}: {tool.description}") + + if not tools: + print("\nNo tools found!") + print("Make sure your Azure AI project has tools configured.") + print("This can include:") + print(" - MCP (Model Context Protocol) servers") + print(" - Foundry AI Tools") + return None + + # Create the language model + model = AzureChatOpenAI(model=deployment_name) + + # Create a memory checkpointer for conversation history + memory = MemorySaver() + + # Create the LangGraph agent with the tools + print("\nCreating LangGraph agent with tools...") + agent = create_agent(model, tools, memory) + + print("Agent created successfully!") + return agent + + +async def main(): # pragma: no cover - sample entrypoint + """Main function to run the agent.""" + agent = await quickstart() + + if agent: + print("\nStarting agent server...") + await from_langgraph(agent).run_async() + + +if __name__ == "__main__": + asyncio.run(main()) From b7d7bea2296a0009dda63237f836ecbbcd246a89 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Wed, 12 Nov 2025 23:33:30 -0800 Subject: [PATCH 12/94] update changelog and version --- .../azure-ai-agentserver-agentframework/CHANGELOG.md | 11 +++++++++++ 
 .../azure/ai/agentserver/agentframework/_version.py | 2 +-
 .../azure-ai-agentserver-core/CHANGELOG.md | 11 +++++++++++
 .../azure/ai/agentserver/core/_version.py | 2 +-
 .../azure-ai-agentserver-langgraph/CHANGELOG.md | 11 +++++++++++
 .../azure/ai/agentserver/langgraph/_version.py | 2 +-
 6 files changed, 36 insertions(+), 3 deletions(-)

diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md
index c22ea4418361..15d90e5660ab 100644
--- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md
+++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md
@@ -1,6 +1,17 @@
 # Release History
 
+## 1.0.0b4 (2025-11-13)
+
+### Features Added
+
+- Adapters support tools
+
+### Bugs Fixed
+
+- Pin azure-ai-projects and azure-ai-agents versions to avoid version conflicts
+
+
 ## 1.0.0b3 (2025-11-11)
 
 ### Bugs Fixed
 
diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py
index c43fdbc2e239..22553b18fb7e 100644
--- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py
+++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py
@@ -6,4 +6,4 @@
 # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
 # --------------------------------------------------------------------------
-VERSION = "1.0.0b3"
+VERSION = "1.0.0b4"
diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md
index c22ea4418361..15d90e5660ab 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md
+++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md
@@ -1,6 +1,17 @@
 # Release History
 
+## 1.0.0b4 (2025-11-13)
+
+### Features Added
+
+- Adapters support tools
+
+### Bugs Fixed
+
+- Pin azure-ai-projects and azure-ai-agents versions to avoid version conflicts
+
+
 ## 1.0.0b3 (2025-11-11)
 
 ### Bugs Fixed
 
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py
index c43fdbc2e239..22553b18fb7e 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py
@@ -6,4 +6,4 @@
 # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
 # --------------------------------------------------------------------------
-VERSION = "1.0.0b3"
+VERSION = "1.0.0b4"
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md
index c22ea4418361..15d90e5660ab 100644
--- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md
+++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md
@@ -1,6 +1,17 @@
 # Release History
 
+## 1.0.0b4 (2025-11-13)
+
+### Features Added
+
+- Adapters support tools
+
+### Bugs Fixed
+
+- Pin azure-ai-projects and azure-ai-agents versions to avoid version conflicts
+
+
 ## 1.0.0b3 (2025-11-11)
 
 ### Bugs Fixed
 
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py
index c43fdbc2e239..22553b18fb7e 100644
--- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py
+++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py
@@ -6,4 +6,4 @@
 # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b3" +VERSION = "1.0.0b4" From 52a9256153a65007fdeb83f853a9b1c83ce040f9 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Thu, 13 Nov 2025 00:13:25 -0800 Subject: [PATCH 13/94] fix cspell --- .../azure-ai-agentserver-agentframework/cspell.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json b/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json index 116acbc87af3..48c11927e406 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json @@ -5,7 +5,8 @@ "mslearn", "envtemplate", "pysort", - "redef" + "redef", + "aifunction" ], "ignorePaths": [ "*.csv", From ba4e1fccd8354a7e51565a8f206a5cd43f112c32 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Thu, 13 Nov 2025 01:22:14 -0800 Subject: [PATCH 14/94] fix pylint and mypy for -core --- .../core/client/tools/_utils/_model_base.py | 1 + .../ai/agentserver/core/client/tools/aio/_client.py | 3 +++ .../core/client/tools/aio/operations/_operations.py | 5 ++++- .../core/client/tools/operations/_operations.py | 1 + .../azure/ai/agentserver/core/server/base.py | 7 +++++-- .../core/server/common/agent_run_context.py | 10 +++++----- 6 files changed, 19 insertions(+), 8 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py index d68c2ae28744..1bbdb6e4172c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py @@ -2,6 +2,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. 
All rights reserved. # --------------------------------------------------------- +# mypy: ignore-errors from dataclasses import dataclass, asdict, is_dataclass from typing import Any, Dict, Iterable, List, Mapping, MutableMapping, Optional, Set, Tuple diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py index 93f550448b5a..8fd092bab5f1 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py @@ -1,3 +1,6 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- from typing import Any, List, Mapping, Union, TYPE_CHECKING diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py index f99646d5fb8b..e55be880fb6a 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py @@ -1,4 +1,7 @@ - +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +# mypy: ignore-errors import json from typing import Any, Dict, List, Mapping, MutableMapping diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py index e05e1e84e708..9f6e0eb20e8c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py @@ -1,6 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- +# mypy: ignore-errors import json import logging diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index cd0d7ed75896..e1d2531ea22a 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -2,6 +2,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- # pylint: disable=broad-exception-caught,unused-argument,logging-fstring-interpolation,too-many-statements,too-many-return-statements +# mypy: disable-error-code="name-defined,annotation-unchecked,arg-type" import inspect import json import os @@ -333,7 +334,9 @@ def setup_otlp_exporter(self, endpoint, provider): provider.add_span_processor(processor) logger.info(f"Tracing setup with OTLP exporter: {endpoint}") - def get_tool_client(self, tools: Optional[list[ToolDefinition]], user_info: Optional[UserInfo]) -> AzureAIToolClient: + def get_tool_client( + self, tools: Optional[list[ToolDefinition]], user_info: Optional[UserInfo] + ) -> AzureAIToolClient: if not self.credentials: raise ValueError("Credentials are required to create Tool Client.") return AzureAIToolClient( @@ -343,7 +346,7 @@ def get_tool_client(self, tools: Optional[list[ToolDefinition]], user_info: Opti user = user_info, ) - + def _event_to_sse_chunk(event: ResponseStreamEvent) -> str: event_data = json.dumps(event.as_dict()) if event.type: diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py index 89def295ef0c..5188476b8339 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py @@ -7,7 +7,6 @@ from ...models.projects import AgentId, AgentReference, ResponseConversation1 from .id_generator.foundry_id_generator import FoundryIdGenerator from .id_generator.id_generator import IdGenerator -from ...client.tools.aio._client import AzureAIToolClient logger = get_logger() @@ -69,21 +68,22 @@ def get_tools(self) -> list: request_tools = self.request.get("tools", []) if not request_tools: return self._agent_tools - 
return request_tools - + def get_user_info(self) -> dict: return self._user_info + + def _deserialize_create_response(payload: dict) -> CreateResponse: _deserialized = CreateResponse(**payload) raw_agent_reference = payload.get("agent") if raw_agent_reference: _deserialized["agent"] = _deserialize_agent_reference(raw_agent_reference) - + tools = payload.get("tools") if tools: - _deserialized["tools"] = [tool for tool in tools] + _deserialized["tools"] = [tool for tool in tools] # pylint: disable=unnecessary-comprehension return _deserialized From 661ecb31b4b4fe2f5e115dd61ef7ba715cd55336 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Thu, 13 Nov 2025 10:00:47 -0800 Subject: [PATCH 15/94] fix agents sdk version --- sdk/agentserver/azure-ai-agentserver-core/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index 1c6c37e19e23..ad882b2ab596 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -21,7 +21,7 @@ keywords = ["azure", "azure sdk"] dependencies = [ "azure-monitor-opentelemetry>=1.5.0", "azure-ai-projects==1.1.0b4", - "azure-ai-agents==1.2.0b6", + "azure-ai-agents==1.2.0b5", "azure-core>=1.35.0", "azure-identity", "openai>=1.80.0", From 8ba9f1b5ec11daa4206e2b51d4604f06a680ee94 Mon Sep 17 00:00:00 2001 From: Ganesh Bheemarasetty <1634042+ganeshyb@users.noreply.github.com> Date: Thu, 13 Nov 2025 13:05:54 -0800 Subject: [PATCH 16/94] pylint fixes (#44010) --- .../ai/agentserver/agentframework/__init__.py | 10 +-- .../agentframework/agent_framework.py | 43 +++++++------ .../agentserver/agentframework/tool_client.py | 60 +++++++++++------- .../core/client/tools/_utils/_model_base.py | 5 +- .../client/tools/operations/_operations.py | 8 +-- .../azure/ai/agentserver/core/server/base.py | 1 + .../ai/agentserver/langgraph/__init__.py | 10 ++- 
.../ai/agentserver/langgraph/langgraph.py | 50 +++++++++------ .../ai/agentserver/langgraph/tool_client.py | 61 +++++++++++-------- 9 files changed, 151 insertions(+), 97 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py index aa03a264339c..2b987cdcf3f5 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py @@ -5,20 +5,20 @@ from typing import TYPE_CHECKING, Optional, Any -from ._version import VERSION from .agent_framework import AgentFrameworkCBAgent +from .tool_client import ToolClient +from ._version import VERSION if TYPE_CHECKING: # pragma: no cover from azure.core.credentials_async import AsyncTokenCredential -def from_agent_framework(agent, credentials: Optional["AsyncTokenCredential"] = None, **kwargs: Any) -> "AgentFrameworkCBAgent": - from .agent_framework import AgentFrameworkCBAgent +def from_agent_framework(agent, + credentials: Optional["AsyncTokenCredential"] = None, + **kwargs: Any) -> "AgentFrameworkCBAgent": return AgentFrameworkCBAgent(agent, credentials=credentials, **kwargs) -from .tool_client import ToolClient - __all__ = ["from_agent_framework", "ToolClient"] __version__ = VERSION diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 50cb09fd66f7..77f3a4b1ce85 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -9,7 +9,7 @@ from typing import 
TYPE_CHECKING, Any, AsyncGenerator, Awaitable, Optional, Protocol, Union, List import inspect -from agent_framework import AgentProtocol +from agent_framework import AgentProtocol, AIFunction from agent_framework.azure import AzureAIAgentClient # pylint: disable=no-name-in-module from opentelemetry import trace @@ -28,7 +28,6 @@ from .models.agent_framework_output_non_streaming_converter import ( AgentFrameworkOutputNonStreamingConverter, ) -from agent_framework import AIFunction from .models.agent_framework_output_streaming_converter import AgentFrameworkOutputStreamingConverter from .models.constants import Constants from .tool_client import ToolClient @@ -45,7 +44,7 @@ class AgentFactory(Protocol): An agent factory is a callable that takes a ToolClient and returns an AgentProtocol, either synchronously or asynchronously. """ - + def __call__(self, tools: List[AIFunction]) -> Union[AgentProtocol, Awaitable[AgentProtocol]]: """Create an AgentProtocol using the provided ToolClient. @@ -74,18 +73,20 @@ class AgentFrameworkCBAgent(FoundryCBAgent): - Supports both streaming and non-streaming responses based on the `stream` flag. """ - def __init__(self, agent: Union[AgentProtocol, AgentFactory], credentials: "Optional[AsyncTokenCredential]" = None, **kwargs: Any): + def __init__(self, agent: Union[AgentProtocol, AgentFactory], + credentials: "Optional[AsyncTokenCredential]" = None, + **kwargs: Any): """Initialize the AgentFrameworkCBAgent with an AgentProtocol or a factory function. - :param agent: The Agent Framework agent to adapt, or a callable that takes ToolClient and returns AgentProtocol (sync or async). + :param agent: The Agent Framework agent to adapt, or a callable that takes ToolClient + and returns AgentProtocol (sync or async). :type agent: Union[AgentProtocol, AgentFactory] :param credentials: Azure credentials for authentication. 
:type credentials: Optional[AsyncTokenCredential] """ - super().__init__(credentials=credentials, **kwargs) + super().__init__(credentials=credentials, **kwargs) # pylint: disable=unexpected-keyword-arg self._agent_or_factory: Union[AgentProtocol, AgentFactory] = agent self._resolved_agent: "Optional[AgentProtocol]" = None - # If agent is already instantiated, use it directly if isinstance(agent, AgentProtocol): self._resolved_agent = agent @@ -126,21 +127,24 @@ async def _resolve_agent(self, context: AgentRunContext): """Resolve the agent if it's a factory function (for single-use/first-time resolution). Creates a ToolClient and calls the factory function with it. This is used for the initial resolution. + + :param context: The agent run context containing tools and user information. + :type context: AgentRunContext """ if callable(self._agent_or_factory): logger.debug("Resolving agent from factory function") - + # Create ToolClient with credentials - tool_client = self.get_tool_client(tools=context.get_tools(), user_info=context.get_user_info()) + tool_client = self.get_tool_client(tools=context.get_tools(), user_info=context.get_user_info()) # pylint: disable=no-member tool_client_wrapper = ToolClient(tool_client) tools = await tool_client_wrapper.list_tools() - + result = self._agent_or_factory(tools) if inspect.iscoroutine(result): self._resolved_agent = await result else: self._resolved_agent = result - + logger.debug("Agent resolved successfully") else: # Should not reach here, but just in case @@ -149,19 +153,18 @@ async def _resolve_agent(self, context: AgentRunContext): async def _resolve_agent_for_request(self, context: AgentRunContext): logger.debug("Resolving fresh agent from factory function for request") - + # Create ToolClient with credentials - tool_client = self.get_tool_client(tools=context.get_tools(), user_info=context.get_user_info()) + tool_client = self.get_tool_client(tools=context.get_tools(), user_info=context.get_user_info()) # pylint: 
disable=no-member tool_client_wrapper = ToolClient(tool_client) tools = await tool_client_wrapper.list_tools() - - import inspect + result = self._agent_or_factory(tools) if inspect.iscoroutine(result): agent = await result else: agent = result - + logger.debug("Fresh agent resolved successfully for request") return agent, tool_client_wrapper @@ -184,7 +187,7 @@ def init_tracing(self): agent_client.setup_azure_ai_observability() self.tracer = trace.get_tracer(__name__) - async def agent_run( + async def agent_run( # pylint: disable=too-many-statements self, context: AgentRunContext ) -> Union[ OpenAIResponse, @@ -201,7 +204,7 @@ async def agent_run( agent = self._resolved_agent else: agent = self._resolved_agent - + logger.info(f"Starting agent_run with stream={context.stream}") request_input = context.request.get("input") @@ -248,7 +251,7 @@ async def stream_updates(): try: await tool_client.close() logger.debug("Closed tool_client after streaming completed") - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught logger.warning(f"Error closing tool_client in stream: {e}") return stream_updates() @@ -267,5 +270,5 @@ async def stream_updates(): try: await tool_client.close() logger.debug("Closed tool_client after request processing") - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught logger.warning(f"Error closing tool_client: {e}") diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py index e06df0df3026..0049b3982b1c 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py @@ -3,13 +3,16 @@ # --------------------------------------------------------- """Tool 
client for integrating AzureAIToolClient with Agent Framework.""" -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional +from typing import TYPE_CHECKING, Any, Dict, List, Optional from agent_framework import AIFunction -from pydantic import BaseModel, Field, create_model - +from pydantic import Field, create_model +from azure.ai.agentserver.core.logger import get_logger if TYPE_CHECKING: from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient, FoundryTool +logger = get_logger() + +# pylint: disable=client-accepts-api-version-keyword,missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs class ToolClient: """Client that integrates AzureAIToolClient with Agent Framework. @@ -46,7 +49,7 @@ class ToolClient: :meta private: """ - + def __init__(self, tool_client: "AzureAIToolClient") -> None: """Initialize the ToolClient. @@ -55,7 +58,7 @@ def __init__(self, tool_client: "AzureAIToolClient") -> None: """ self._tool_client = tool_client self._aifunction_cache: List[AIFunction] = None - + async def list_tools(self) -> List[AIFunction]: """List all available tools as Agent Framework tool definitions. 
@@ -77,7 +80,7 @@ async def list_tools(self) -> List[AIFunction]: # Get tools from AzureAIToolClient if self._aifunction_cache is not None: return self._aifunction_cache - + azure_tools = await self._tool_client.list_tools() self._aifunction_cache = [] @@ -98,34 +101,40 @@ def _convert_to_agent_framework_tool(self, azure_tool: "FoundryTool") -> AIFunct """ # Get the input schema from the tool descriptor input_schema = azure_tool.input_schema or {} - + # Create a Pydantic model from the input schema properties = input_schema.get("properties", {}) required_fields = set(input_schema.get("required", [])) - + # Build field definitions for the Pydantic model field_definitions: Dict[str, Any] = {} for field_name, field_info in properties.items(): field_type = self._json_schema_type_to_python(field_info.get("type", "string")) field_description = field_info.get("description", "") is_required = field_name in required_fields - + if is_required: field_definitions[field_name] = (field_type, Field(description=field_description)) else: - field_definitions[field_name] = (Optional[field_type], Field(default=None, description=field_description)) - + field_definitions[field_name] = (Optional[field_type], + Field(default=None, description=field_description)) + # Create the Pydantic model dynamically input_model = create_model( f"{azure_tool.name}_input", **field_definitions ) - + # Create a wrapper function that calls the Azure tool async def tool_func(**kwargs: Any) -> Any: - """Dynamically generated function to invoke the Azure AI tool.""" - return await self.invoke_tool(azure_tool.name, kwargs) - + """Dynamically generated function to invoke the Azure AI tool. + + :return: The result from the tool invocation. 
+ :rtype: Any + """ + logger.debug("Invoking tool: %s with input: %s", azure_tool.name, kwargs) + return await azure_tool.ainvoke(kwargs) + # Create and return the AIFunction return AIFunction( name=azure_tool.name, @@ -133,7 +142,7 @@ async def tool_func(**kwargs: Any) -> Any: func=tool_func, input_model=input_model ) - + def _json_schema_type_to_python(self, json_type: str) -> type: """Convert JSON schema type to Python type. @@ -151,14 +160,23 @@ def _json_schema_type_to_python(self, json_type: str) -> type: "object": dict, } return type_map.get(json_type, str) - + async def close(self) -> None: + """Close the tool client and release resources.""" await self._tool_client.close() - + async def __aenter__(self) -> "ToolClient": - """Async context manager entry.""" + """Async context manager entry. + + :return: The ToolClient instance. + :rtype: ToolClient + """ return self - + async def __aexit__(self, *exc_details: Any) -> None: - """Async context manager exit.""" + """Async context manager exit. + + :param exc_details: Exception details if an exception occurred. 
+ :type exc_details: Any + """ await self.close() diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py index 1bbdb6e4172c..e06ef576264e 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py @@ -751,8 +751,11 @@ def from_dict(cls, data: Mapping[str, Any], tool_definitions: List[ToolDefinitio result_data = data.get("result", {}) tools_list = [] tool_definitions_map = {f"{td.type.lower()}": td for td in tool_definitions} - + filter_tools = len(tool_definitions_map) > 0 for tool_data in result_data.get("tools", []): + + if filter_tools and tool_data["name"].lower() not in tool_definitions_map: + continue # Parse inputSchema input_schema_data = tool_data.get("inputSchema", {}) input_schema = MCPToolSchema( diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py index 9f6e0eb20e8c..b54d2d7f6538 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py @@ -4,8 +4,7 @@ # mypy: ignore-errors import json -import logging -from typing import Any, Dict, List, Mapping, MutableMapping, Optional, Tuple, Union +from typing import Any, Dict, List, Mapping, MutableMapping, Tuple, Union from azure.core import PipelineClient from .._configuration import AzureAIToolClientConfiguration from .._model_base import FoundryTool, ToolSource, UserInfo @@ -26,7 +25,6 @@ map_error, ) -logger = logging.getLogger(__name__) # 
Shared constants API_VERSION = "2025-11-15-preview" @@ -192,7 +190,7 @@ def build_invoke_mcp_tool_request( _params = {} _content = prepare_mcptools_invoke_tool_request_content(tool, arguments, TOOL_PROPERTY_OVERRIDES) - logger.info("Invoking MCP tool: %s with arguments: %s", tool.name, dict(arguments)) + content = json.dumps(_content) _request = build_mcptools_invoke_tool_request(api_version=api_version, headers=_headers, params=_params, content=content) @@ -370,7 +368,7 @@ def prepare_mcptools_invoke_tool_request_content(tool: FoundryTool, arguments: M ) if meta_config: params["_meta"] = meta_config - logger.info("Prepared MCP tool invocation params: %s", params) + payload = { "jsonrpc": "2.0", "id": 2, diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index e1d2531ea22a..1d1ba9e2eb3c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -337,6 +337,7 @@ def setup_otlp_exporter(self, endpoint, provider): def get_tool_client( self, tools: Optional[list[ToolDefinition]], user_info: Optional[UserInfo] ) -> AzureAIToolClient: + logger.debug("Creating AzureAIToolClient with tools: %s", tools) if not self.credentials: raise ValueError("Credentials are required to create Tool Client.") return AzureAIToolClient( diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py index 06cfe3bd8489..4ad1719ba56b 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py @@ -6,19 +6,23 @@ from typing import TYPE_CHECKING, Optional, Any from ._version 
import VERSION +from .tool_client import ToolClient if TYPE_CHECKING: # pragma: no cover from . import models from azure.core.credentials_async import AsyncTokenCredential -def from_langgraph(agent, credentials: Optional["AsyncTokenCredential"] = None, state_converter: Optional["models.LanggraphStateConverter"] = None, **kwargs: Any) -> "LangGraphAdapter": +def from_langgraph( + agent, + credentials: Optional["AsyncTokenCredential"] = None, + state_converter: Optional["models.LanggraphStateConverter"] = None, + **kwargs: Any +) -> "LangGraphAdapter": from .langgraph import LangGraphAdapter return LangGraphAdapter(agent, credentials=credentials, state_converter=state_converter, **kwargs) -from .tool_client import ToolClient - __all__ = ["from_langgraph", "ToolClient"] __version__ = VERSION diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index 27b302e29a18..6aac565660dd 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -8,6 +8,7 @@ from typing import TYPE_CHECKING, Any, Awaitable, Protocol, Union, Optional, List from langchain_core.runnables import RunnableConfig +from langchain_core.tools import StructuredTool from langgraph.graph.state import CompiledStateGraph from azure.ai.agentserver.core.constants import Constants @@ -22,11 +23,9 @@ from .models.utils import is_state_schema_valid from .tool_client import ToolClient -from langchain_core.tools import StructuredTool - if TYPE_CHECKING: from azure.core.credentials_async import AsyncTokenCredential - + logger = get_logger() @@ -53,11 +52,18 @@ class LangGraphAdapter(FoundryCBAgent): Adapter for LangGraph Agent. 
""" - def __init__(self, graph: Union[CompiledStateGraph, GraphFactory], credentials: "Optional[AsyncTokenCredential]" = None, state_converter: "Optional[LanggraphStateConverter]" = None, **kwargs: Any) -> None: + def __init__( + self, + graph: Union[CompiledStateGraph, GraphFactory], + credentials: "Optional[AsyncTokenCredential]" = None, + state_converter: "Optional[LanggraphStateConverter]" = None, + **kwargs: Any + ) -> None: """ Initialize the LangGraphAdapter with a CompiledStateGraph or a function that returns one. - :param graph: The LangGraph StateGraph to adapt, or a callable that takes ToolClient and returns CompiledStateGraph (sync or async). + :param graph: The LangGraph StateGraph to adapt, or a callable that takes ToolClient + and returns CompiledStateGraph (sync or async). :type graph: Union[CompiledStateGraph, GraphFactory] :param credentials: Azure credentials for authentication. :type credentials: Optional[AsyncTokenCredential] @@ -68,7 +74,7 @@ def __init__(self, graph: Union[CompiledStateGraph, GraphFactory], credentials: self._graph_or_factory: Union[CompiledStateGraph, GraphFactory] = graph self._resolved_graph: "Optional[CompiledStateGraph]" = None self.azure_ai_tracer = None - + # If graph is already compiled, validate and set up state converter if isinstance(graph, CompiledStateGraph): self._resolved_graph = graph @@ -105,7 +111,7 @@ async def agent_run(self, context: AgentRunContext): graph = self._resolved_graph else: graph = self._resolved_graph - + input_data = self.state_converter.request_to_state(context) logger.debug(f"Converted input data: {input_data}") if not context.stream: @@ -122,15 +128,17 @@ async def agent_run(self, context: AgentRunContext): logger.warning(f"Error closing tool_client: {e}") async def _resolve_graph(self, context: AgentRunContext): - """ - Resolve the graph if it's a factory function (for single-use/first-time resolution). 
+ """Resolve the graph if it's a factory function (for single-use/first-time resolution). Creates a ToolClient and calls the factory function with it. This is used for the initial resolution to set up state_converter. + + :param context: The context for the agent run. + :type context: AgentRunContext """ if callable(self._graph_or_factory): logger.debug("Resolving graph from factory function") - - + + # Create ToolClient with credentials tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) tool_client_wrapper = ToolClient(tool_client) @@ -143,14 +151,14 @@ async def _resolve_graph(self, context: AgentRunContext): self._resolved_graph = await result else: self._resolved_graph = result - + # Validate and set up state converter if not already set from initialization if not self.state_converter: if is_state_schema_valid(self._resolved_graph.builder.state_schema): self.state_converter = LanggraphMessageStateConverter() else: raise ValueError("state_converter is required for non-MessagesState graph.") - + logger.debug("Graph resolved successfully") else: # Should not reach here, but just in case @@ -168,7 +176,7 @@ async def _resolve_graph_for_request(self, context: AgentRunContext): :rtype: tuple[CompiledStateGraph, ToolClient] """ logger.debug("Resolving fresh graph from factory function for request") - + # Create ToolClient with credentials tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) tool_client_wrapper = ToolClient(tool_client) @@ -181,14 +189,14 @@ async def _resolve_graph_for_request(self, context: AgentRunContext): graph = await result else: graph = result - + # Ensure state converter is set up (use existing one or create new) if not self.state_converter: if is_state_schema_valid(graph.builder.state_schema): self.state_converter = LanggraphMessageStateConverter() else: raise ValueError("state_converter is required for non-MessagesState graph.") - + 
logger.debug("Fresh graph resolved successfully for request") return graph, tool_client_wrapper @@ -245,7 +253,13 @@ async def agent_run_non_stream(self, input_data: dict, context: AgentRunContext, logger.error(f"Error during agent run: {e}") raise e - async def agent_run_astream(self, input_data: dict, context: AgentRunContext, graph: CompiledStateGraph, tool_client: "Optional[ToolClient]" = None): + async def agent_run_astream( + self, + input_data: dict, + context: AgentRunContext, + graph: CompiledStateGraph, + tool_client: "Optional[ToolClient]" = None + ): """ Run the agent with streaming response. @@ -275,7 +289,7 @@ async def agent_run_astream(self, input_data: dict, context: AgentRunContext, gr # Close tool_client if provided if tool_client is not None: try: - await tool_client._tool_client.close() + await tool_client.close() logger.debug("Closed tool_client after streaming completed") except Exception as e: logger.warning(f"Error closing tool_client in stream: {e}") diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py index 5a5b75c13a03..49e36c54d802 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py @@ -11,7 +11,7 @@ if TYPE_CHECKING: from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient, FoundryTool - +# pylint: disable=client-accepts-api-version-keyword,missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs class ToolClient: """Client that integrates AzureAIToolClient with LangGraph. @@ -56,7 +56,7 @@ class ToolClient: :meta private: """ - + def __init__(self, tool_client: "AzureAIToolClient") -> None: """Initialize the ToolClient. 
@@ -65,7 +65,7 @@ def __init__(self, tool_client: "AzureAIToolClient") -> None: """ self._tool_client = tool_client self._langchain_tools_cache: List[StructuredTool] = None - + async def list_tools(self) -> List[StructuredTool]: """List all available tools as LangChain BaseTool instances. @@ -91,16 +91,16 @@ async def list_tools(self) -> List[StructuredTool]: # Get tools from AzureAIToolClient if self._langchain_tools_cache is not None: return self._langchain_tools_cache - + azure_tools = await self._tool_client.list_tools() - self._langchain_tools_cache = [] + self._langchain_tools_cache = [] # Convert to LangChain StructuredTool instances for azure_tool in azure_tools: langchain_tool = self._convert_to_langchain_tool(azure_tool) self._langchain_tools_cache.append(langchain_tool) - + return self._langchain_tools_cache - + def _convert_to_langchain_tool(self, azure_tool: "FoundryTool") -> StructuredTool: """Convert an AzureAITool to a LangChain StructuredTool. @@ -111,23 +111,27 @@ def _convert_to_langchain_tool(self, azure_tool: "FoundryTool") -> StructuredToo """ # Get the input schema from the tool descriptor input_schema = azure_tool.input_schema or {} - + # Create a Pydantic model for the tool's input schema args_schema = self._create_pydantic_model( tool_name=azure_tool.name, schema=input_schema ) - + # Create an async function that invokes the tool async def tool_func(**kwargs: Any) -> str: - """Invoke the Azure AI tool.""" + """Invoke the Azure AI tool. + + :return: The result from the tool invocation as a string. 
+ :rtype: str + """ result = await azure_tool(**kwargs) # Convert result to string for LangChain compatibility if isinstance(result, dict): import json return json.dumps(result) return str(result) - + # Create a StructuredTool with the async function structured_tool = StructuredTool( name=azure_tool.name, @@ -135,9 +139,9 @@ async def tool_func(**kwargs: Any) -> str: coroutine=tool_func, args_schema=args_schema, ) - + return structured_tool - + def _create_pydantic_model( self, tool_name: str, @@ -155,16 +159,16 @@ def _create_pydantic_model( # Get properties from schema properties = schema.get("properties", {}) required_fields = schema.get("required", []) - + # Build field definitions for Pydantic model field_definitions = {} for prop_name, prop_schema in properties.items(): prop_type = self._json_type_to_python_type(prop_schema.get("type", "string")) prop_description = prop_schema.get("description", "") - + # Determine if field is required is_required = prop_name in required_fields - + if is_required: field_definitions[prop_name] = ( prop_type, @@ -175,11 +179,11 @@ def _create_pydantic_model( Optional[prop_type], Field(None, description=prop_description) ) - + # Create the model dynamically model_name = f"{tool_name.replace('-', '_').replace(' ', '_').title()}Input" return create_model(model_name, **field_definitions) - + def _json_type_to_python_type(self, json_type: str) -> type: """Convert JSON schema type to Python type. @@ -197,15 +201,24 @@ def _json_type_to_python_type(self, json_type: str) -> type: "object": dict, } return type_mapping.get(json_type, str) - + async def close(self) -> None: await self._tool_client.close() - + async def __aenter__(self) -> "ToolClient": - """Async context manager entry.""" + """Async context manager entry. + + :return: The ToolClient instance. + :rtype: ToolClient + """ return self - + async def __aexit__(self, *exc_details: Any) -> None: - """Async context manager exit.""" + """Async context manager exit. 
+ + :param exc_details: Exception details if an exception occurred. + :type exc_details: Any + :return: None + :rtype: None + """ # The tool_client lifecycle is managed externally - pass From 76f935b2319ee86c092b5d3059a7bb6e04eccb98 Mon Sep 17 00:00:00 2001 From: Ganesh Bheemarasetty Date: Thu, 13 Nov 2025 16:32:35 -0800 Subject: [PATCH 17/94] Lint and mypy fixes --- .../agentserver/core/client/tools/_client.py | 56 +----------------- .../core/client/tools/aio/_client.py | 58 +------------------ .../azure/ai/agentserver/core/constants.py | 1 + .../azure/ai/agentserver/core/server/base.py | 41 ++++++++----- .../core/server/common/agent_run_context.py | 6 +- .../ai/agentserver/langgraph/__init__.py | 2 +- .../ai/agentserver/langgraph/langgraph.py | 10 ++-- .../ai/agentserver/langgraph/tool_client.py | 12 ++-- 8 files changed, 47 insertions(+), 139 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py index a7afd935df64..df19d4663fb3 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py @@ -13,58 +13,6 @@ from .operations._operations import MCPToolsOperations, RemoteToolsOperations from ._utils._model_base import InvocationPayloadBuilder from ._model_base import FoundryTool, ToolSource - -class AzureAITool: - """Azure AI tool wrapper for invocation. - - Represents a single tool that can be invoked either via MCP protocol or - Azure AI Tools API. This class provides a convenient interface for tool - invocation and exposes tool metadata. - - :ivar str name: The name of the tool. - :ivar str description: Human-readable description of what the tool does. - :ivar dict metadata: Additional metadata about the tool from the API. 
- :ivar ~Tool_Client.models.ToolSource source: - The source of the tool (MCP_TOOLS or REMOTE_TOOLS). - - .. admonition:: Example: - - .. literalinclude:: ../samples/simple_example.py - :start-after: [START use_tool] - :end-before: [END use_tool] - :language: python - :dedent: 4 - :caption: Using an AzureAITool instance. - """ - - def __init__(self, client: "AzureAIToolClient", descriptor: FoundryTool) -> None: - """Initialize an Azure AI Tool. - - :param client: Parent client instance for making API calls. - :type client: AzureAIToolClient - :param descriptor: Tool descriptor containing metadata and configuration. - :type descriptor: ~Tool_Client.models.FoundryTool - """ - self._client = client - self._descriptor = descriptor - self.name = descriptor.name - self.description = descriptor.description - self.metadata = dict(descriptor.metadata) - self.source = descriptor.source - - def invoke(self, *args: Any, **kwargs: Any) -> Any: - """Invoke the tool synchronously. - - :param args: Positional arguments to pass to the tool. - :param kwargs: Keyword arguments to pass to the tool. - :return: The result from the tool invocation. - :rtype: Any - """ - payload = InvocationPayloadBuilder.build_payload(args, kwargs, {}) - return self._client._invoke_tool(self._descriptor, payload) - - def __call__(self, *args: Any, **kwargs: Any) -> Any: - return self.invoke(*args, **kwargs) class AzureAIToolClient: """Synchronous client for aggregating tools from Azure AI MCP and Tools APIs. 
@@ -189,11 +137,9 @@ def invoke_tool( return self._invoke_tool(descriptor, payload, **kwargs) def _resolve_tool_descriptor( - self, tool: Union[AzureAITool, str, FoundryTool] + self, tool: Union[str, FoundryTool] ) -> FoundryTool: """Resolve a tool reference to a descriptor.""" - if isinstance(tool, AzureAITool): - return tool._descriptor if isinstance(tool, FoundryTool): return tool if isinstance(tool, str): diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py index 8fd092bab5f1..b49ed2b971cd 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py @@ -16,58 +16,6 @@ if TYPE_CHECKING: from azure.core.credentials_async import AsyncTokenCredential -class AzureAITool: - """Azure AI tool wrapper for invocation. - - Represents a single tool that can be invoked either via MCP protocol or - Azure AI Tools API. This class provides a convenient interface for tool - invocation and exposes tool metadata. - - :ivar str name: The name of the tool. - :ivar str description: Human-readable description of what the tool does. - :ivar dict metadata: Additional metadata about the tool from the API. - :ivar ~Tool_Client.models.ToolSource source: - The source of the tool (MCP_TOOLS or REMOTE_TOOLS). - - .. admonition:: Example: - - .. literalinclude:: ../samples/simple_example.py - :start-after: [START use_tool] - :end-before: [END use_tool] - :language: python - :dedent: 4 - :caption: Using an AzureAITool instance. - """ - - def __init__(self, client: "AzureAIToolClient", descriptor: FoundryTool) -> None: - """Initialize an Azure AI Tool. - - :param client: Parent client instance for making API calls. 
- :type client: AzureAIToolClient - :param descriptor: Tool descriptor containing metadata and configuration. - :type descriptor: ~Tool_Client.models.FoundryTool - """ - self._client = client - self._descriptor = descriptor - self.name = descriptor.name - self.description = descriptor.description - self.metadata = dict(descriptor.metadata) - self.source = descriptor.source - - async def invoke(self, *args: Any, **kwargs: Any) -> Any: - """Invoke the tool asynchronously. - - :param args: Positional arguments to pass to the tool. - :param kwargs: Keyword arguments to pass to the tool. - :return: The result from the tool invocation. - :rtype: Any - """ - payload = InvocationPayloadBuilder.build_payload(args, kwargs, {}) - return await self._client._invoke_tool(self._descriptor, payload) - - async def __call__(self, *args: Any, **kwargs: Any) -> Any: - return await self.invoke(*args, **kwargs) - class AzureAIToolClient: """Asynchronous client for aggregating tools from Azure AI MCP and Tools APIs. 
@@ -177,7 +125,7 @@ async def _invoker(*args, **kwargs): async def invoke_tool( self, - tool: Union[AzureAITool, str, FoundryTool], + tool: Union[str, FoundryTool], *args: Any, **kwargs: Any, ) -> Any: @@ -193,11 +141,9 @@ async def invoke_tool( return await self._invoke_tool(descriptor, payload, **kwargs) async def _resolve_tool_descriptor( - self, tool: Union[AzureAITool, str, FoundryTool] + self, tool: Union[str, FoundryTool] ) -> FoundryTool: """Resolve a tool reference to a descriptor.""" - if isinstance(tool, AzureAITool): - return tool._descriptor if isinstance(tool, FoundryTool): return tool if isinstance(tool, str): diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py index 33fcb0139fea..b8dd5c328780 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py @@ -11,3 +11,4 @@ class Constants: AGENT_LOG_LEVEL = "AGENT_LOG_LEVEL" AGENT_DEBUG_ERRORS = "AGENT_DEBUG_ERRORS" ENABLE_APPLICATION_INSIGHTS_LOGGER = "AGENT_APP_INSIGHTS_ENABLED" + AZURE_AI_WORKSPACE_ENDPOINT = "AZURE_AI_WORKSPACE_ENDPOINT" diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 1d1ba9e2eb3c..2b6345c66908 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -88,22 +88,23 @@ def set_run_context_to_context_var(self, run_context): ctx.update(res) request_context.set(ctx) - def set_user_info_to_context_var(self, request): - user_info: UserInfo = {} + def set_user_info_to_context_var(self, request) -> UserInfo: + user_info: UserInfo = None try: object_id_header = 
request.headers.get("x-aml-oid", None) - tenant_id_header = request.headers.get("x-aml-tenant-id", None) - - if object_id_header: - user_info["object_id"] = object_id_header - if tenant_id_header: - user_info["tenant_id"] = tenant_id_header + tenant_id_header = request.headers.get("x-aml-tid", None) + if not object_id_header and not tenant_id_header: + return None + user_info = UserInfo( + objectId=object_id_header, + tenantId=tenant_id_header + ) except Exception as e: logger.error(f"Failed to parse X-User-Info header: {e}", exc_info=True) if user_info: ctx = request_context.get() or {} - for key, value in user_info.items(): + for key, value in user_info.to_dict().items(): ctx[f"azure.ai.agentserver.user.{key}"] = str(value) request_context.set(ctx) return user_info @@ -340,12 +341,26 @@ def get_tool_client( logger.debug("Creating AzureAIToolClient with tools: %s", tools) if not self.credentials: raise ValueError("Credentials are required to create Tool Client.") - return AzureAIToolClient( + + workspace_endpoint = os.getenv(Constants.AZURE_AI_WORKSPACE_ENDPOINT) + if workspace_endpoint: + agent_name = os.getenv(Constants.AGENT_NAME) + if not agent_name: + raise ValueError("AGENT_NAME environment variable is required when using workspace endpoint.") + return AzureAIToolClient( + endpoint=workspace_endpoint, + credential=self.credentials, + tools=tools, + user=user_info, + agent_name=agent_name, + ) + else: + return AzureAIToolClient( endpoint=os.getenv(Constants.AZURE_AI_PROJECT_ENDPOINT), credential=self.credentials, - tools = tools, - user = user_info, - ) + tools=tools, + user=user_info, + ) def _event_to_sse_chunk(event: ResponseStreamEvent) -> str: diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py index 5188476b8339..5289df0b3524 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py @@ -7,7 +7,7 @@ from ...models.projects import AgentId, AgentReference, ResponseConversation1 from .id_generator.foundry_id_generator import FoundryIdGenerator from .id_generator.id_generator import IdGenerator - +from ...client.tools._model_base import UserInfo logger = get_logger() @@ -19,7 +19,7 @@ def __init__(self, payload: dict, **kwargs: Any) -> None: self._response_id = self._id_generator.response_id self._conversation_id = self._id_generator.conversation_id self._stream = self.request.get("stream", False) - self._user_info = kwargs.get("user_info", {}) + self._user_info = kwargs.get("user_info", None) self._agent_tools = kwargs.get("agent_tools", []) @property @@ -70,7 +70,7 @@ def get_tools(self) -> list: return self._agent_tools return request_tools - def get_user_info(self) -> dict: + def get_user_info(self) -> UserInfo: return self._user_info diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py index 4ad1719ba56b..569166bc3786 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py @@ -7,6 +7,7 @@ from ._version import VERSION from .tool_client import ToolClient +from .langgraph import LangGraphAdapter if TYPE_CHECKING: # pragma: no cover from . 
import models @@ -19,7 +20,6 @@ def from_langgraph( state_converter: Optional["models.LanggraphStateConverter"] = None, **kwargs: Any ) -> "LangGraphAdapter": - from .langgraph import LangGraphAdapter return LangGraphAdapter(agent, credentials=credentials, state_converter=state_converter, **kwargs) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index 6aac565660dd..beae4faf6499 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -70,7 +70,7 @@ def __init__( :param state_converter: custom state converter. Required if graph state is not MessagesState. :type state_converter: Optional[LanggraphStateConverter] """ - super().__init__(credentials=credentials, **kwargs) + super().__init__(credentials=credentials, **kwargs) # pylint: disable=unexpected-keyword-arg self._graph_or_factory: Union[CompiledStateGraph, GraphFactory] = graph self._resolved_graph: "Optional[CompiledStateGraph]" = None self.azure_ai_tracer = None @@ -140,7 +140,7 @@ async def _resolve_graph(self, context: AgentRunContext): # Create ToolClient with credentials - tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) + tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) # pylint: disable=no-member tool_client_wrapper = ToolClient(tool_client) tools = await tool_client_wrapper.list_tools() # Call the factory function with ToolClient @@ -153,7 +153,7 @@ async def _resolve_graph(self, context: AgentRunContext): self._resolved_graph = result # Validate and set up state converter if not already set from initialization - if not self.state_converter: + if not self.state_converter and self._resolved_graph is not None: if 
is_state_schema_valid(self._resolved_graph.builder.state_schema): self.state_converter = LanggraphMessageStateConverter() else: @@ -178,13 +178,13 @@ async def _resolve_graph_for_request(self, context: AgentRunContext): logger.debug("Resolving fresh graph from factory function for request") # Create ToolClient with credentials - tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) + tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) # pylint: disable=no-member tool_client_wrapper = ToolClient(tool_client) tools = await tool_client_wrapper.list_tools() # Call the factory function with ToolClient # Support both sync and async factories import inspect - result = self._graph_or_factory(tools) + result = self._graph_or_factory(tools) # type: ignore[operator] if inspect.iscoroutine(result): graph = await result else: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py index 49e36c54d802..374db1d1d98b 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py @@ -64,7 +64,7 @@ def __init__(self, tool_client: "AzureAIToolClient") -> None: :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient """ self._tool_client = tool_client - self._langchain_tools_cache: List[StructuredTool] = None + self._langchain_tools_cache: Optional[List[StructuredTool]] = None async def list_tools(self) -> List[StructuredTool]: """List all available tools as LangChain BaseTool instances. 
@@ -115,7 +115,7 @@ def _convert_to_langchain_tool(self, azure_tool: "FoundryTool") -> StructuredToo # Create a Pydantic model for the tool's input schema args_schema = self._create_pydantic_model( tool_name=azure_tool.name, - schema=input_schema + schema=dict(input_schema) ) # Create an async function that invokes the tool @@ -176,13 +176,13 @@ def _create_pydantic_model( ) else: field_definitions[prop_name] = ( - Optional[prop_type], - Field(None, description=prop_description) + prop_type, + Field(default=None, description=prop_description) ) # Create the model dynamically - model_name = f"{tool_name.replace('-', '_').replace(' ', '_').title()}Input" - return create_model(model_name, **field_definitions) + model_name = f"{tool_name.replace('-', '_').replace(' ', '_').title()}-Input" + return create_model(model_name, **field_definitions) # type: ignore[call-overload] def _json_type_to_python_type(self, json_type: str) -> type: """Convert JSON schema type to Python type. From ba7ba501c2109ccf3095a9117e4d8adc20da4986 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Thu, 13 Nov 2025 17:44:22 -0800 Subject: [PATCH 18/94] fix mypy and pylint --- .../agentserver/agentframework/tool_client.py | 2 +- .../azure/ai/agentserver/core/server/base.py | 17 ++++++++--------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py index 0049b3982b1c..aa1086a7050f 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py @@ -57,7 +57,7 @@ def __init__(self, tool_client: "AzureAIToolClient") -> None: :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient """ self._tool_client = 
tool_client - self._aifunction_cache: List[AIFunction] = None + self._aifunction_cache: List[AIFunction] = None # mypy: ignore[assignment] async def list_tools(self) -> List[AIFunction]: """List all available tools as Agent Framework tool definitions. diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 2b6345c66908..d39af78e2014 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -2,7 +2,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- # pylint: disable=broad-exception-caught,unused-argument,logging-fstring-interpolation,too-many-statements,too-many-return-statements -# mypy: disable-error-code="name-defined,annotation-unchecked,arg-type" +# mypy: ignore-errors import inspect import json import os @@ -341,7 +341,7 @@ def get_tool_client( logger.debug("Creating AzureAIToolClient with tools: %s", tools) if not self.credentials: raise ValueError("Credentials are required to create Tool Client.") - + workspace_endpoint = os.getenv(Constants.AZURE_AI_WORKSPACE_ENDPOINT) if workspace_endpoint: agent_name = os.getenv(Constants.AGENT_NAME) @@ -354,13 +354,12 @@ def get_tool_client( user=user_info, agent_name=agent_name, ) - else: - return AzureAIToolClient( - endpoint=os.getenv(Constants.AZURE_AI_PROJECT_ENDPOINT), - credential=self.credentials, - tools=tools, - user=user_info, - ) + return AzureAIToolClient( + endpoint=os.getenv(Constants.AZURE_AI_PROJECT_ENDPOINT), + credential=self.credentials, + tools=tools, + user=user_info, + ) def _event_to_sse_chunk(event: ResponseStreamEvent) -> str: From cb15c94161addb5f1afbea08c40ed7b5f4e76b0a Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Thu, 13 Nov 2025 18:28:02 -0800 Subject: [PATCH 
19/94] fix mypy --- .../azure/ai/agentserver/agentframework/tool_client.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py index aa1086a7050f..6f410c29d484 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py @@ -1,6 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- +# mypy: disable-error-code="assignment" """Tool client for integrating AzureAIToolClient with Agent Framework.""" from typing import TYPE_CHECKING, Any, Dict, List, Optional @@ -57,7 +58,7 @@ def __init__(self, tool_client: "AzureAIToolClient") -> None: :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient """ self._tool_client = tool_client - self._aifunction_cache: List[AIFunction] = None # mypy: ignore[assignment] + self._aifunction_cache: List[AIFunction] = None async def list_tools(self) -> List[AIFunction]: """List all available tools as Agent Framework tool definitions. 
From 1259358c28afed8fdb400878952f1751a55dff66 Mon Sep 17 00:00:00 2001 From: Jun'an Chen Date: Sun, 16 Nov 2025 17:38:33 -0800 Subject: [PATCH 20/94] [ai-agentserver] Fix AF streaming issue (#44068) * fix streaming issue in af * fix streaming issue in af * update version to 1.0.0b5 --- .../CHANGELOG.md | 7 +- .../ai/agentserver/agentframework/_version.py | 2 +- .../agentframework/agent_framework.py | 58 +- ...nt_framework_output_streaming_converter.py | 711 ++++++------------ .../agentframework/models/utils/__init__.py | 5 + .../agentframework/models/utils/async_iter.py | 136 ++++ .../azure-ai-agentserver-core/CHANGELOG.md | 5 + .../azure/ai/agentserver/core/_version.py | 2 +- .../azure/ai/agentserver/core/server/base.py | 2 +- .../CHANGELOG.md | 6 + .../ai/agentserver/langgraph/_version.py | 2 +- 11 files changed, 416 insertions(+), 520 deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index 15d90e5660ab..a73c24633579 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -1,5 +1,10 @@ # Release History +## 1.0.0b5 (2025-11-16) + +### Bugs Fixed + +- Fixed streaming generation issues. ## 1.0.0b4 (2025-11-13) @@ -27,7 +32,7 @@ - Fixed Id generator format. -- Improved stream mode error messsage. +- Improved stream mode error message. - Updated application insights related configuration environment variables. 
diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py index 22553b18fb7e..c7d155d924dd 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b4" +VERSION = "1.0.0b5" diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 77f3a4b1ce85..f90545d7d1f6 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -4,7 +4,6 @@ # pylint: disable=logging-fstring-interpolation,no-name-in-module from __future__ import annotations -import asyncio # pylint: disable=do-not-import-asyncio import os from typing import TYPE_CHECKING, Any, AsyncGenerator, Awaitable, Optional, Protocol, Union, List import inspect @@ -40,14 +39,14 @@ class AgentFactory(Protocol): """Protocol for agent factory functions. - + An agent factory is a callable that takes a ToolClient and returns an AgentProtocol, either synchronously or asynchronously. """ def __call__(self, tools: List[AIFunction]) -> Union[AgentProtocol, Awaitable[AgentProtocol]]: """Create an AgentProtocol using the provided ToolClient. - + :param tools: The list of AIFunction tools available to the agent. 
:type tools: List[AIFunction] :return: An Agent Framework agent, or an awaitable that resolves to one. @@ -97,7 +96,7 @@ def __init__(self, agent: Union[AgentProtocol, AgentFactory], @property def agent(self) -> "Optional[AgentProtocol]": """Get the resolved agent. This property provides backward compatibility. - + :return: The resolved AgentProtocol if available, None otherwise. :rtype: Optional[AgentProtocol] """ @@ -220,30 +219,35 @@ async def agent_run( # pylint: disable=too-many-statements async def stream_updates(): try: update_count = 0 - timeout_s = self._resolve_stream_timeout(context.request) - logger.info("Starting streaming with idle-timeout=%.2fs", timeout_s) - for ev in streaming_converter.initial_events(): - yield ev - - # Iterate with per-update timeout; terminate if idle too long - aiter = agent.run_stream(message).__aiter__() - while True: - try: - update = await asyncio.wait_for(aiter.__anext__(), timeout=timeout_s) - except StopAsyncIteration: - logger.debug("Agent streaming iterator finished (StopAsyncIteration)") - break - except asyncio.TimeoutError: - logger.warning("Streaming idle timeout reached (%.1fs); terminating stream.", timeout_s) - for ev in streaming_converter.completion_events(): - yield ev - return + updates = agent.run_stream(message) + async for event in streaming_converter.convert(updates): update_count += 1 - transformed = streaming_converter.transform_output_for_streaming(update) - for event in transformed: - yield event - for ev in streaming_converter.completion_events(): - yield ev + yield event + + # timeout_s = self._resolve_stream_timeout(context.request) + # logger.info("Starting streaming with idle-timeout=%.2fs", timeout_s) + # for ev in streaming_converter.initial_events(): + # yield ev + # + # # Iterate with per-update timeout; terminate if idle too long + # aiter = agent.run_stream(message).__aiter__() + # while True: + # try: + # update = await asyncio.wait_for(aiter.__anext__(), timeout=timeout_s) + # except 
StopAsyncIteration: + # logger.debug("Agent streaming iterator finished (StopAsyncIteration)") + # break + # except asyncio.TimeoutError: + # logger.warning("Streaming idle timeout reached (%.1fs); terminating stream.", timeout_s) + # for ev in streaming_converter.completion_events(): + # yield ev + # return + # update_count += 1 + # transformed = streaming_converter.transform_output_for_streaming(update) + # for event in transformed: + # yield event + # for ev in streaming_converter.completion_events(): + # yield ev logger.info("Streaming completed with %d updates", update_count) finally: # Close tool_client if it was created for this request diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 4e3d12d4563e..96beb535d3fb 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -7,10 +7,9 @@ import datetime import json -import uuid -from typing import Any, List, Optional, cast +from typing import AsyncIterable, List, Optional -from agent_framework import AgentRunResponseUpdate, FunctionApprovalRequestContent, FunctionResultContent +from agent_framework import AgentRunResponseUpdate, BaseContent, FunctionApprovalRequestContent, FunctionResultContent from agent_framework._types import ( ErrorContent, FunctionCallContent, @@ -18,7 +17,6 @@ ) from azure.ai.agentserver.core import AgentRunContext -from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.models import ( Response as OpenAIResponse, ResponseStreamEvent, @@ -27,6 +25,7 @@ 
FunctionToolCallItemResource, FunctionToolCallOutputItemResource, ItemContentOutputText, + ItemResource, ResponseCompletedEvent, ResponseContentPartAddedEvent, ResponseContentPartDoneEvent, @@ -43,397 +42,187 @@ ) from .agent_id_generator import AgentIdGenerator - -logger = get_logger() +from .utils.async_iter import chunk_on_change, peek class _BaseStreamingState: """Base interface for streaming state handlers.""" - def prework(self, ctx: Any) -> List[ResponseStreamEvent]: # pylint: disable=unused-argument - return [] - - def convert_content(self, ctx: Any, content) -> List[ResponseStreamEvent]: # pylint: disable=unused-argument + def convert_contents(self, contents: AsyncIterable[BaseContent]) -> AsyncIterable[ResponseStreamEvent]: # pylint: disable=unused-argument raise NotImplementedError - def afterwork(self, ctx: Any) -> List[ResponseStreamEvent]: # pylint: disable=unused-argument - return [] - class _TextContentStreamingState(_BaseStreamingState): """State handler for text and reasoning-text content during streaming.""" - def __init__(self, context: AgentRunContext) -> None: - self.context = context - self.item_id = None - self.output_index = None - self.text_buffer = "" - self.text_part_started = False - - def prework(self, ctx: Any) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - if self.item_id is not None: - return events - - # Start a new assistant message item (in_progress) - self.item_id = self.context.id_generator.generate_message_id() - self.output_index = ctx._next_output_index # pylint: disable=protected-access - ctx._next_output_index += 1 - - message_item = ResponsesAssistantMessageItemResource( - id=self.item_id, - status="in_progress", - content=[], + def __init__(self, parent: AgentFrameworkOutputStreamingConverter): + self._parent = parent + + async def convert_contents(self, contents: AsyncIterable[TextContent]) -> AsyncIterable[ResponseStreamEvent]: + item_id = 
self._parent.context.id_generator.generate_message_id() + output_index = self._parent.next_output_index() + + yield ResponseOutputItemAddedEvent( + sequence_number=self._parent.next_sequence(), + output_index=output_index, + item=ResponsesAssistantMessageItemResource( + id=item_id, + status="in_progress", + content=[], + ), ) - events.append( - ResponseOutputItemAddedEvent( - sequence_number=ctx.next_sequence(), - output_index=self.output_index, - item=message_item, - ) + yield ResponseContentPartAddedEvent( + sequence_number=self._parent.next_sequence(), + item_id=item_id, + output_index=output_index, + content_index=0, + part=ItemContentOutputText(text="", annotations=[], logprobs=[]), ) - if not self.text_part_started: - empty_part = ItemContentOutputText(text="", annotations=[], logprobs=[]) - events.append( - ResponseContentPartAddedEvent( - sequence_number=ctx.next_sequence(), - item_id=self.item_id, - output_index=self.output_index, - content_index=0, - part=empty_part, - ) - ) - self.text_part_started = True - return events - - def convert_content(self, ctx: Any, content: TextContent) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - if isinstance(content, TextContent): - delta = content.text or "" - else: - delta = getattr(content, "text", None) or getattr(content, "reasoning", "") or "" - - # buffer accumulated text - self.text_buffer += delta - - # emit delta event for text - assert self.item_id is not None, "Text state not initialized: missing item_id" - assert self.output_index is not None, "Text state not initialized: missing output_index" - events.append( - ResponseTextDeltaEvent( - sequence_number=ctx.next_sequence(), - item_id=self.item_id, - output_index=self.output_index, + text = "" + async for content in contents: + delta = content.text + text += delta + + yield ResponseTextDeltaEvent( + sequence_number=self._parent.next_sequence(), + item_id=item_id, + output_index=output_index, content_index=0, delta=delta, ) + + yield 
ResponseTextDoneEvent( + sequence_number=self._parent.next_sequence(), + item_id=item_id, + output_index=output_index, + content_index=0, + text=text, ) - return events - - def afterwork(self, ctx: Any) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - if not self.item_id: - return events - - full_text = self.text_buffer - assert self.item_id is not None and self.output_index is not None - events.append( - ResponseTextDoneEvent( - sequence_number=ctx.next_sequence(), - item_id=self.item_id, - output_index=self.output_index, - content_index=0, - text=full_text, - ) - ) - final_part = ItemContentOutputText(text=full_text, annotations=[], logprobs=[]) - events.append( - ResponseContentPartDoneEvent( - sequence_number=ctx.next_sequence(), - item_id=self.item_id, - output_index=self.output_index, - content_index=0, - part=final_part, - ) - ) - completed_item = ResponsesAssistantMessageItemResource( - id=self.item_id, status="completed", content=[final_part] - ) - events.append( - ResponseOutputItemDoneEvent( - sequence_number=ctx.next_sequence(), - output_index=self.output_index, - item=completed_item, - ) + + content_part = ItemContentOutputText(text=text, annotations=[], logprobs=[]) + yield ResponseContentPartDoneEvent( + sequence_number=self._parent.next_sequence(), + item_id=item_id, + output_index=output_index, + content_index=0, + part=content_part, ) - ctx._last_completed_text = full_text # pylint: disable=protected-access - # store for final response - ctx._completed_output_items.append( - { - "id": self.item_id, - "type": "message", - "status": "completed", - "content": [ - { - "type": "output_text", - "text": full_text, - "annotations": [], - "logprobs": [], - } - ], - "role": "assistant", - } + + item = ResponsesAssistantMessageItemResource(id=item_id, status="completed", content=[content_part]) + yield ResponseOutputItemDoneEvent( + sequence_number=self._parent.next_sequence(), + output_index=output_index, + item=item, ) - # reset 
state - self.item_id = None - self.output_index = None - self.text_buffer = "" - self.text_part_started = False - return events + + self._parent.add_completed_output_item(item) # pylint: disable=protected-access class _FunctionCallStreamingState(_BaseStreamingState): """State handler for function_call content during streaming.""" - def __init__(self, context: AgentRunContext) -> None: - self.context = context - self.item_id = None - self.output_index = None - self.call_id = None - self.name = None - self.args_buffer = "" - self.requires_approval = False - self.approval_request_id: str | None = None - - def prework(self, ctx: Any) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - if self.item_id is not None: - return events - # initialize function-call item - self.item_id = self.context.id_generator.generate_function_call_id() - self.output_index = ctx._next_output_index - ctx._next_output_index += 1 - - self.call_id = self.call_id or str(uuid.uuid4()) - function_item = FunctionToolCallItemResource( - id=self.item_id, - status="in_progress", - call_id=self.call_id, - name=self.name or "", - arguments="", - ) - events.append( - ResponseOutputItemAddedEvent( - sequence_number=ctx.next_sequence(), - output_index=self.output_index, - item=function_item, - ) - ) - return events - - def convert_content(self, ctx: Any, content: FunctionCallContent) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - # record identifiers (once available) - self.name = getattr(content, "name", None) or self.name or "" - self.call_id = getattr(content, "call_id", None) or self.call_id or str(uuid.uuid4()) - - args_delta = content.arguments if isinstance(content.arguments, str) else json.dumps(content.arguments) - args_delta = args_delta or "" - self.args_buffer += args_delta - assert self.item_id is not None and self.output_index is not None - for ch in args_delta: - events.append( - ResponseFunctionCallArgumentsDeltaEvent( - 
sequence_number=ctx.next_sequence(), - item_id=self.item_id, - output_index=self.output_index, - delta=ch, + def __init__(self, parent: AgentFrameworkOutputStreamingConverter): + self._parent = parent + + async def convert_contents(self, contents: AsyncIterable[FunctionCallContent]) -> AsyncIterable[ResponseStreamEvent]: + content_by_call_id = {} + ids_by_call_id = {} + + async for content in contents: + if content.call_id not in content_by_call_id: + item_id = self._parent.context.id_generator.generate_function_call_id() + output_index = self._parent.next_output_index() + + content_by_call_id[content.call_id] = content + ids_by_call_id[content.call_id] = (item_id, output_index) + + yield ResponseOutputItemAddedEvent( + sequence_number=self._parent.next_sequence(), + output_index=output_index, + item=FunctionToolCallItemResource( + id=item_id, + status="in_progress", + call_id=content.call_id, + name=content.name, + arguments="", + ), ) + continue + else: + content_by_call_id[content.call_id] = content_by_call_id[content.call_id] + content + item_id, output_index = ids_by_call_id[content.call_id] + + args_delta = content.arguments if isinstance(content.arguments, str) else "" + yield ResponseFunctionCallArgumentsDeltaEvent( + sequence_number=self._parent.next_sequence(), + item_id=item_id, + output_index=output_index, + delta=args_delta, ) - # finalize if arguments are detected to be complete - is_done = bool( - getattr(content, "is_final", False) - or getattr(content, "final", False) - or getattr(content, "done", False) - or getattr(content, "arguments_final", False) - or getattr(content, "arguments_done", False) - or getattr(content, "finish", False) - ) - if not is_done and self.args_buffer: - try: - json.loads(self.args_buffer) - is_done = True - except Exception: # pylint: disable=broad-exception-caught - pass - - if is_done: - events.append( - ResponseFunctionCallArgumentsDoneEvent( - sequence_number=ctx.next_sequence(), - item_id=self.item_id, - 
output_index=self.output_index, - arguments=self.args_buffer, - ) + for call_id, content in content_by_call_id.items(): + item_id, output_index = ids_by_call_id[call_id] + args = content.arguments if isinstance(content.arguments, str) else json.dumps(content.arguments) + yield ResponseFunctionCallArgumentsDoneEvent( + sequence_number=self._parent.next_sequence(), + item_id=item_id, + output_index=output_index, + arguments=args, ) - events.extend(self.afterwork(ctx)) - return events - - def afterwork(self, ctx: Any) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - if not self.item_id: - return events - assert self.call_id is not None - done_item = FunctionToolCallItemResource( - id=self.item_id, - status="completed", - call_id=self.call_id, - name=self.name or "", - arguments=self.args_buffer, - ) - assert self.output_index is not None - events.append( - ResponseOutputItemDoneEvent( - sequence_number=ctx.next_sequence(), - output_index=self.output_index, - item=done_item, + + item = FunctionToolCallItemResource( + id=item_id, + status="completed", + call_id=call_id, + name=content.name, + arguments=args, ) - ) - # store for final response - ctx._completed_output_items.append( - { - "id": self.item_id, - "type": "function_call", - "call_id": self.call_id, - "name": self.name or "", - "arguments": self.args_buffer, - "status": "requires_approval" if self.requires_approval else "completed", - "requires_approval": self.requires_approval, - "approval_request_id": self.approval_request_id, - } - ) - # reset - self.item_id = None - self.output_index = None - self.args_buffer = "" - self.call_id = None - self.name = None - self.requires_approval = False - self.approval_request_id = None - return events + yield ResponseOutputItemDoneEvent( + sequence_number=self._parent.next_sequence(), + output_index=output_index, + item=item, + ) + + self._parent.add_completed_output_item(item) # pylint: disable=protected-access class 
_FunctionCallOutputStreamingState(_BaseStreamingState): """Handles function_call_output items streaming (non-chunked simple output).""" - def __init__( - self, - context: AgentRunContext, - call_id: Optional[str] = None, - output: Optional[list[str]] = None, - ) -> None: - # Avoid mutable default argument (Ruff B006) - self.context = context - self.item_id = None - self.output_index = None - self.call_id = call_id - self.output = output if output is not None else [] - - def prework(self, ctx: Any) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - if self.item_id is not None: - return events - self.item_id = self.context.id_generator.generate_function_output_id() - self.output_index = ctx._next_output_index - ctx._next_output_index += 1 - - self.call_id = self.call_id or str(uuid.uuid4()) - item = FunctionToolCallOutputItemResource( - id=self.item_id, - status="in_progress", - call_id=self.call_id, - output="", - ) - events.append( - ResponseOutputItemAddedEvent( - sequence_number=ctx.next_sequence(), - output_index=self.output_index, + def __init__(self, parent: AgentFrameworkOutputStreamingConverter): + self._parent = parent + + async def convert_contents(self, contents: AsyncIterable[FunctionResultContent]) -> AsyncIterable[ResponseStreamEvent]: + async for content in contents: + item_id = self._parent.context.id_generator.generate_function_output_id() + output_index = self._parent.next_output_index() + + output = (f"{type(content.exception)}({str(content.exception)})" + if content.exception + else json.dumps(content.result)) + + item = FunctionToolCallOutputItemResource( + id=item_id, + status="completed", + call_id=content.call_id, + output=output, + ) + + yield ResponseOutputItemAddedEvent( + sequence_number=self._parent.next_sequence(), + output_index=output_index, item=item, ) - ) - return events - - def convert_content(self, ctx: Any, content: Any) -> List[ResponseStreamEvent]: # no delta events for now - events: 
List[ResponseStreamEvent] = [] - # treat entire output as final - result = [] - raw = getattr(content, "result", None) - if isinstance(raw, str): - result = [raw or self.output] - elif isinstance(raw, list): - for item in raw: - result.append(self._coerce_result_text(item)) - self.output = json.dumps(result) if len(result) > 0 else "" - - events.extend(self.afterwork(ctx)) - return events - - def _coerce_result_text(self, value: Any) -> str | dict: - """ - Return a string if value is already str or a TextContent-like object; else str(value). - - :param value: The value to coerce. - :type value: Any - - :return: The coerced string or dict. - :rtype: str | dict - """ - if value is None: - return "" - if isinstance(value, str): - return value - # Direct TextContent instance - if isinstance(value, TextContent): - content_payload = {"type": "text", "text": getattr(value, "text", "")} - return content_payload - - return "" - - def afterwork(self, ctx: Any) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - if not self.item_id: - return events - # Ensure types conform: call_id must be str (guarantee non-None) and output is a single string - str_call_id = self.call_id or "" - single_output: str = cast(str, self.output[0]) if self.output else "" - done_item = FunctionToolCallOutputItemResource( - id=self.item_id, - status="completed", - call_id=str_call_id, - output=single_output, - ) - assert self.output_index is not None - events.append( - ResponseOutputItemDoneEvent( - sequence_number=ctx.next_sequence(), - output_index=self.output_index, - item=done_item, + + yield ResponseOutputItemDoneEvent( + sequence_number=self._parent.next_sequence(), + output_index=output_index, + item=item, ) - ) - ctx._completed_output_items.append( - { - "id": self.item_id, - "type": "function_call_output", - "status": "completed", - "call_id": self.call_id, - "output": self.output, - } - ) - self.item_id = None - self.output_index = None - return events + + 
self._parent.add_completed_output_item(item) # pylint: disable=protected-access class AgentFrameworkOutputStreamingConverter: @@ -442,101 +231,91 @@ class AgentFrameworkOutputStreamingConverter: def __init__(self, context: AgentRunContext) -> None: self._context = context # sequence numbers must start at 0 for first emitted event - self._sequence = 0 - self._response_id = None + self._sequence = -1 + self._next_output_index = -1 + self._response_id = self._context.response_id self._response_created_at = None - self._next_output_index = 0 - self._last_completed_text = "" - self._active_state: Optional[_BaseStreamingState] = None - self._active_kind = None # "text" | "function_call" | "error" - # accumulate completed output items for final response - self._completed_output_items: List[dict] = [] - - def _ensure_response_started(self) -> None: - if not self._response_id: - self._response_id = self._context.response_id - if not self._response_created_at: - self._response_created_at = int(datetime.datetime.now(datetime.timezone.utc).timestamp()) + self._completed_output_items: List[ItemResource] = [] def next_sequence(self) -> int: self._sequence += 1 return self._sequence - def _switch_state(self, kind: str) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - if self._active_state and self._active_kind != kind: - events.extend(self._active_state.afterwork(self)) - self._active_state = None - self._active_kind = None - - if self._active_state is None: - if kind == "text": - self._active_state = _TextContentStreamingState(self._context) - elif kind == "function_call": - self._active_state = _FunctionCallStreamingState(self._context) - elif kind == "function_call_output": - self._active_state = _FunctionCallOutputStreamingState(self._context) - else: - self._active_state = None - self._active_kind = kind - if self._active_state: - events.extend(self._active_state.prework(self)) - return events - - def transform_output_for_streaming(self, update: 
AgentRunResponseUpdate) -> List[ResponseStreamEvent]: - logger.debug( - "Transforming streaming update with %d contents", - len(update.contents) if getattr(update, "contents", None) else 0, - ) + def next_output_index(self) -> int: + self._next_output_index += 1 + return self._next_output_index + + def add_completed_output_item(self, item: ItemResource) -> None: + self._completed_output_items.append(item) + + @property + def context(self) -> AgentRunContext: + return self._context + + async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> AsyncIterable[ResponseStreamEvent]: self._ensure_response_started() - events: List[ResponseStreamEvent] = [] - - if getattr(update, "contents", None): - for i, content in enumerate(update.contents): - logger.debug("Processing content %d: %s", i, type(content)) - if isinstance(content, TextContent): - events.extend(self._switch_state("text")) - if isinstance(self._active_state, _TextContentStreamingState): - events.extend(self._active_state.convert_content(self, content)) - elif isinstance(content, FunctionCallContent): - events.extend(self._switch_state("function_call")) - if isinstance(self._active_state, _FunctionCallStreamingState): - events.extend(self._active_state.convert_content(self, content)) - elif isinstance(content, FunctionResultContent): - events.extend(self._switch_state("function_call_output")) - if isinstance(self._active_state, _FunctionCallOutputStreamingState): - call_id = getattr(content, "call_id", None) - if call_id: - self._active_state.call_id = call_id - events.extend(self._active_state.convert_content(self, content)) - elif isinstance(content, FunctionApprovalRequestContent): - events.extend(self._switch_state("function_call")) - if isinstance(self._active_state, _FunctionCallStreamingState): - self._active_state.requires_approval = True - self._active_state.approval_request_id = getattr(content, "id", None) - events.extend(self._active_state.convert_content(self, 
content.function_call)) - elif isinstance(content, ErrorContent): - # errors are stateless; flush current state and emit error - events.extend(self._switch_state("error")) - events.append( - ResponseErrorEvent( - sequence_number=self.next_sequence(), - code=getattr(content, "error_code", None) or "server_error", - message=getattr(content, "message", None) or "An error occurred", - param="", - ) - ) - return events - - def finalize_last_content(self) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - if self._active_state: - events.extend(self._active_state.afterwork(self)) - self._active_state = None - self._active_kind = None - return events - - def build_response(self, status: str) -> OpenAIResponse: + + created_response = self._build_response(status="in_progress") + yield ResponseCreatedEvent( + sequence_number=self.next_sequence(), + response=created_response, + ) + + yield ResponseInProgressEvent( + sequence_number=self.next_sequence(), + response=created_response, + ) + + is_changed = lambda a, b: a is not None and b is not None and a.message_id != b.message_id + async for group in chunk_on_change(updates, is_changed): + has_value, first, contents = await peek(self._read_updates(group)) + if not has_value: + continue + + state = None + if isinstance(first, TextContent): + state = _TextContentStreamingState(self) + elif isinstance(first, (FunctionCallContent, FunctionApprovalRequestContent)): + state = _FunctionCallStreamingState(self) + elif isinstance(first, FunctionResultContent): + state = _FunctionCallOutputStreamingState(self) + elif isinstance(first, ErrorContent): + yield ResponseErrorEvent( + sequence_number=self.next_sequence(), + code=getattr(first, "error_code", None) or "server_error", + message=getattr(first, "message", None) or "An error occurred", + param="", + ) + continue + + async for content in state.convert_contents(contents): + yield content + + yield ResponseCompletedEvent( + sequence_number=self.next_sequence(), + 
response=self._build_response(status="completed"), + ) + + @staticmethod + async def _read_updates(updates: AsyncIterable[AgentRunResponseUpdate]) -> AsyncIterable[BaseContent]: + async for update in updates: + if not update.contents: + continue + + accepted_types = (TextContent, + FunctionCallContent, + FunctionApprovalRequestContent, + FunctionResultContent, + ErrorContent) + for content in update.contents: + if isinstance(content, accepted_types): + yield content + + def _ensure_response_started(self) -> None: + if not self._response_created_at: + self._response_created_at = int(datetime.datetime.now(datetime.timezone.utc).timestamp()) + + def _build_response(self, status: str) -> OpenAIResponse: self._ensure_response_started() agent_id = AgentIdGenerator.generate(self._context) response_data = { @@ -550,47 +329,3 @@ def build_response(self, status: str) -> OpenAIResponse: if status == "completed" and self._completed_output_items: response_data["output"] = self._completed_output_items return OpenAIResponse(response_data) - - # High-level helpers to emit lifecycle events for streaming - def initial_events(self) -> List[ResponseStreamEvent]: - """ - Emit ResponseCreatedEvent and an initial ResponseInProgressEvent. - - :return: List of initial response stream events. - :rtype: List[ResponseStreamEvent] - """ - self._ensure_response_started() - events: List[ResponseStreamEvent] = [] - created_response = self.build_response(status="in_progress") - events.append( - ResponseCreatedEvent( - sequence_number=self.next_sequence(), - response=created_response, - ) - ) - events.append( - ResponseInProgressEvent( - sequence_number=self.next_sequence(), - response=self.build_response(status="in_progress"), - ) - ) - return events - - def completion_events(self) -> List[ResponseStreamEvent]: - """ - Finalize any active content and emit a single ResponseCompletedEvent. - - :return: List of completion response stream events. 
- :rtype: List[ResponseStreamEvent] - """ - self._ensure_response_started() - events: List[ResponseStreamEvent] = [] - events.extend(self.finalize_last_content()) - completed_response = self.build_response(status="completed") - events.append( - ResponseCompletedEvent( - sequence_number=self.next_sequence(), - response=completed_response, - ) - ) - return events diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/__init__.py new file mode 100644 index 000000000000..28077537d94b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/__init__.py @@ -0,0 +1,5 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py new file mode 100644 index 000000000000..ef8525109554 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py @@ -0,0 +1,136 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +from __future__ import annotations + +from collections.abc import AsyncIterable, AsyncIterator, Callable +from typing import TypeVar, Optional, Tuple, Awaitable + +TSource = TypeVar("TSource") +TKey = TypeVar("TKey") +T = TypeVar("T") + + +async def chunk_on_change( + source: AsyncIterable[TSource], + is_changed: Optional[Callable[[Optional[TSource], Optional[TSource]], bool]] = None, +) -> AsyncIterator[AsyncIterable[TSource]]: + """ + Chunks an async iterable into groups based on when consecutive elements change. + + :param source: Async iterable of items. + :param is_changed: Function(prev, current) -> bool indicating if value changed. + If None, uses != by default. + :return: An async iterator of async iterables (chunks). + """ + + if is_changed is None: + # Default equality: use the value itself as key, == as equality + async for group in chunk_by_key(source, lambda x: x): + yield group + else: + # Equivalent to C#: EqualityComparer.Create((x, y) => !isChanged(x, y)) + def key_equal(a: TSource, b: TSource) -> bool: + return not is_changed(a, b) + + async for group in chunk_by_key(source, lambda x: x, key_equal=key_equal): + yield group + + +async def chunk_by_key( + source: AsyncIterable[TSource], + key_selector: Callable[[TSource], TKey], + key_equal: Optional[Callable[[TKey, TKey], bool]] = None, +) -> AsyncIterator[AsyncIterable[TSource]]: + """ + Chunks the async iterable into groups based on a key selector. + + :param source: Async iterable of items. + :param key_selector: Function mapping item -> key. + :param key_equal: Optional equality function for keys. Defaults to '=='. + :return: An async iterator of async iterables (chunks). 
+ """ + + if key_equal is None: + def key_equal(a: TKey, b: TKey) -> bool: # type: ignore[no-redef] + return a == b + + it = source.__aiter__() + + # Prime the iterator + try: + pending = await it.__anext__() + except StopAsyncIteration: + return + + pending_key = key_selector(pending) + has_pending = True + + while has_pending: + current_key = pending_key + + async def inner() -> AsyncIterator[TSource]: + nonlocal pending, pending_key, has_pending + + # First element of the group + yield pending + + # Consume until key changes or source ends + while True: + try: + item = await it.__anext__() + except StopAsyncIteration: + # Source ended; tell outer loop to stop after this group + has_pending = False + return + + k = key_selector(item) + if not key_equal(k, current_key): + # Hand first item of next group back to outer loop + pending = item + pending_key = k + return + + yield item + + # Yield an async iterable representing the current chunk + yield inner() + + +async def peek( + source: AsyncIterable[T], +) -> Tuple[bool, Optional[T], AsyncIterable[T]]: + """ + Peeks at the first element of an async iterable without consuming it. + + :param source: Async iterable. 
+ :return: (has_value, first, full_sequence_including_first) + """ + + it = source.__aiter__() + + try: + first = await it.__anext__() + except StopAsyncIteration: + return False, None, _empty_async() + + async def sequence() -> AsyncIterator[T]: + try: + # Yield the peeked element first + yield first + # Then the rest of the original iterator + async for item in it: + yield item + finally: + # Try to close underlying async generator if it supports it + aclose = getattr(it, "aclose", None) + if aclose is not None: + await aclose() + + return True, first, sequence() + + +async def _empty_async() -> AsyncIterator[T]: + if False: + # This is just to make this an async generator for typing + yield None # type: ignore[misc] diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index 15d90e5660ab..a7cfbd49dd22 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -1,5 +1,10 @@ # Release History +## 1.0.0b5 (2025-11-16) + +### Bugs Fixed + +- Fixed streaming generation issues. ## 1.0.0b4 (2025-11-13) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py index 22553b18fb7e..c7d155d924dd 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b4" +VERSION = "1.0.0b5" diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index d39af78e2014..bc5a15a37775 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -375,7 +375,7 @@ def _format_error(exc: Exception) -> str: return message if DEBUG_ERRORS: return repr(exc) - return "Internal error" + return f"{type(exc)}: Internal error" def _to_response(result: Union[Response, dict]) -> Response: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index 15d90e5660ab..c7c4aaaa6369 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -1,5 +1,11 @@ # Release History +## 1.0.0b5 (2025-11-16) + +### Bugs Fixed + +- Fixed streaming generation issues. + ## 1.0.0b4 (2025-11-13) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py index 22553b18fb7e..c7d155d924dd 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b4" +VERSION = "1.0.0b5" From a26603e1db87e10aaaf9a9ac51c4e1c2bb083e48 Mon Sep 17 00:00:00 2001 From: Ganesh Bheemarasetty <1634042+ganeshyb@users.noreply.github.com> Date: Sun, 16 Nov 2025 23:24:22 -0800 Subject: [PATCH 21/94] Refactor Azure AI Tool Client Configuration and Enhance OAuth Consent Handling * Refactor Azure AI Tool Client Configuration and Enhance OAuth Consent Handling - Consolidated the AzureAIToolClientConfiguration class by removing redundant code and improving clarity. - Introduced OAuth consent handling in the agent's response methods to manage OAuthConsentRequiredError. - Updated the FoundryCBAgent to configure tools endpoint and agent name from environment variables. - Enhanced tool client to propagate OAuth consent errors for better handling in the agent. - Added methods to generate OAuth request IDs and handle OAuth consent requests in the LangGraph response converter. - Updated sample usage to include tool connection ID from environment variables. - Incremented version to 1.0.0b5 for the langgraph package. 
* Address Pylint and mypy issues * Updated change logs --- .../CHANGELOG.md | 4 + .../agentframework/agent_framework.py | 19 +- .../azure-ai-agentserver-core/CHANGELOG.md | 4 + .../ai/agentserver/core/client/__init__.py | 5 + .../agentserver/core/client/tools/__init__.py | 2 +- .../agentserver/core/client/tools/_client.py | 329 ++++++++--------- .../core/client/tools/_configuration.py | 143 ++++---- .../core/client/tools/_exceptions.py | 77 ++-- .../core/client/tools/_model_base.py | 302 ++++++++-------- .../core/client/tools/aio/__init__.py | 2 +- .../core/client/tools/aio/_client.py | 331 +++++++++--------- .../core/client/tools/aio/_configuration.py | 144 ++++---- .../tools/aio/operations/_operations.py | 38 +- .../client/tools/operations/_operations.py | 81 +++-- .../azure/ai/agentserver/core/constants.py | 1 + .../azure/ai/agentserver/core/server/base.py | 217 +++++++++++- .../common/id_generator/id_generator.py | 3 + .../CHANGELOG.md | 4 + .../ai/agentserver/langgraph/langgraph.py | 42 ++- .../ai/agentserver/langgraph/tool_client.py | 2 + .../use_tool_client_example.py | 3 +- 21 files changed, 1018 insertions(+), 735 deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/__init__.py diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index a73c24633579..a01bc1990909 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -2,6 +2,10 @@ ## 1.0.0b5 (2025-11-16) +### Feature Added + +- Support Tools Oauth + ### Bugs Fixed - Fixed streaming generation issues. 
diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index f90545d7d1f6..2a7c28f9a3f8 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -12,6 +12,7 @@ from agent_framework.azure import AzureAIAgentClient # pylint: disable=no-name-in-module from opentelemetry import trace +from azure.ai.agentserver.core.client.tools import OAuthConsentRequiredError from azure.ai.agentserver.core import AgentRunContext, FoundryCBAgent from azure.ai.agentserver.core.constants import Constants as AdapterConstants from azure.ai.agentserver.core.logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger @@ -255,8 +256,8 @@ async def stream_updates(): try: await tool_client.close() logger.debug("Closed tool_client after streaming completed") - except Exception as e: # pylint: disable=broad-exception-caught - logger.warning(f"Error closing tool_client in stream: {e}") + except Exception as ex: # pylint: disable=broad-exception-caught + logger.warning(f"Error closing tool_client in stream: {ex}") return stream_updates() @@ -268,11 +269,21 @@ async def stream_updates(): transformed_result = non_streaming_converter.transform_output_for_response(result) logger.info("Agent run and transformation completed successfully") return transformed_result + except OAuthConsentRequiredError as e: + logger.info("OAuth consent required during agent run") + if context.stream: + # Yield OAuth consent response events + # Capture e in the closure by passing it as a default argument + async def oauth_consent_stream(error=e): + async for event in self.respond_with_oauth_consent_astream(context, error): + yield event + return oauth_consent_stream() + return await 
self.respond_with_oauth_consent(context, e) finally: # Close tool_client if it was created for this request (non-streaming only, streaming handles in generator) if not context.stream and tool_client is not None: try: await tool_client.close() logger.debug("Closed tool_client after request processing") - except Exception as e: # pylint: disable=broad-exception-caught - logger.warning(f"Error closing tool_client: {e}") + except Exception as ex: # pylint: disable=broad-exception-caught + logger.warning(f"Error closing tool_client: {ex}") diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index a7cfbd49dd22..55a56fed54ca 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -2,6 +2,10 @@ ## 1.0.0b5 (2025-11-16) +### Feature Added + +- Support Tools Oauth + ### Bugs Fixed - Fixed streaming generation issues. diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/__init__.py new file mode 100644 index 000000000000..fdf8caba9ef5 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/__init__.py @@ -0,0 +1,5 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- + +__path__ = __import__("pkgutil").extend_path(__path__, __name__) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py index 8cf7c6b67389..3800740fb464 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py @@ -10,4 +10,4 @@ "FoundryTool", "OAuthConsentRequiredError", "MCPToolApprovalRequiredError", -] \ No newline at end of file +] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py index df19d4663fb3..ea9a8479637f 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py @@ -2,12 +2,11 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -from copy import deepcopy -from typing import Any, List, TYPE_CHECKING, Mapping, Union +from typing import Any, List, Mapping, Union from azure.core import PipelineClient from azure.core.pipeline import policies -from azure.core.rest import HttpRequest, HttpResponse from azure.core.credentials import TokenCredential +from azure.core.tracing.decorator import distributed_trace from ._configuration import AzureAIToolClientConfiguration from .operations._operations import MCPToolsOperations, RemoteToolsOperations @@ -15,156 +14,174 @@ from ._model_base import FoundryTool, ToolSource class AzureAIToolClient: - """Synchronous client for aggregating tools from Azure AI MCP and Tools APIs. 
- - This client provides access to tools from both MCP (Model Context Protocol) servers - and Azure AI Tools API endpoints, enabling unified tool discovery and invocation. - - :param str endpoint: - The fully qualified endpoint for the Azure AI Agents service. - Example: "https://.api.azureml.ms" - :param credential: - Credential for authenticating requests to the service. - Use credentials from azure-identity like DefaultAzureCredential. - :type credential: ~azure.core.credentials.TokenCredential - :keyword str agent_name: - Name of the agent to use for tool operations. Default is "$default". - :keyword List[Mapping[str, Any]] tools: - List of tool configurations defining which tools to include. - :keyword Mapping[str, Any] user: - User information for tool invocations (object_id, tenant_id). - :keyword str api_version: - API version to use when communicating with the service. - Default is the latest supported version. - :keyword transport: - Custom transport implementation. Default is RequestsTransport. - :paramtype transport: ~azure.core.pipeline.transport.HttpTransport - - """ - - def __init__( - self, - endpoint: str, - credential: "TokenCredential", - **kwargs: Any, - ) -> None: - """Initialize the synchronous Azure AI Tool Client. - - :param str endpoint: The service endpoint URL. - :param credential: Credentials for authenticating requests. - :type credential: ~azure.core.credentials.TokenCredential - :keyword kwargs: Additional keyword arguments for client configuration. 
- """ - self._config = AzureAIToolClientConfiguration( - endpoint, - credential, - **kwargs, - ) - - _policies = kwargs.pop("policies", None) - if _policies is None: - _policies = [ - policies.RequestIdPolicy(**kwargs), - self._config.headers_policy, - self._config.user_agent_policy, - self._config.proxy_policy, - policies.ContentDecodePolicy(**kwargs), - self._config.redirect_policy, - self._config.retry_policy, - self._config.authentication_policy, - self._config.custom_hook_policy, - self._config.logging_policy, - policies.DistributedTracingPolicy(**kwargs), - policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, - self._config.http_logging_policy, - ] - self._client: PipelineClient = PipelineClient(base_url=endpoint, policies=_policies, **kwargs) - - # Initialize specialized clients with client and config - self._mcp_tools = MCPToolsOperations(client=self._client, config=self._config) - self._remote_tools = RemoteToolsOperations(client=self._client, config=self._config) - - def list_tools(self) -> List[FoundryTool]: - """List all available tools from configured sources. - - Retrieves tools from both MCP servers and Azure AI Tools API endpoints, - returning them as FoundryTool instances ready for invocation. - :return: List of available tools from all configured sources. - :rtype: List[~AzureAITool] - :raises ~exceptions.OAuthConsentRequiredError: - Raised when the service requires user OAuth consent. - :raises ~exceptions.MCPToolApprovalRequiredError: - Raised when tool access requires human approval. - :raises ~azure.core.exceptions.HttpResponseError: - Raised for HTTP communication failures. 
- - """ - - existing_names: set[str] = set() - - tools: List[FoundryTool] = [] - - # Fetch MCP tools - mcp_tools = self._mcp_tools.list_tools(existing_names) - tools.extend(mcp_tools) - - # Fetch Tools API tools - tools_api_tools = self._remote_tools.resolve_tools(existing_names) - tools.extend(tools_api_tools) - - for tool in tools: - # Capture tool in a closure to avoid shadowing issues - def make_invoker(captured_tool): - return lambda *args, **kwargs: self.invoke_tool(captured_tool, *args, **kwargs) - tool.invoker = make_invoker(tool) - return tools - - def invoke_tool( - self, - tool: Union[str, FoundryTool], - *args: Any, - **kwargs: Any, - ) -> Any: - """Invoke a tool by instance, name, or descriptor. - - :param tool: Tool to invoke, specified as an AzureAITool instance, - tool name string, or FoundryTool. - :type tool: Union[str, ~FoundryTool] - :param args: Positional arguments to pass to the tool - """ - descriptor = self._resolve_tool_descriptor(tool) - payload = InvocationPayloadBuilder.build_payload(args, kwargs, {}) - return self._invoke_tool(descriptor, payload, **kwargs) - - def _resolve_tool_descriptor( - self, tool: Union[str, FoundryTool] - ) -> FoundryTool: - """Resolve a tool reference to a descriptor.""" - if isinstance(tool, FoundryTool): - return tool - if isinstance(tool, str): - # Fetch all tools and find matching descriptor - descriptors = self.list_tools() - for descriptor in descriptors: - if descriptor.name == tool or descriptor.key == tool: - return descriptor - raise KeyError(f"Unknown tool: {tool}") - raise TypeError("Tool must be an AzureAITool, FoundryTool, or registered name/key") - - def _invoke_tool(self, descriptor: FoundryTool, arguments: Mapping[str, Any], **kwargs: Any) -> Any: - """Invoke a tool descriptor.""" - if descriptor.source is ToolSource.MCP_TOOLS: - return self._mcp_tools.invoke_tool(descriptor, arguments) - if descriptor.source is ToolSource.REMOTE_TOOLS: - return self._remote_tools.invoke_tool(descriptor, 
arguments) - raise ValueError(f"Unsupported tool source: {descriptor.source}") - - def close(self) -> None: - self._client.close() - - def __enter__(self) -> "AzureAIToolClient": - self._client.__enter__() - return self - - def __exit__(self, *exc_details: Any) -> None: - self._client.__exit__(*exc_details) \ No newline at end of file + """Synchronous client for aggregating tools from Azure AI MCP and Tools APIs. + + This client provides access to tools from both MCP (Model Context Protocol) servers + and Azure AI Tools API endpoints, enabling unified tool discovery and invocation. + + :param str endpoint: + The fully qualified endpoint for the Azure AI Agents service. + Example: "https://.api.azureml.ms" + :param credential: + Credential for authenticating requests to the service. + Use credentials from azure-identity like DefaultAzureCredential. + :type credential: ~azure.core.credentials.TokenCredential + :keyword str agent_name: + Name of the agent to use for tool operations. Default is "$default". + :keyword List[Mapping[str, Any]] tools: + List of tool configurations defining which tools to include. + :keyword Mapping[str, Any] user: + User information for tool invocations (object_id, tenant_id). + :keyword str api_version: + API version to use when communicating with the service. + Default is the latest supported version. + :keyword transport: + Custom transport implementation. Default is RequestsTransport. + :paramtype transport: ~azure.core.pipeline.transport.HttpTransport + + """ + + def __init__( + self, + endpoint: str, + credential: "TokenCredential", + **kwargs: Any, + ) -> None: + """Initialize the synchronous Azure AI Tool Client. + + :param str endpoint: The service endpoint URL. + :param credential: Credentials for authenticating requests. + :type credential: ~azure.core.credentials.TokenCredential + :keyword kwargs: Additional keyword arguments for client configuration. 
+ """ + self._config = AzureAIToolClientConfiguration( + endpoint, + credential, + **kwargs, + ) + + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: PipelineClient = PipelineClient(base_url=endpoint, policies=_policies, **kwargs) + + # Initialize specialized clients with client and config + self._mcp_tools = MCPToolsOperations(client=self._client, config=self._config) + self._remote_tools = RemoteToolsOperations(client=self._client, config=self._config) + + def list_tools(self) -> List[FoundryTool]: + """List all available tools from configured sources. + + Retrieves tools from both MCP servers and Azure AI Tools API endpoints, + returning them as FoundryTool instances ready for invocation. + :return: List of available tools from all configured sources. + :rtype: List[~AzureAITool] + :raises ~exceptions.OAuthConsentRequiredError: + Raised when the service requires user OAuth consent. + :raises ~exceptions.MCPToolApprovalRequiredError: + Raised when tool access requires human approval. + :raises ~azure.core.exceptions.HttpResponseError: + Raised for HTTP communication failures. 
+ + """ + + existing_names: set[str] = set() + + tools: List[FoundryTool] = [] + + # Fetch MCP tools + mcp_tools = self._mcp_tools.list_tools(existing_names) + tools.extend(mcp_tools) + + # Fetch Tools API tools + tools_api_tools = self._remote_tools.resolve_tools(existing_names) + tools.extend(tools_api_tools) + + for tool in tools: + # Capture tool in a closure to avoid shadowing issues + def make_invoker(captured_tool): + return lambda *args, **kwargs: self.invoke_tool(captured_tool, *args, **kwargs) + tool.invoker = make_invoker(tool) + return tools + + @distributed_trace + def invoke_tool( + self, + tool: Union[str, FoundryTool], + *args: Any, + **kwargs: Any, + ) -> Any: + """Invoke a tool by instance, name, or descriptor. + + :param tool: Tool to invoke, specified as an AzureAITool instance, + tool name string, or FoundryTool. + :type tool: Union[str, ~FoundryTool] + :param args: Positional arguments to pass to the tool. + :type args: Any + :return: The result of invoking the tool. + :rtype: Any + """ + descriptor = self._resolve_tool_descriptor(tool) + payload = InvocationPayloadBuilder.build_payload(args, kwargs, configuration={}) + return self._invoke_tool(descriptor, payload, **kwargs) + + def _resolve_tool_descriptor( + self, tool: Union[str, FoundryTool] + ) -> FoundryTool: + """Resolve a tool reference to a descriptor. + + :param tool: Tool to resolve, either a FoundryTool instance or a string name/key. + :type tool: Union[str, FoundryTool] + :return: The resolved FoundryTool descriptor. 
+ :rtype: FoundryTool + """ + if isinstance(tool, FoundryTool): + return tool + if isinstance(tool, str): + # Fetch all tools and find matching descriptor + descriptors = self.list_tools() + for descriptor in descriptors: + if tool in (descriptor.name, descriptor.key): + return descriptor + raise KeyError(f"Unknown tool: {tool}") + raise TypeError("Tool must be an AzureAITool, FoundryTool, or registered name/key") + + def _invoke_tool(self, descriptor: FoundryTool, arguments: Mapping[str, Any], **kwargs: Any) -> Any: + """Invoke a tool descriptor. + + :param descriptor: The tool descriptor to invoke. + :type descriptor: FoundryTool + :param arguments: Arguments to pass to the tool. + :type arguments: Mapping[str, Any] + :return: The result of the tool invocation. + :rtype: Any + """ + if descriptor.source is ToolSource.MCP_TOOLS: + return self._mcp_tools.invoke_tool(descriptor, arguments, **kwargs) + if descriptor.source is ToolSource.REMOTE_TOOLS: + return self._remote_tools.invoke_tool(descriptor, arguments, **kwargs) + raise ValueError(f"Unsupported tool source: {descriptor.source}") + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> "AzureAIToolClient": + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_configuration.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_configuration.py index 45e2ac178654..71cbdebec911 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_configuration.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_configuration.py @@ -2,87 +2,84 @@ # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -from typing import Any, Mapping, List, Optional, TYPE_CHECKING +from typing import Any, List, Optional, TYPE_CHECKING from azure.core.pipeline import policies +from ._utils._model_base import ToolConfigurationParser, UserInfo, ToolDefinition if TYPE_CHECKING: from azure.core.credentials import TokenCredential -from ._utils._model_base import ToolConfigurationParser, UserInfo, ToolDefinition +class AzureAIToolClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for Azure AI Tool Client. + + Manages authentication, endpoint configuration, and policy settings for the + Azure AI Tool Client. This class is used internally by the client and should + not typically be instantiated directly. + + :param str endpoint: + Fully qualified endpoint for the Azure AI Agents service. + :param credential: + Azure TokenCredential for authentication. + :type credential: ~azure.core.credentials.TokenCredential + :keyword str api_version: + API version to use. Default is the latest supported version. + :keyword List[str] credential_scopes: + OAuth2 scopes for token requests. Default is ["https://ai.azure.com/.default"]. + :keyword str agent_name: + Name of the agent. Default is "$default". + :keyword List[Mapping[str, Any]] tools: + List of tool configurations. + :keyword Mapping[str, Any] user: + User information for tool invocations. + """ + + def __init__( + self, + endpoint: str, + credential: "TokenCredential", + **kwargs: Any, + ) -> None: + """Initialize the configuration. + + :param str endpoint: The service endpoint URL. + :param credential: Credentials for authenticating requests. + :type credential: ~azure.core.credentials.TokenCredential + :keyword kwargs: Additional configuration options. 
+ """ + api_version: str = kwargs.pop("api_version", "2025-05-15-preview") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://ai.azure.com/.default"]) -class AzureAIToolClientConfiguration: - """Configuration for Azure AI Tool Client. - - Manages authentication, endpoint configuration, and policy settings for the - Azure AI Tool Client. This class is used internally by the client and should - not typically be instantiated directly. - - :param str endpoint: - Fully qualified endpoint for the Azure AI Agents service. - :param credential: - Azure TokenCredential for authentication. - :type credential: ~azure.core.credentials.TokenCredential - :keyword str api_version: - API version to use. Default is the latest supported version. - :keyword List[str] credential_scopes: - OAuth2 scopes for token requests. Default is ["https://ai.azure.com/.default"]. - :keyword str agent_name: - Name of the agent. Default is "$default". - :keyword List[Mapping[str, Any]] tools: - List of tool configurations. - :keyword Mapping[str, Any] user: - User information for tool invocations. - """ + # Tool configuration + self.agent_name: str = kwargs.pop("agent_name", "$default") + self.tools: Optional[List[ToolDefinition]] = kwargs.pop("tools", None) + self.user: Optional[UserInfo] = kwargs.pop("user", None) - def __init__( - self, - endpoint: str, - credential: "TokenCredential", - **kwargs: Any, - ) -> None: - """Initialize the configuration. - - :param str endpoint: The service endpoint URL. - :param credential: Credentials for authenticating requests. - :type credential: ~azure.core.credentials.TokenCredential - :keyword kwargs: Additional configuration options. 
- """ - api_version: str = kwargs.pop("api_version", "2025-05-15-preview") + # Initialize tool configuration parser + self.tool_config = ToolConfigurationParser(self.tools) - self.endpoint = endpoint - self.credential = credential - self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://ai.azure.com/.default"]) - + self._configure(**kwargs) - # Tool configuration - self.agent_name: str = kwargs.pop("agent_name", "$default") - self.tools: Optional[List[ToolDefinition]] = kwargs.pop("tools", None) - self.user: Optional[UserInfo] = kwargs.pop("user", None) - - # Initialize tool configuration parser - - self.tool_config = ToolConfigurationParser(self.tools) - - self._configure(**kwargs) - - # Warn about unused kwargs - if kwargs: - import warnings - warnings.warn(f"Unused configuration parameters: {list(kwargs.keys())}", UserWarning) + # Warn about unused kwargs + if kwargs: + import warnings + warnings.warn(f"Unused configuration parameters: {list(kwargs.keys())}", UserWarning) - def _configure(self, **kwargs: Any) -> None: - self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) - self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) - self.authentication_policy = kwargs.get("authentication_policy") - if self.credential and not self.authentication_policy: - self.authentication_policy = 
policies.BearerTokenCredentialPolicy( - self.credential, *self.credential_scopes, **kwargs - ) \ No newline at end of file + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.BearerTokenCredentialPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_exceptions.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_exceptions.py index aa00b6b5f4b5..41515592d698 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_exceptions.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_exceptions.py @@ -1,49 +1,52 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- from typing import Any, Mapping, Optional class OAuthConsentRequiredError(RuntimeError): - """Raised when the service requires end-user OAuth consent. 
+ """Raised when the service requires end-user OAuth consent. - This exception is raised when a tool or service operation requires explicit - OAuth consent from the end user before the operation can proceed. + This exception is raised when a tool or service operation requires explicit + OAuth consent from the end user before the operation can proceed. - :ivar str message: Human-readable guidance returned by the service. - :ivar str consent_url: Link that the end user must visit to provide consent. - :ivar dict payload: Full response payload from the service. + :ivar str message: Human-readable guidance returned by the service. + :ivar str consent_url: Link that the end user must visit to provide consent. + :ivar dict payload: Full response payload from the service. - :param str message: Human-readable guidance returned by the service. - :param str consent_url: Link that the end user must visit to provide the required consent. - :param dict payload: Full response payload supplied by the service. - """ + :param str message: Human-readable guidance returned by the service. + :param str consent_url: Link that the end user must visit to provide the required consent. + :param dict payload: Full response payload supplied by the service. + """ - def __init__(self, message: str, consent_url: Optional[str], payload: Mapping[str, Any]): - super().__init__(message) - self.message = message - self.consent_url = consent_url - self.payload = dict(payload) + def __init__(self, message: str, consent_url: Optional[str], payload: Mapping[str, Any]): + super().__init__(message) + self.message = message + self.consent_url = consent_url + self.payload = dict(payload) class MCPToolApprovalRequiredError(RuntimeError): - """Raised when an MCP tool invocation needs human approval. - - This exception is raised when an MCP (Model Context Protocol) tool requires - explicit human approval before the invocation can proceed, typically for - security or compliance reasons. 
- - :ivar str message: Human-readable guidance returned by the service. - :ivar dict approval_arguments: - Arguments that must be approved or amended before continuing. - :ivar dict payload: Full response payload from the service. - - :param str message: Human-readable guidance returned by the service. - :param dict approval_arguments: - Arguments that must be approved or amended before continuing. - :param dict payload: Full response payload supplied by the service. - """ - - def __init__(self, message: str, approval_arguments: Mapping[str, Any], payload: Mapping[str, Any]): - super().__init__(message) - self.message = message - self.approval_arguments = dict(approval_arguments) - self.payload = dict(payload) + """Raised when an MCP tool invocation needs human approval. + + This exception is raised when an MCP (Model Context Protocol) tool requires + explicit human approval before the invocation can proceed, typically for + security or compliance reasons. + + :ivar str message: Human-readable guidance returned by the service. + :ivar dict approval_arguments: + Arguments that must be approved or amended before continuing. + :ivar dict payload: Full response payload from the service. + + :param str message: Human-readable guidance returned by the service. + :param dict approval_arguments: + Arguments that must be approved or amended before continuing. + :param dict payload: Full response payload supplied by the service. 
+ """ + + def __init__(self, message: str, approval_arguments: Mapping[str, Any], payload: Mapping[str, Any]): + super().__init__(message) + self.message = message + self.approval_arguments = dict(approval_arguments) + self.payload = dict(payload) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_model_base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_model_base.py index 3c7bed8b5db1..7e20b20edeb0 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_model_base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_model_base.py @@ -5,164 +5,170 @@ from enum import Enum import json -from typing import Any, Awaitable, Callable, List, Mapping, Optional +from typing import Any, Awaitable, Callable, Mapping, Optional from dataclasses import dataclass -import asyncio +import asyncio # pylint: disable=do-not-import-asyncio import inspect +from azure.core import CaseInsensitiveEnumMeta -class ToolSource(str, Enum): - """Identifies the origin of a tool. - - Specifies whether a tool comes from an MCP (Model Context Protocol) server - or from the Azure AI Tools API (remote tools). - """ +class ToolSource(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Identifies the origin of a tool. - MCP_TOOLS = "mcp_tools" - REMOTE_TOOLS = "remote_tools" + Specifies whether a tool comes from an MCP (Model Context Protocol) server + or from the Azure AI Tools API (remote tools). + """ + + MCP_TOOLS = "mcp_tools" + REMOTE_TOOLS = "remote_tools" class ToolDefinition: - """Definition of a tool including its parameters. - - :ivar str type: JSON schema type (e.g., "mcp", "a2", other tools). - """ - - def __init__(self, type: str, **kwargs: Any) -> None: - """Initialize ToolDefinition with type and any additional properties. - - :param str type: JSON schema type (e.g., "mcp", "a2", other tools). 
- :param kwargs: Any additional properties to set on the tool definition. - """ - self.type = type - # Store all additional properties as attributes - for key, value in kwargs.items(): - setattr(self, key, value) - - def __repr__(self) -> str: - """Return a detailed string representation of the ToolDefinition.""" - return json.dumps(self.__dict__, default=str) - - def __str__(self) -> str: - """Return a human-readable string representation.""" - return json.dumps(self.__dict__, default=str) - + """Definition of a tool including its parameters. -@dataclass -class FoundryTool: - """Lightweight description of a tool that can be invoked. - - Represents metadata and configuration for a single tool, including its - name, description, input schema, and source information. - - :ivar str key: Unique identifier for this tool. - :ivar str name: Display name of the tool. - :ivar str description: Human-readable description of what the tool does. - :ivar ~ToolSource source: - Origin of the tool (MCP_TOOLS or REMOTE_TOOLS). - :ivar dict metadata: Raw metadata from the API response. - :ivar dict input_schema: - JSON schema describing the tool's input parameters, or None. - :ivar ToolDefinition tool_definition: - Optional tool definition object, or None. - """ - - key: str - name: str - description: str - source: ToolSource - metadata: Mapping[str, Any] - input_schema: Optional[Mapping[str, Any]] = None - tool_definition: Optional[ToolDefinition] = None - invoker: Optional[Callable[..., Awaitable[Any]]] = None - - def invoke(self, *args: Any, **kwargs: Any) -> Any: - """Invoke the tool synchronously. - - :param args: Positional arguments to pass to the tool. - :param kwargs: Keyword arguments to pass to the tool. - :return: The result from the tool invocation. 
- :rtype: Any - """ - - - if not self.invoker: - raise NotImplementedError("No invoker function defined for this tool.") - if inspect.iscoroutinefunction(self.invoker): - # If the invoker is async, check if we're already in an event loop - try: - loop = asyncio.get_running_loop() - # We're in a running loop, can't use asyncio.run() - raise RuntimeError( - "Cannot call invoke() on an async tool from within an async context. " - "Use 'await tool.ainvoke(...)' or 'await tool(...)' instead." - ) - except RuntimeError as e: - if "no running event loop" in str(e).lower(): - # No running loop, safe to use asyncio.run() - return asyncio.run(self.invoker(*args, **kwargs)) - else: - # Re-raise our custom error - raise - else: - return self.invoker(*args, **kwargs) - - async def ainvoke(self, *args: Any, **kwargs: Any) -> Any: - """Invoke the tool asynchronously. - - :param args: Positional arguments to pass to the tool. - :param kwargs: Keyword arguments to pass to the tool. - :return: The result from the tool invocation. - :rtype: Any - """ - - if not self.invoker: - raise NotImplementedError("No invoker function defined for this tool.") - if inspect.iscoroutinefunction(self.invoker): - return await self.invoker(*args, **kwargs) - else: - result = self.invoker(*args, **kwargs) - # If the result is awaitable (e.g., a coroutine), await it - if inspect.iscoroutine(result) or hasattr(result, '__await__'): - return await result - return result - - def __call__(self, *args: Any, **kwargs: Any) -> Any: - - # Check if the invoker is async - if self.invoker and inspect.iscoroutinefunction(self.invoker): - # Return coroutine for async context - return self.ainvoke(*args, **kwargs) - else: - # Use sync invoke - return self.invoke(*args, **kwargs) + :ivar str type: JSON schema type (e.g., "mcp", "a2", other tools). + """ + def __init__(self, type: str, **kwargs: Any) -> None: + """Initialize ToolDefinition with type and any additional properties. 
-class UserInfo: - """Represents user information. - - :ivar str objectId: User's object identifier. - :ivar str tenantId: Tenant identifier. - """ - - def __init__(self, objectId: str, tenantId: str, **kwargs: Any) -> None: - """Initialize UserInfo with user details. - - :param str objectId: User's object identifier. - :param str tenantId: Tenant identifier. - :param kwargs: Any additional properties to set on the user. - """ - self.objectId = objectId - self.tenantId = tenantId - # Store all additional properties as attributes - for key, value in kwargs.items(): - setattr(self, key, value) - - def to_dict(self) -> dict: - """Convert to dictionary for JSON serialization.""" - return { - "objectId": self.objectId, - "tenantId": self.tenantId - } + :param str type: JSON schema type (e.g., "mcp", "a2", other tools). + :param kwargs: Any additional properties to set on the tool definition. + """ + self.type = type + # Store all additional properties as attributes + for key, value in kwargs.items(): + setattr(self, key, value) + + def __repr__(self) -> str: + """Return a detailed string representation of the ToolDefinition. + :return: JSON string representation of the ToolDefinition. + :rtype: str + """ + return json.dumps(self.__dict__, default=str) + def __str__(self) -> str: + """Return a human-readable string representation. + :return: JSON string representation of the ToolDefinition. + :rtype: str + """ + return json.dumps(self.__dict__, default=str) +@dataclass +class FoundryTool: + """Lightweight description of a tool that can be invoked. + + Represents metadata and configuration for a single tool, including its + name, description, input schema, and source information. + + :ivar str key: Unique identifier for this tool. + :ivar str name: Display name of the tool. + :ivar str description: Human-readable description of what the tool does. + :ivar ~ToolSource source: + Origin of the tool (MCP_TOOLS or REMOTE_TOOLS). 
+ :ivar dict metadata: Raw metadata from the API response. + :ivar dict input_schema: + JSON schema describing the tool's input parameters, or None. + :ivar ToolDefinition tool_definition: + Optional tool definition object, or None. + """ + + key: str + name: str + description: str + source: ToolSource + metadata: Mapping[str, Any] + input_schema: Optional[Mapping[str, Any]] = None + tool_definition: Optional[ToolDefinition] = None + invoker: Optional[Callable[..., Awaitable[Any]]] = None + + def invoke(self, *args: Any, **kwargs: Any) -> Any: + """Invoke the tool synchronously. + + :param args: Positional arguments to pass to the tool. + :type args: Any + :return: The result from the tool invocation. + :rtype: Any + """ + + if not self.invoker: + raise NotImplementedError("No invoker function defined for this tool.") + if inspect.iscoroutinefunction(self.invoker): + # If the invoker is async, check if we're already in an event loop + try: + asyncio.get_running_loop() + # We're in a running loop, can't use asyncio.run() + raise RuntimeError( + "Cannot call invoke() on an async tool from within an async context. " + "Use 'await tool.ainvoke(...)' or 'await tool(...)' instead." + ) + except RuntimeError as e: + if "no running event loop" in str(e).lower(): + # No running loop, safe to use asyncio.run() + return asyncio.run(self.invoker(*args, **kwargs)) + # Re-raise our custom error + raise + else: + return self.invoker(*args, **kwargs) + + async def ainvoke(self, *args: Any, **kwargs: Any) -> Any: + """Invoke the tool asynchronously. + + :param args: Positional arguments to pass to the tool. + :type args: Any + :return: The result from the tool invocation. 
+ :rtype: Any + """ + + if not self.invoker: + raise NotImplementedError("No invoker function defined for this tool.") + if inspect.iscoroutinefunction(self.invoker): + return await self.invoker(*args, **kwargs) + + result = self.invoker(*args, **kwargs) + # If the result is awaitable (e.g., a coroutine), await it + if inspect.iscoroutine(result) or hasattr(result, '__await__'): + return await result + return result + + def __call__(self, *args: Any, **kwargs: Any) -> Any: + + # Check if the invoker is async + if self.invoker and inspect.iscoroutinefunction(self.invoker): + # Return coroutine for async context + return self.ainvoke(*args, **kwargs) + + # Use sync invoke + return self.invoke(*args, **kwargs) + + +class UserInfo: + """Represents user information. + + :ivar str objectId: User's object identifier. + :ivar str tenantId: Tenant identifier. + """ + + def __init__(self, objectId: str, tenantId: str, **kwargs: Any) -> None: + """Initialize UserInfo with user details. + + :param str objectId: User's object identifier. + :param str tenantId: Tenant identifier. + :param kwargs: Any additional properties to set on the user. + """ + self.objectId = objectId + self.tenantId = tenantId + # Store all additional properties as attributes + for key, value in kwargs.items(): + setattr(self, key, value) + + def to_dict(self) -> dict: + """Convert to dictionary for JSON serialization. + + :return: Dictionary containing objectId and tenantId. 
+ :rtype: dict + """ + return { + "objectId": self.objectId, + "tenantId": self.tenantId + } diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py index c0abe5b29bb9..047a3b7919e7 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py @@ -10,4 +10,4 @@ "FoundryTool", "OAuthConsentRequiredError", "MCPToolApprovalRequiredError", -] \ No newline at end of file +] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py index b49ed2b971cd..e2d35b1dd919 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py @@ -6,6 +6,7 @@ from azure.core import AsyncPipelineClient from azure.core.pipeline import policies +from azure.core.tracing.decorator_async import distributed_trace_async from ._configuration import AzureAIToolClientConfiguration from .._utils._model_base import InvocationPayloadBuilder @@ -17,159 +18,177 @@ from azure.core.credentials_async import AsyncTokenCredential class AzureAIToolClient: - """Asynchronous client for aggregating tools from Azure AI MCP and Tools APIs. - - This client provides access to tools from both MCP (Model Context Protocol) servers - and Azure AI Tools API endpoints, enabling unified tool discovery and invocation. - - :param str endpoint: - The fully qualified endpoint for the Azure AI Agents service. - Example: "https://.api.azureml.ms" - :param credential: - Credential for authenticating requests to the service. 
- Use credentials from azure-identity like DefaultAzureCredential. - :type credential: ~azure.core.credentials.TokenCredential - :keyword str agent_name: - Name of the agent to use for tool operations. Default is "$default". - :keyword List[Mapping[str, Any]] tools: - List of tool configurations defining which tools to include. - :keyword Mapping[str, Any] user: - User information for tool invocations (object_id, tenant_id). - :keyword str api_version: - API version to use when communicating with the service. - Default is the latest supported version. - :keyword transport: - Custom transport implementation. Default is RequestsTransport. - :paramtype transport: ~azure.core.pipeline.transport.HttpTransport - - """ - - def __init__( - self, - endpoint: str, - credential: "AsyncTokenCredential", - **kwargs: Any, - ) -> None: - """Initialize the asynchronous Azure AI Tool Client. - - :param str endpoint: The service endpoint URL. - :param credential: Credentials for authenticating requests. - :type credential: ~azure.core.credentials.TokenCredential - :keyword kwargs: Additional keyword arguments for client configuration. 
- """ - self._config = AzureAIToolClientConfiguration( - endpoint, - credential, - **kwargs, - ) - - _policies = kwargs.pop("policies", None) - if _policies is None: - _policies = [ - policies.RequestIdPolicy(**kwargs), - self._config.headers_policy, - self._config.user_agent_policy, - self._config.proxy_policy, - policies.ContentDecodePolicy(**kwargs), - self._config.redirect_policy, - self._config.retry_policy, - self._config.authentication_policy, - self._config.custom_hook_policy, - self._config.logging_policy, - policies.DistributedTracingPolicy(**kwargs), - policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, - self._config.http_logging_policy, - ] - self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=endpoint, policies=_policies, **kwargs) - - # Initialize specialized clients with client and config - self._mcp_tools = MCPToolsOperations(client=self._client, config=self._config) - self._remote_tools = RemoteToolsOperations(client=self._client, config=self._config) - - async def list_tools(self) -> List[FoundryTool]: - """List all available tools from configured sources. - - Retrieves tools from both MCP servers and Azure AI Tools API endpoints, - returning them as AzureAITool instances ready for invocation. - :return: List of available tools from all configured sources. - :rtype: List[~AzureAITool] - :raises ~Tool_Client.exceptions.OAuthConsentRequiredError: - Raised when the service requires user OAuth consent. - :raises ~Tool_Client.exceptions.MCPToolApprovalRequiredError: - Raised when tool access requires human approval. - :raises ~azure.core.exceptions.HttpResponseError: - Raised for HTTP communication failures. 
- - """ - - existing_names: set[str] = set() - - tools: List[FoundryTool] = [] - - # Fetch MCP tools - mcp_tools = await self._mcp_tools.list_tools(existing_names) - tools.extend(mcp_tools) - # Fetch Tools API tools - tools_api_tools = await self._remote_tools.resolve_tools(existing_names) - tools.extend(tools_api_tools) - - for tool in tools: - # Capture tool in a closure to avoid shadowing issues - def make_invoker(captured_tool): - async def _invoker(*args, **kwargs): - return await self.invoke_tool(captured_tool, *args, **kwargs) - return _invoker - tool.invoker = make_invoker(tool) - - return tools - - async def invoke_tool( - self, - tool: Union[str, FoundryTool], - *args: Any, - **kwargs: Any, - ) -> Any: - """Invoke a tool by instance, name, or descriptor. - - :param tool: Tool to invoke, specified as an AzureAITool instance, - tool name string, or FoundryTool. - :type tool: Union[~AzureAITool, str, ~Tool_Client.models.FoundryTool] - :param args: Positional arguments to pass to the tool - """ - descriptor = await self._resolve_tool_descriptor(tool) - payload = InvocationPayloadBuilder.build_payload(args, kwargs, {}) - return await self._invoke_tool(descriptor, payload, **kwargs) - - async def _resolve_tool_descriptor( - self, tool: Union[str, FoundryTool] - ) -> FoundryTool: - """Resolve a tool reference to a descriptor.""" - if isinstance(tool, FoundryTool): - return tool - if isinstance(tool, str): - # Fetch all tools and find matching descriptor - descriptors = await self.list_tools() - for descriptor in descriptors: - if descriptor.name == tool or descriptor.key == tool: - return descriptor - raise KeyError(f"Unknown tool: {tool}") - raise TypeError("Tool must be an AsyncAzureAITool, FoundryTool, or registered name/key") - - async def _invoke_tool(self, descriptor: FoundryTool, arguments: Mapping[str, Any], **kwargs: Any) -> Any: - """Invoke a tool descriptor.""" - if descriptor.source is ToolSource.MCP_TOOLS: - return await 
self._mcp_tools.invoke_tool(descriptor, arguments) - if descriptor.source is ToolSource.REMOTE_TOOLS: - return await self._remote_tools.invoke_tool(descriptor, arguments) - raise ValueError(f"Unsupported tool source: {descriptor.source}") - - async def close(self) -> None: - """Close the underlying HTTP pipeline.""" - await self._client.close() - - async def __aenter__(self) -> "AzureAIToolClient": - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details: Any) -> None: - await self._client.__aexit__(*exc_details) \ No newline at end of file + """Asynchronous client for aggregating tools from Azure AI MCP and Tools APIs. + + This client provides access to tools from both MCP (Model Context Protocol) servers + and Azure AI Tools API endpoints, enabling unified tool discovery and invocation. + + :param str endpoint: + The fully qualified endpoint for the Azure AI Agents service. + Example: "https://.api.azureml.ms" + :param credential: + Credential for authenticating requests to the service. + Use credentials from azure-identity like DefaultAzureCredential. + :type credential: ~azure.core.credentials.TokenCredential + :keyword str agent_name: + Name of the agent to use for tool operations. Default is "$default". + :keyword List[Mapping[str, Any]] tools: + List of tool configurations defining which tools to include. + :keyword Mapping[str, Any] user: + User information for tool invocations (object_id, tenant_id). + :keyword str api_version: + API version to use when communicating with the service. + Default is the latest supported version. + :keyword transport: + Custom transport implementation. Default is RequestsTransport. + :paramtype transport: ~azure.core.pipeline.transport.HttpTransport + + """ + + def __init__( + self, + endpoint: str, + credential: "AsyncTokenCredential", + **kwargs: Any, + ) -> None: + """Initialize the asynchronous Azure AI Tool Client. + + :param str endpoint: The service endpoint URL. 
+ :param credential: Credentials for authenticating requests. + :type credential: ~azure.core.credentials.TokenCredential + :keyword kwargs: Additional keyword arguments for client configuration. + """ + self._config = AzureAIToolClientConfiguration( + endpoint, + credential, + **kwargs, + ) + + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=endpoint, policies=_policies, **kwargs) + + # Initialize specialized clients with client and config + self._mcp_tools = MCPToolsOperations(client=self._client, config=self._config) + self._remote_tools = RemoteToolsOperations(client=self._client, config=self._config) + + async def list_tools(self) -> List[FoundryTool]: + """List all available tools from configured sources. + + Retrieves tools from both MCP servers and Azure AI Tools API endpoints, + returning them as AzureAITool instances ready for invocation. + :return: List of available tools from all configured sources. + :rtype: List[~AzureAITool] + :raises ~Tool_Client.exceptions.OAuthConsentRequiredError: + Raised when the service requires user OAuth consent. + :raises ~Tool_Client.exceptions.MCPToolApprovalRequiredError: + Raised when tool access requires human approval. + :raises ~azure.core.exceptions.HttpResponseError: + Raised for HTTP communication failures. 
+ + """ + + existing_names: set[str] = set() + + tools: List[FoundryTool] = [] + + # Fetch MCP tools + mcp_tools = await self._mcp_tools.list_tools(existing_names) + tools.extend(mcp_tools) + # Fetch Tools API tools + tools_api_tools = await self._remote_tools.resolve_tools(existing_names) + tools.extend(tools_api_tools) + + for tool in tools: + # Capture tool in a closure to avoid shadowing issues + def make_invoker(captured_tool): + async def _invoker(*args, **kwargs): + return await self.invoke_tool(captured_tool, *args, **kwargs) + return _invoker + tool.invoker = make_invoker(tool) + + return tools + + @distributed_trace_async + async def invoke_tool( + self, + tool: Union[str, FoundryTool], + *args: Any, + **kwargs: Any, + ) -> Any: + """Invoke a tool by instance, name, or descriptor. + + :param tool: Tool to invoke, specified as an AzureAITool instance, + tool name string, or FoundryTool. + :type tool: Union[~AzureAITool, str, ~Tool_Client.models.FoundryTool] + :param args: Positional arguments to pass to the tool. + :type args: Any + :return: The result of invoking the tool. + :rtype: Any + """ + descriptor = await self._resolve_tool_descriptor(tool) + payload = InvocationPayloadBuilder.build_payload(args, kwargs, configuration={}) + return await self._invoke_tool(descriptor, payload, **kwargs) + + async def _resolve_tool_descriptor( + self, tool: Union[str, FoundryTool] + ) -> FoundryTool: + """Resolve a tool reference to a descriptor. + + :param tool: Tool to resolve, either a FoundryTool instance or a string name/key. + :type tool: Union[str, FoundryTool] + :return: The resolved FoundryTool descriptor. 
+ :rtype: FoundryTool + """ + if isinstance(tool, FoundryTool): + return tool + if isinstance(tool, str): + # Fetch all tools and find matching descriptor + descriptors = await self.list_tools() + for descriptor in descriptors: + if tool in (descriptor.name, descriptor.key): + return descriptor + raise KeyError(f"Unknown tool: {tool}") + raise TypeError("Tool must be an AsyncAzureAITool, FoundryTool, or registered name/key") + + async def _invoke_tool(self, descriptor: FoundryTool, arguments: Mapping[str, Any], **kwargs: Any) -> Any: #pylint: disable=unused-argument + """Invoke a tool descriptor. + + :param descriptor: The tool descriptor to invoke. + :type descriptor: FoundryTool + :param arguments: Arguments to pass to the tool. + :type arguments: Mapping[str, Any] + :return: The result of the tool invocation. + :rtype: Any + """ + if descriptor.source is ToolSource.MCP_TOOLS: + return await self._mcp_tools.invoke_tool(descriptor, arguments) + if descriptor.source is ToolSource.REMOTE_TOOLS: + return await self._remote_tools.invoke_tool(descriptor, arguments) + raise ValueError(f"Unsupported tool source: {descriptor.source}") + + async def close(self) -> None: + """Close the underlying HTTP pipeline.""" + await self._client.close() + + async def __aenter__(self) -> "AzureAIToolClient": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py index 79b819863399..4eb5503dee8d 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py @@ -6,83 +6,81 @@ from azure.core.pipeline import policies 
+from .._utils._model_base import ToolConfigurationParser + if TYPE_CHECKING: - from azure.core.credentials_async import AsyncTokenCredential + from azure.core.credentials_async import AsyncTokenCredential -from .._utils._model_base import ToolConfigurationParser +class AzureAIToolClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for Azure AI Tool Client. + + Manages authentication, endpoint configuration, and policy settings for the + Azure AI Tool Client. This class is used internally by the client and should + not typically be instantiated directly. + + :param str endpoint: + Fully qualified endpoint for the Azure AI Agents service. + :param credential: + Azure TokenCredential for authentication. + :type credential: ~azure.core.credentials.TokenCredential + :keyword str api_version: + API version to use. Default is the latest supported version. + :keyword List[str] credential_scopes: + OAuth2 scopes for token requests. Default is ["https://ai.azure.com/.default"]. + :keyword str agent_name: + Name of the agent. Default is "$default". + :keyword List[Mapping[str, Any]] tools: + List of tool configurations. + :keyword Mapping[str, Any] user: + User information for tool invocations. + """ + + def __init__( + self, + endpoint: str, + credential: "AsyncTokenCredential", + **kwargs: Any, + ) -> None: + """Initialize the configuration. + + :param str endpoint: The service endpoint URL. + :param credential: Credentials for authenticating requests. + :type credential: ~azure.core.credentials.TokenCredential + :keyword kwargs: Additional configuration options. + """ + api_version: str = kwargs.pop("api_version", "2025-05-15-preview") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://ai.azure.com/.default"]) -class AzureAIToolClientConfiguration: - """Configuration for Azure AI Tool Client. 
- - Manages authentication, endpoint configuration, and policy settings for the - Azure AI Tool Client. This class is used internally by the client and should - not typically be instantiated directly. - - :param str endpoint: - Fully qualified endpoint for the Azure AI Agents service. - :param credential: - Azure TokenCredential for authentication. - :type credential: ~azure.core.credentials.TokenCredential - :keyword str api_version: - API version to use. Default is the latest supported version. - :keyword List[str] credential_scopes: - OAuth2 scopes for token requests. Default is ["https://ai.azure.com/.default"]. - :keyword str agent_name: - Name of the agent. Default is "$default". - :keyword List[Mapping[str, Any]] tools: - List of tool configurations. - :keyword Mapping[str, Any] user: - User information for tool invocations. - """ + # Tool configuration + self.agent_name: str = kwargs.pop("agent_name", "$default") + self.tools: Optional[List[Mapping[str, Any]]] = kwargs.pop("tools", None) + self.user: Optional[Mapping[str, Any]] = kwargs.pop("user", None) - def __init__( - self, - endpoint: str, - credential: "AsyncTokenCredential", - **kwargs: Any, - ) -> None: - """Initialize the configuration. - - :param str endpoint: The service endpoint URL. - :param credential: Credentials for authenticating requests. - :type credential: ~azure.core.credentials.TokenCredential - :keyword kwargs: Additional configuration options. 
- """ - api_version: str = kwargs.pop("api_version", "2025-05-15-preview") + # Initialize tool configuration parser + self.tool_config = ToolConfigurationParser(self.tools) - self.endpoint = endpoint - self.credential = credential - self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://ai.azure.com/.default"]) - + self._configure(**kwargs) - # Tool configuration - self.agent_name: str = kwargs.pop("agent_name", "$default") - self.tools: Optional[List[Mapping[str, Any]]] = kwargs.pop("tools", None) - self.user: Optional[Mapping[str, Any]] = kwargs.pop("user", None) - - # Initialize tool configuration parser - - self.tool_config = ToolConfigurationParser(self.tools) - - self._configure(**kwargs) - - # Warn about unused kwargs - if kwargs: - import warnings - warnings.warn(f"Unused configuration parameters: {list(kwargs.keys())}", UserWarning) + # Warn about unused kwargs + if kwargs: + import warnings + warnings.warn(f"Unused configuration parameters: {list(kwargs.keys())}", UserWarning) - def _configure(self, **kwargs: Any) -> None: - self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) - self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) - self.authentication_policy = kwargs.get("authentication_policy") - if self.credential and not self.authentication_policy: - 
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy( - self.credential, *self.credential_scopes, **kwargs - ) \ No newline at end of file + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py index e55be880fb6a..7d1310518519 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py @@ -11,13 +11,13 @@ from .._configuration import AzureAIToolClientConfiguration from ...operations._operations import ( - build_remotetools_invoke_tool_request, - build_remotetools_resolve_tools_request, - prepare_remotetools_invoke_tool_request_content, + 
build_remotetools_invoke_tool_request, + build_remotetools_resolve_tools_request, + prepare_remotetools_invoke_tool_request_content, prepare_remotetools_resolve_tools_request_content, - build_mcptools_list_tools_request, + build_mcptools_list_tools_request, prepare_mcptools_list_tools_request_content, - build_mcptools_invoke_tool_request, + build_mcptools_invoke_tool_request, prepare_mcptools_invoke_tool_request_content, API_VERSION, MCP_ENDPOINT_PATH, @@ -57,7 +57,7 @@ class MCPToolsOperations: def __init__(self, *args, **kwargs) -> None: """Initialize MCP client. - + Parameters ---------- client : AsyncPipelineClient @@ -71,10 +71,10 @@ def __init__(self, *args, **kwargs) -> None: if self._client is None or self._config is None: raise ValueError("Both 'client' and 'config' must be provided") - + self._endpoint_path = MCP_ENDPOINT_PATH self._api_version = API_VERSION - + async def list_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool]: """List MCP tools. @@ -82,16 +82,16 @@ async def list_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTo :rtype: List[FoundryTool] """ _request, error_map, remaining_kwargs = build_list_tools_request(self._api_version, kwargs) - + path_format_arguments = {"endpoint": self._config.endpoint} _request.url = self._client.format_url(_request.url, **path_format_arguments) pipeline_response: PipelineResponse = await self._client._pipeline.run(_request, **remaining_kwargs) response = pipeline_response.http_response - + handle_response_error(response, error_map) return process_list_tools_response(response, self._config.tool_config._named_mcp_tools, existing_names) - + async def invoke_tool( self, tool: FoundryTool, @@ -114,14 +114,14 @@ async def invoke_tool( pipeline_response: PipelineResponse = await self._client._pipeline.run(_request, **kwargs) response = pipeline_response.http_response - + handle_response_error(response, error_map) return response.json().get("result") class RemoteToolsOperations: def 
__init__(self, *args, **kwargs) -> None: """Initialize Tools API client. - + :param client: Azure PipelineClient for HTTP requests. :type client: ~azure.core.PipelineClient :param config: Configuration object. @@ -135,7 +135,7 @@ def __init__(self, *args, **kwargs) -> None: if self._client is None or self._config is None: raise ValueError("Both 'client' and 'config' must be provided") - + # Apply agent name substitution to endpoint paths self.agent = self._config.agent_name.strip() if self._config.agent_name and self._config.agent_name.strip() else "$default" self._api_version = API_VERSION @@ -149,18 +149,18 @@ async def resolve_tools(self, existing_names: set, **kwargs: Any) -> List[Foundr result = build_resolve_tools_request(self.agent, self._api_version, self._config.tool_config, self._config.user, kwargs) if result[0] is None: return [] - + _request, error_map, remaining_kwargs = result - + path_format_arguments = {"endpoint": self._config.endpoint} _request.url = self._client.format_url(_request.url, **path_format_arguments) pipeline_response: PipelineResponse = await self._client._pipeline.run(_request, **remaining_kwargs) response = pipeline_response.http_response - + handle_response_error(response, error_map) return process_resolve_tools_response(response, self._config.tool_config._remote_tools, existing_names) - + async def invoke_tool( self, tool: FoundryTool, @@ -182,6 +182,6 @@ async def invoke_tool( pipeline_response: PipelineResponse = await self._client._pipeline.run(_request) response = pipeline_response.http_response - + handle_response_error(response, error_map) return process_invoke_remote_tool_response(response) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py index b54d2d7f6538..0a84ef2e6409 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py @@ -63,7 +63,7 @@ # Helper functions for request/response processing def prepare_request_headers(base_headers: Dict[str, str], custom_headers: Mapping[str, str] = None) -> Dict[str, str]: """Prepare request headers by merging base and custom headers. - + :param base_headers: Base headers to use :param custom_headers: Custom headers to merge :return: Merged headers dictionary @@ -75,7 +75,7 @@ def prepare_request_headers(base_headers: Dict[str, str], custom_headers: Mappin def prepare_error_map(custom_error_map: Mapping[int, Any] = None) -> MutableMapping: """Prepare error map by merging default and custom error mappings. - + :param custom_error_map: Custom error mappings to merge :return: Merged error map """ @@ -91,7 +91,7 @@ def format_and_execute_request( **kwargs: Any ) -> HttpResponse: """Format request URL and execute pipeline. - + :param client: Pipeline client :param request: HTTP request to execute :param endpoint: Endpoint URL for formatting @@ -104,7 +104,7 @@ def format_and_execute_request( def handle_response_error(response: HttpResponse, error_map: MutableMapping) -> None: """Handle HTTP response errors. - + :param response: HTTP response to check :param error_map: Error map for status code mapping :raises HttpResponseError: If response status is not 200 @@ -119,7 +119,7 @@ def process_list_tools_response( existing_names: set ) -> List[FoundryTool]: """Process list_tools response and build descriptors. - + :param response: HTTP response with MCP tools :param named_mcp_tools: Named MCP tools configuration :param existing_names: Set of existing tool names @@ -139,13 +139,24 @@ def process_resolve_tools_response( existing_names: set ) -> List[FoundryTool]: """Process resolve_tools response and build descriptors. 
- + :param response: HTTP response with remote tools :param remote_tools: Remote tools configuration :param existing_names: Set of existing tool names :return: List of tool descriptors """ - toolResponse = ToolsResponse.from_dict(response.json(), remote_tools) + payload = response.json() + response_type = payload.get("type") + result = payload.get("toolResult") + + if response_type == "OAuthConsentRequired": + consent_url = result.get("consentUrl") + message = result.get("message") + if not consent_url: + consent_url = message + raise OAuthConsentRequiredError(message, consent_url=consent_url, payload=payload) + + toolResponse = ToolsResponse.from_dict(payload, remote_tools) return ToolDescriptorBuilder.build_descriptors( toolResponse.enriched_tools, ToolSource.REMOTE_TOOLS, @@ -157,7 +168,7 @@ def build_list_tools_request( kwargs: Dict[str, Any] ) -> Tuple[HttpRequest, MutableMapping, Dict[str, str]]: """Build request for listing MCP tools. - + :param api_version: API version :param kwargs: Additional arguments (headers, params, error_map) :return: Tuple of (request, error_map, params) @@ -165,11 +176,11 @@ def build_list_tools_request( error_map = prepare_error_map(kwargs.pop("error_map", None)) _headers = prepare_request_headers(MCP_HEADERS, kwargs.pop("headers", None)) _params = kwargs.pop("params", {}) or {} - + _content = prepare_mcptools_list_tools_request_content() content = json.dumps(_content) _request = build_mcptools_list_tools_request(api_version=api_version, headers=_headers, params=_params, content=content) - + return _request, error_map, kwargs def build_invoke_mcp_tool_request( @@ -179,7 +190,7 @@ def build_invoke_mcp_tool_request( **kwargs: Any ) -> Tuple[HttpRequest, MutableMapping]: """Build request for invoking MCP tool. 
- + :param api_version: API version :param tool: Tool descriptor :param arguments: Tool arguments @@ -188,12 +199,12 @@ def build_invoke_mcp_tool_request( error_map = prepare_error_map() _headers = prepare_request_headers(MCP_HEADERS) _params = {} - + _content = prepare_mcptools_invoke_tool_request_content(tool, arguments, TOOL_PROPERTY_OVERRIDES) content = json.dumps(_content) _request = build_mcptools_invoke_tool_request(api_version=api_version, headers=_headers, params=_params, content=content) - + return _request, error_map def build_resolve_tools_request( @@ -204,7 +215,7 @@ def build_resolve_tools_request( kwargs: Dict[str, Any] ) -> Union[Tuple[HttpRequest, MutableMapping, Dict[str, Any]], Tuple[None, None, None]]: """Build request for resolving remote tools. - + :param agent_name: Agent name :param api_version: API version :param tool_config: Tool configuration @@ -215,14 +226,14 @@ def build_resolve_tools_request( error_map = prepare_error_map(kwargs.pop("error_map", None)) _headers = prepare_request_headers(REMOTE_TOOLS_HEADERS, kwargs.pop("headers", None)) _params = kwargs.pop("params", {}) or {} - + _content = prepare_remotetools_resolve_tools_request_content(tool_config, user) if _content is None: return None, None, None - + content = json.dumps(_content.to_dict()) _request = build_remotetools_resolve_tools_request(agent_name, api_version=api_version, headers=_headers, params=_params, content=content) - + return _request, error_map, kwargs def build_invoke_remote_tool_request( @@ -233,7 +244,7 @@ def build_invoke_remote_tool_request( arguments: Mapping[str, Any] ) -> Tuple[HttpRequest, MutableMapping]: """Build request for invoking remote tool. 
- + :param agent_name: Agent name :param api_version: API version :param tool: Tool descriptor @@ -244,16 +255,16 @@ def build_invoke_remote_tool_request( error_map = prepare_error_map() _headers = prepare_request_headers(REMOTE_TOOLS_HEADERS) _params = {} - + _content = prepare_remotetools_invoke_tool_request_content(tool, user, arguments) content = json.dumps(_content) _request = build_remotetools_invoke_tool_request(agent_name, api_version=api_version, headers=_headers, params=_params, content=content) - + return _request, error_map def process_invoke_remote_tool_response(response: HttpResponse) -> Any: """Process remote tool invocation response. - + :param response: HTTP response :return: Tool result :raises OAuthConsentRequiredError: If OAuth consent is required @@ -261,7 +272,7 @@ def process_invoke_remote_tool_response(response: HttpResponse) -> Any: payload = response.json() response_type = payload.get("type") result = payload.get("toolResult") - + if response_type == "OAuthConsentRequired": raise OAuthConsentRequiredError(result.get("message"), consent_url=result.get("consentUrl"), payload=payload) return result @@ -270,7 +281,7 @@ class MCPToolsOperations: def __init__(self, *args, **kwargs) -> None: """Initialize MCP client. - + Parameters ---------- client : PipelineClient @@ -284,10 +295,10 @@ def __init__(self, *args, **kwargs) -> None: if self._client is None or self._config is None: raise ValueError("Both 'client' and 'config' must be provided") - + self._endpoint_path = MCP_ENDPOINT_PATH self._api_version = API_VERSION - + def list_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool]: """List MCP tools. 
@@ -298,12 +309,11 @@ def list_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool]: response = format_and_execute_request(self._client, _request, self._config.endpoint, **remaining_kwargs) handle_response_error(response, error_map) return process_list_tools_response(response, self._config.tool_config._named_mcp_tools, existing_names) - + def invoke_tool( self, tool: FoundryTool, arguments: Mapping[str, Any], - **kwargs: Any ) -> Any: """Invoke an MCP tool. @@ -315,7 +325,7 @@ def invoke_tool( :rtype: Any """ _request, error_map = build_invoke_mcp_tool_request(self._api_version, tool, arguments) - response = format_and_execute_request(self._client, _request, self._config.endpoint, **kwargs) + response = format_and_execute_request(self._client, _request, self._config.endpoint) handle_response_error(response, error_map) return response.json().get("result") @@ -357,9 +367,9 @@ def prepare_mcptools_invoke_tool_request_content(tool: FoundryTool, arguments: M "name": tool.name, "arguments": dict(arguments), } - + if tool.tool_definition: - + key_overrides = tool_overrides.get(tool.name, {}) meta_config = MetadataMapper.prepare_metadata_dict( tool.metadata, @@ -404,7 +414,7 @@ def build_mcptools_invoke_tool_request( class RemoteToolsOperations: def __init__(self, *args, **kwargs) -> None: """Initialize Tools API client. - + :param client: Azure PipelineClient for HTTP requests. :type client: ~azure.core.PipelineClient :param config: Configuration object. 
@@ -418,7 +428,7 @@ def __init__(self, *args, **kwargs) -> None: if self._client is None or self._config is None: raise ValueError("Both 'client' and 'config' must be provided") - + # Apply agent name substitution to endpoint paths self.agent = self._config.agent_name.strip() if self._config.agent_name and self._config.agent_name.strip() else "$default" self._api_version = API_VERSION @@ -432,12 +442,12 @@ def resolve_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool] result = build_resolve_tools_request(self.agent, self._api_version, self._config.tool_config, self._config.user, kwargs) if result[0] is None: return [] - + _request, error_map, remaining_kwargs = result response = format_and_execute_request(self._client, _request, self._config.endpoint, **remaining_kwargs) handle_response_error(response, error_map) return process_resolve_tools_response(response, self._config.tool_config._remote_tools, existing_names) - + def invoke_tool( self, tool: FoundryTool, @@ -456,7 +466,7 @@ def invoke_tool( response = format_and_execute_request(self._client, _request, self._config.endpoint) handle_response_error(response, error_map) return process_invoke_remote_tool_response(response) - + def prepare_remotetools_invoke_tool_request_content(tool: FoundryTool, user: UserInfo, arguments: Mapping[str, Any]) -> Any: payload = { "toolName": tool.name, @@ -539,4 +549,3 @@ def build_remotetools_resolve_tools_request( _url = f"/agents/{agent_name}/tools/resolve" return HttpRequest(method="POST", url=_url, headers=_headers, params=_params, **kwargs) - \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py index b8dd5c328780..7844eee8d155 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py @@ 
-12,3 +12,4 @@ class Constants: AGENT_DEBUG_ERRORS = "AGENT_DEBUG_ERRORS" ENABLE_APPLICATION_INSIGHTS_LOGGER = "AGENT_APP_INSIGHTS_ENABLED" AZURE_AI_WORKSPACE_ENDPOINT = "AZURE_AI_WORKSPACE_ENDPOINT" + AZURE_AI_TOOLS_ENDPOINT = "AZURE_AI_TOOLS_ENDPOINT" diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index bc5a15a37775..1724be6e1f3b 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -6,6 +6,7 @@ import inspect import json import os +import time import traceback from abc import abstractmethod from typing import Any, AsyncGenerator, Generator, Optional, Union @@ -20,7 +21,7 @@ from starlette.responses import JSONResponse, Response, StreamingResponse from starlette.routing import Route from starlette.types import ASGIApp - +from ..models import projects as project_models from ..constants import Constants from ..logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger, request_context from ..models import ( @@ -35,14 +36,13 @@ logger = get_logger() DEBUG_ERRORS = os.environ.get(Constants.AGENT_DEBUG_ERRORS, "false").lower() == "true" - class AgentRunContextMiddleware(BaseHTTPMiddleware): def __init__(self, app: ASGIApp, agent: Optional['FoundryCBAgent'] = None): super().__init__(app) self.agent = agent async def dispatch(self, request: Request, call_next): - user_info = {} + user_info: Optional[UserInfo] = None if request.url.path in ("/runs", "/responses"): try: user_info = self.set_user_info_to_context_var(request) @@ -88,8 +88,8 @@ def set_run_context_to_context_var(self, run_context): ctx.update(res) request_context.set(ctx) - def set_user_info_to_context_var(self, request) -> UserInfo: - user_info: UserInfo = None + def set_user_info_to_context_var(self, request) -> Optional[UserInfo]: + user_info: 
Optional[UserInfo] = None try: object_id_header = request.headers.get("x-aml-oid", None) tenant_id_header = request.headers.get("x-aml-tid", None) @@ -111,6 +111,9 @@ def set_user_info_to_context_var(self, request) -> UserInfo: class FoundryCBAgent: + _cached_tools_endpoint: Optional[str] = None + _cached_agent_name: Optional[str] = None + def __init__(self, credentials: Optional["AsyncTokenCredential"] = None, **kwargs: Any) -> None: self.credentials = credentials self.tools = kwargs.get("tools", []) @@ -254,6 +257,118 @@ async def agent_run( ) -> Union[OpenAIResponse, Generator[ResponseStreamEvent, Any, Any], AsyncGenerator[ResponseStreamEvent, Any]]: raise NotImplementedError + async def respond_with_oauth_consent(self, context, error) -> project_models.Response: + """Generate a response indicating that OAuth consent is required. + + :param context: The agent run context. + :type context: AgentRunContext + :param error: The OAuthConsentRequiredError instance. + :type error: OAuthConsentRequiredError + :return: A Response indicating the need for OAuth consent. + :rtype: project_models.Response + """ + output = [ + project_models.OAuthConsentRequestItemResource( + id=context.id_generator.generate_oauthreq_id(), + consent_link=error.consent_url, + server_label="server_label" + ) + ] + agent_id = context.get_agent_id_object() + conversation = context.get_conversation_object() + response = project_models.Response({ + "object": "response", + "id": context.response_id, + "agent": agent_id, + "conversation": conversation, + "metadata": context.request.get("metadata"), + "created_at": int(time.time()), + "output": output, + }) + return response + + async def respond_with_oauth_consent_astream(self, context, error) -> AsyncGenerator[ResponseStreamEvent, None]: + """Generate a response stream indicating that OAuth consent is required. + + :param context: The agent run context. + :type context: AgentRunContext + :param error: The OAuthConsentRequiredError instance. 
+ :type error: OAuthConsentRequiredError + :return: An async generator yielding ResponseStreamEvent instances. + :rtype: AsyncGenerator[ResponseStreamEvent, None] + """ + sequence_number = 0 + agent_id = context.get_agent_id_object() + conversation = context.get_conversation_object() + + response = project_models.Response({ + "object": "response", + "id": context.response_id, + "agent": agent_id, + "conversation": conversation, + "metadata": context.request.get("metadata"), + "status": "in_progress", + "created_at": int(time.time()), + }) + yield project_models.ResponseCreatedEvent(sequence_number=sequence_number, response=response) + sequence_number += 1 + + response = project_models.Response({ + "object": "response", + "id": context.response_id, + "agent": agent_id, + "conversation": conversation, + "metadata": context.request.get("metadata"), + "status": "in_progress", + "created_at": int(time.time()), + }) + yield project_models.ResponseInProgressEvent(sequence_number=sequence_number, response=response) + + sequence_number += 1 + output_index = 0 + oauth_id = context.id_generator.generate_oauthreq_id() + item = project_models.OAuthConsentRequestItemResource({ + "id": oauth_id, + "type": "oauth_consent_request", + "consent_link": error.consent_url, + "server_label": "server_label", + }) + yield project_models.ResponseOutputItemAddedEvent(sequence_number=sequence_number, + output_index=output_index, item=item) + sequence_number += 1 + yield project_models.ResponseStreamEvent({ + "sequence_number": sequence_number, + "output_index": output_index, + "id": oauth_id, + "type": "response.oauth_consent_requested", + "consent_link": error.consent_url, + "server_label": "server_label", + }) + + sequence_number += 1 + yield project_models.ResponseOutputItemDoneEvent(sequence_number=sequence_number, + output_index=output_index, item=item) + sequence_number += 1 + output = [ + project_models.OAuthConsentRequestItemResource( + id= oauth_id, + consent_link=error.consent_url, 
+ server_label="server_label" + ) + ] + + response = project_models.Response({ + "object": "response", + "id": context.response_id, + "agent": agent_id, + "conversation": conversation, + "metadata": context.request.get("metadata"), + "created_at": int(time.time()), + "status": "completed", + "output": output, + }) + yield project_models.ResponseCompletedEvent(sequence_number=sequence_number, response=response) + async def agent_liveness(self, request) -> Union[Response, dict]: return Response(status_code=200) @@ -335,6 +450,81 @@ def setup_otlp_exporter(self, endpoint, provider): provider.add_span_processor(processor) logger.info(f"Tracing setup with OTLP exporter: {endpoint}") + @staticmethod + def _configure_endpoint() -> tuple[str, Optional[str]]: + """Configure and return the tools endpoint and agent name from environment variables. + + :return: A tuple of (tools_endpoint, agent_name). + :rtype: tuple[str, Optional[str]] + """ + if not FoundryCBAgent._cached_tools_endpoint: + project_endpoint_format: str = "https://{account_name}.services.ai.azure.com/api/projects/{project_name}" + workspace_endpoint = os.getenv(Constants.AZURE_AI_WORKSPACE_ENDPOINT) + tools_endpoint = os.getenv(Constants.AZURE_AI_TOOLS_ENDPOINT) + project_endpoint = os.getenv(Constants.AZURE_AI_PROJECT_ENDPOINT) + + if not tools_endpoint: + # project endpoint corrupted could have been an overridden enviornment variable + # try to reconstruct tools endpoint from workspace endpoint + # Robustly reconstruct project_endpoint from workspace_endpoint if needed. 
+ + if workspace_endpoint: + # Expected format: + # "https://.api.azureml.ms/subscriptions//resourceGroups// + # providers/Microsoft.MachineLearningServices/workspaces/@@AML" + from urllib.parse import urlparse + parsed_url = urlparse(workspace_endpoint) + path_parts = [p for p in parsed_url.path.split('/') if p] + # Find the 'workspaces' part and extract account_name@project_name@AML + try: + workspaces_idx = path_parts.index("workspaces") + if workspaces_idx + 1 >= len(path_parts): + raise ValueError( + f"Workspace endpoint path does not contain workspace info " + f"after 'workspaces': {workspace_endpoint}" + ) + workspace_info = path_parts[workspaces_idx + 1] + workspace_parts = workspace_info.split('@') + if len(workspace_parts) < 2: + raise ValueError( + f"Workspace info '{workspace_info}' does not contain both account_name " + f"and project_name separated by '@'." + ) + account_name = workspace_parts[0] + project_name = workspace_parts[1] + # Documented expected format for PROJECT_ENDPOINT_FORMAT: + # "https://.api.azureml.ms/api/projects/{project_name}" + project_endpoint = project_endpoint_format.format( + account_name=account_name, project_name=project_name + ) + except (ValueError, IndexError) as e: + raise ValueError( + f"Failed to reconstruct project endpoint from workspace endpoint " + f"'{workspace_endpoint}': {e}" + ) from e + # should never reach here + logger.info("Reconstructed tools endpoint from project endpoint %s", project_endpoint) + tools_endpoint = project_endpoint + + tools_endpoint = project_endpoint + + if not tools_endpoint: + raise ValueError( + "Project endpoint needed for Azure AI tools endpoint is not found. " + ) + FoundryCBAgent._cached_tools_endpoint = tools_endpoint + + agent_name = os.getenv(Constants.AGENT_NAME) + if agent_name is None: + if os.getenv("CONTAINER_APP_NAME"): + raise ValueError( + "Agent name needed for Azure AI hosted agents is not found. 
" + ) + agent_name = "$default" + FoundryCBAgent._cached_agent_name = agent_name + + return FoundryCBAgent._cached_tools_endpoint, FoundryCBAgent._cached_agent_name + def get_tool_client( self, tools: Optional[list[ToolDefinition]], user_info: Optional[UserInfo] ) -> AzureAIToolClient: @@ -342,23 +532,14 @@ def get_tool_client( if not self.credentials: raise ValueError("Credentials are required to create Tool Client.") - workspace_endpoint = os.getenv(Constants.AZURE_AI_WORKSPACE_ENDPOINT) - if workspace_endpoint: - agent_name = os.getenv(Constants.AGENT_NAME) - if not agent_name: - raise ValueError("AGENT_NAME environment variable is required when using workspace endpoint.") - return AzureAIToolClient( - endpoint=workspace_endpoint, + tools_endpoint, agent_name = self._configure_endpoint() + + return AzureAIToolClient( + endpoint=tools_endpoint, credential=self.credentials, tools=tools, user=user_info, agent_name=agent_name, - ) - return AzureAIToolClient( - endpoint=os.getenv(Constants.AZURE_AI_PROJECT_ENDPOINT), - credential=self.credentials, - tools=tools, - user=user_info, ) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/id_generator.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/id_generator.py index 48f0d9add17d..5b602a7fc686 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/id_generator.py @@ -17,3 +17,6 @@ def generate_function_output_id(self) -> str: def generate_message_id(self) -> str: return self.generate("msg") + + def generate_oauthreq_id(self) -> str: + return self.generate("oauthreq") diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index c7c4aaaa6369..6b02bba9a0fd 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -2,6 +2,10 @@ ## 1.0.0b5 (2025-11-16) +### Feature Added + +- Support Tools Oauth + ### Bugs Fixed - Fixed streaming generation issues. diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index beae4faf6499..65d76c6a5a03 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -11,6 +11,7 @@ from langchain_core.tools import StructuredTool from langgraph.graph.state import CompiledStateGraph +from azure.ai.agentserver.core.client.tools import OAuthConsentRequiredError from azure.ai.agentserver.core.constants import Constants from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.server.base import FoundryCBAgent @@ -31,7 +32,7 @@ class GraphFactory(Protocol): """Protocol for graph factory functions. - + A graph factory is a callable that takes a ToolClient and returns a CompiledStateGraph, either synchronously or asynchronously. """ @@ -93,7 +94,7 @@ def __init__( def graph(self) -> "Optional[CompiledStateGraph]": """ Get the resolved graph. This property provides backward compatibility. - + :return: The resolved CompiledStateGraph if available, None otherwise. 
:rtype: Optional[CompiledStateGraph] """ @@ -115,17 +116,34 @@ async def agent_run(self, context: AgentRunContext): input_data = self.state_converter.request_to_state(context) logger.debug(f"Converted input data: {input_data}") if not context.stream: - response = await self.agent_run_non_stream(input_data, context, graph) - return response + try: + response = await self.agent_run_non_stream(input_data, context, graph) + return response + finally: + # Close tool_client for non-streaming requests + if tool_client is not None: + try: + await tool_client.close() + logger.debug("Closed tool_client after non-streaming request") + except Exception as e: + logger.warning(f"Error closing tool_client: {e}") + + # For streaming, pass tool_client to be closed after streaming completes return self.agent_run_astream(input_data, context, graph, tool_client) - finally: - # Close tool_client if it was created for this request + except OAuthConsentRequiredError as e: + # Clean up tool_client if OAuth error occurs before streaming starts if tool_client is not None: - try: - await tool_client.close() - logger.debug("Closed tool_client after request processing") - except Exception as e: - logger.warning(f"Error closing tool_client: {e}") + await tool_client.close() + + if not context.stream: + response = await self.respond_with_oauth_consent(context, e) + return response + return self.respond_with_oauth_consent_astream(context, e) + except Exception: + # Clean up tool_client if error occurs before streaming starts + if tool_client is not None: + await tool_client.close() + raise async def _resolve_graph(self, context: AgentRunContext): """Resolve the graph if it's a factory function (for single-use/first-time resolution). @@ -169,7 +187,7 @@ async def _resolve_graph_for_request(self, context: AgentRunContext): Resolve a fresh graph instance for a single request to avoid concurrency issues. Creates a ToolClient and calls the factory function with it. 
This method returns a new graph instance and the tool_client for cleanup. - + :param context: The context for the agent run. :type context: AgentRunContext :return: A tuple of (compiled graph instance, tool_client wrapper). diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py index 374db1d1d98b..cde9a3756a58 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py @@ -124,7 +124,9 @@ async def tool_func(**kwargs: Any) -> str: :return: The result from the tool invocation as a string. :rtype: str + :raises OAuthConsentRequiredError: If OAuth consent is required for the tool invocation. """ + # Let OAuthConsentRequiredError propagate up to be handled by the agent result = await azure_tool(**kwargs) # Convert result to string for LangChain compatibility if isinstance(result, dict): diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py index f77a0b31b1d5..7daa62d0ec9f 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py @@ -44,13 +44,14 @@ async def quickstart(): "Set it to your Azure AI project endpoint, e.g., " "https://.services.ai.azure.com/api/projects/" ) + tool_connection_id = os.getenv("AZURE_AI_PROJECT_TOOL_CONNECTION_ID") # Create Azure credentials credential = DefaultAzureCredential() tool_definitions = [ { "type": "mcp", - "project_connection_id": "" + "project_connection_id": tool_connection_id }, { "type": "code_interpreter", From 
9cdf214fba92f636d5c07af1691ae4570354b91c Mon Sep 17 00:00:00 2001 From: junanchen Date: Mon, 17 Nov 2025 00:21:14 -0800 Subject: [PATCH 22/94] Fix function output parse --- ...nt_framework_output_streaming_converter.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 96beb535d3fb..2a1d2fb55366 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -7,7 +7,7 @@ import datetime import json -from typing import AsyncIterable, List, Optional +from typing import Any, AsyncIterable, List, Optional from agent_framework import AgentRunResponseUpdate, BaseContent, FunctionApprovalRequestContent, FunctionResultContent from agent_framework._types import ( @@ -201,7 +201,7 @@ async def convert_contents(self, contents: AsyncIterable[FunctionResultContent]) output = (f"{type(content.exception)}({str(content.exception)})" if content.exception - else json.dumps(content.result)) + else self._to_output(content.result)) item = FunctionToolCallOutputItemResource( id=item_id, @@ -224,6 +224,21 @@ async def convert_contents(self, contents: AsyncIterable[FunctionResultContent]) self._parent.add_completed_output_item(item) # pylint: disable=protected-access + @classmethod + def _to_output(cls, result: Any) -> str: + if isinstance(result, str): + return result + elif isinstance(result, list): + text = [] + for item in result: + if isinstance(item, BaseContent): + text.append(item.to_dict()) + else: + text.append(str(item)) + 
return json.dumps(text) + else: + return "" + class AgentFrameworkOutputStreamingConverter: """Streaming converter using content-type-specific state handlers.""" From 9b46eb90e8d6d6a7784e7e0a1f0ee162e3faa174 Mon Sep 17 00:00:00 2001 From: Ganesh Bheemarasetty Date: Mon, 17 Nov 2025 01:12:56 -0800 Subject: [PATCH 23/94] Refactor ToolClient to handle optional schema properties and required fields --- .../agentserver/agentframework/tool_client.py | 38 +++++++++---------- .../ai/agentserver/langgraph/tool_client.py | 4 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py index 6f410c29d484..8b7142f0862a 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py @@ -16,32 +16,32 @@ # pylint: disable=client-accepts-api-version-keyword,missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs class ToolClient: """Client that integrates AzureAIToolClient with Agent Framework. - + This class provides methods to list tools from AzureAIToolClient and invoke them in a format compatible with Agent Framework agents. - + :param tool_client: The AzureAIToolClient instance to use for tool operations. :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient - + .. admonition:: Example: - + .. 
code-block:: python - + from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient from azure.ai.agentserver.agentframework import ToolClient from azure.identity.aio import DefaultAzureCredential - + async with DefaultAzureCredential() as credential: tool_client = AzureAIToolClient( endpoint="https://", credential=credential ) - + client = ToolClient(tool_client) - + # List tools as Agent Framework tool definitions tools = await client.list_tools() - + # Invoke a tool directly result = await client.invoke_tool( tool_name="my_tool", @@ -53,7 +53,7 @@ class ToolClient: def __init__(self, tool_client: "AzureAIToolClient") -> None: """Initialize the ToolClient. - + :param tool_client: The AzureAIToolClient instance to use for tool operations. :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient """ @@ -62,19 +62,19 @@ def __init__(self, tool_client: "AzureAIToolClient") -> None: async def list_tools(self) -> List[AIFunction]: """List all available tools as Agent Framework tool definitions. - + Retrieves tools from AzureAIToolClient and returns them in a format compatible with Agent Framework. - + :return: List of tool definitions. :rtype: List[AIFunction] :raises ~azure.core.exceptions.HttpResponseError: Raised for HTTP communication failures. - + .. admonition:: Example: - + .. code-block:: python - + client = ToolClient(tool_client) tools = await client.list_tools() """ @@ -94,7 +94,7 @@ async def list_tools(self) -> List[AIFunction]: def _convert_to_agent_framework_tool(self, azure_tool: "FoundryTool") -> AIFunction: """Convert an AzureAITool to an Agent Framework AI Function - + :param azure_tool: The AzureAITool to convert. :type azure_tool: ~azure.ai.agentserver.core.client.tools.aio.FoundryTool :return: An AI Function Tool. 
@@ -104,8 +104,8 @@ def _convert_to_agent_framework_tool(self, azure_tool: "FoundryTool") -> AIFunct input_schema = azure_tool.input_schema or {} # Create a Pydantic model from the input schema - properties = input_schema.get("properties", {}) - required_fields = set(input_schema.get("required", [])) + properties = input_schema.get("properties") or {} + required_fields = set(input_schema.get("required") or []) # Build field definitions for the Pydantic model field_definitions: Dict[str, Any] = {} @@ -146,7 +146,7 @@ async def tool_func(**kwargs: Any) -> Any: def _json_schema_type_to_python(self, json_type: str) -> type: """Convert JSON schema type to Python type. - + :param json_type: The JSON schema type string. :type json_type: str :return: The corresponding Python type. diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py index cde9a3756a58..78baf96bee80 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py @@ -159,8 +159,8 @@ def _create_pydantic_model( :rtype: type[BaseModel] """ # Get properties from schema - properties = schema.get("properties", {}) - required_fields = schema.get("required", []) + properties = schema.get("properties") or {} + required_fields = schema.get("required") or [] # Build field definitions for Pydantic model field_definitions = {} From f50155a8316fabdf12a5092f6757e35f211cb68a Mon Sep 17 00:00:00 2001 From: junanchen Date: Mon, 17 Nov 2025 09:42:19 -0800 Subject: [PATCH 24/94] fix mypy error on AF --- .../models/agent_framework_output_streaming_converter.py | 2 ++ 1 file changed, 2 insertions(+) diff --git 
a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 2a1d2fb55366..0b862020c721 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -302,6 +302,8 @@ async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> Async param="", ) continue + if not state: + continue async for content in state.convert_contents(contents): yield content From b0dcb07445ea4264c8409eb398ea29f126009b4c Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 17 Nov 2025 09:59:23 -0800 Subject: [PATCH 25/94] do not index AgentRunContext --- .../doc/azure.ai.agentserver.core.server.common.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst index 26c4aaf4d15a..01e54afab103 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst @@ -24,3 +24,4 @@ azure.ai.agentserver.core.server.common.agent\_run\_context module :inherited-members: :members: :undoc-members: + :no-index: From b72be774556331c04c4f69da2e318324e75ecef7 Mon Sep 17 00:00:00 2001 From: Ganesh Bheemarasetty Date: Mon, 17 Nov 2025 10:04:50 -0800 Subject: [PATCH 26/94] Filter tools and update project dependenceis --- .../pyproject.toml | 4 +-- .../agentserver/core/client/tools/_client.py | 16 ++++++++--- 
.../core/client/tools/aio/_client.py | 27 ++++++++++++++----- .../pyproject.toml | 4 +-- 4 files changed, 36 insertions(+), 15 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml index 052d36d10c7d..e7422ec02146 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml @@ -20,7 +20,7 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-ai-agentserver-core", + "azure-ai-agentserver-core>=1.0.0b5", "agent-framework-azure-ai==1.0.0b251007", "agent-framework-core==1.0.0b251007", "opentelemetry-exporter-otlp-proto-grpc>=1.36.0", @@ -66,4 +66,4 @@ pyright = false verifytypes = false # incompatible python version for -core verify_keywords = false mindependency = false # depends on -core package -whl_no_aio = false \ No newline at end of file +whl_no_aio = false diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py index ea9a8479637f..f28beb498fa8 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py @@ -104,12 +104,20 @@ def list_tools(self) -> List[FoundryTool]: tools: List[FoundryTool] = [] # Fetch MCP tools - mcp_tools = self._mcp_tools.list_tools(existing_names) - tools.extend(mcp_tools) + if ( + self._config.tool_config._named_mcp_tools + and len(self._config.tool_config._named_mcp_tools) > 0 + ): + mcp_tools = self._mcp_tools.list_tools(existing_names) + tools.extend(mcp_tools) # Fetch Tools API tools - tools_api_tools = self._remote_tools.resolve_tools(existing_names) - tools.extend(tools_api_tools) + if ( + self._config.tool_config._remote_tools + 
and len(self._config.tool_config._remote_tools) > 0 + ): + tools_api_tools = self._remote_tools.resolve_tools(existing_names) + tools.extend(tools_api_tools) for tool in tools: # Capture tool in a closure to avoid shadowing issues diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py index e2d35b1dd919..277be7930df5 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py @@ -3,7 +3,7 @@ # --------------------------------------------------------- from typing import Any, List, Mapping, Union, TYPE_CHECKING - +from asyncio import gather from azure.core import AsyncPipelineClient from azure.core.pipeline import policies from azure.core.tracing.decorator_async import distributed_trace_async @@ -107,12 +107,25 @@ async def list_tools(self) -> List[FoundryTool]: tools: List[FoundryTool] = [] - # Fetch MCP tools - mcp_tools = await self._mcp_tools.list_tools(existing_names) - tools.extend(mcp_tools) - # Fetch Tools API tools - tools_api_tools = await self._remote_tools.resolve_tools(existing_names) - tools.extend(tools_api_tools) + # Fetch MCP tools and Tools API tools in parallel + # Build list of coroutines to gather based on configuration + tasks = [] + if ( + self._config.tool_config._named_mcp_tools + and len(self._config.tool_config._named_mcp_tools) > 0 + ): + tasks.append(self._mcp_tools.list_tools(existing_names)) + if ( + self._config.tool_config._remote_tools + and len(self._config.tool_config._remote_tools) > 0 + ): + tasks.append(self._remote_tools.resolve_tools(existing_names)) + + # Execute all tasks in parallel if any exist + if tasks: + results = await gather(*tasks) + for result in results: + tools.extend(result) for tool in tools: # Capture tool in a 
closure to avoid shadowing issues diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml index a5140068e12d..77492d1496dd 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml @@ -19,7 +19,7 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-ai-agentserver-core", + "azure-ai-agentserver-core>=1.0.0b5", "langchain>0.3.5", "langchain-openai>0.3.10", "langchain-azure-ai[opentelemetry]>=0.1.4", @@ -67,4 +67,4 @@ pyright = false verifytypes = false # incompatible python version for -core verify_keywords = false mindependency = false # depends on -core package -whl_no_aio = false \ No newline at end of file +whl_no_aio = false From bee67f7c9d281b5eb014f226091e9bb5aa50c4bc Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 17 Nov 2025 10:08:35 -0800 Subject: [PATCH 27/94] fixing pylint --- .../ai/agentserver/agentframework/agent_framework.py | 6 ++++-- .../models/agent_framework_output_streaming_converter.py | 9 +++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 2a7c28f9a3f8..81082ade7d7c 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation,no-name-in-module +# pylint: disable=logging-fstring-interpolation,no-name-in-module,no-member from __future__ import annotations import os @@ -239,7 +239,9 @@ async def stream_updates(): # logger.debug("Agent streaming iterator finished (StopAsyncIteration)") # break # except asyncio.TimeoutError: - # logger.warning("Streaming idle timeout reached (%.1fs); terminating stream.", timeout_s) + # logger.warning( + # "Streaming idle timeout reached (%.1fs); terminating stream.", timeout_s + # ) # for ev in streaming_converter.completion_events(): # yield ev # return diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 0b862020c721..d09e1f84fc24 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -228,7 +228,7 @@ async def convert_contents(self, contents: AsyncIterable[FunctionResultContent]) def _to_output(cls, result: Any) -> str: if isinstance(result, str): return result - elif isinstance(result, list): + if isinstance(result, list): text = [] for item in result: if isinstance(item, BaseContent): @@ -236,8 +236,7 @@ def _to_output(cls, result: Any) -> str: else: text.append(str(item)) return json.dumps(text) - else: - return "" + return "" class AgentFrameworkOutputStreamingConverter: @@ -281,7 +280,9 @@ async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> Async response=created_response, ) - is_changed = lambda a, b: a is not None and b is not 
None and a.message_id != b.message_id + is_changed = ( + lambda a, b: a is not None and b is not None and a.message_id != b.message_id + ) async for group in chunk_on_change(updates, is_changed): has_value, first, contents = await peek(self._read_updates(group)) if not has_value: From cba5fdc74de280af08cc0074c79e022ecb8f61de Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 17 Nov 2025 10:41:48 -0800 Subject: [PATCH 28/94] fix build --- ...nt_framework_output_streaming_converter.py | 29 ++++++++++--------- .../agentframework/models/utils/async_iter.py | 13 +++++++-- .../agentserver/core/client/tools/_client.py | 2 +- .../core/client/tools/aio/_client.py | 2 +- .../ai/agentserver/langgraph/langgraph.py | 2 +- 5 files changed, 30 insertions(+), 18 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index d09e1f84fc24..47aafbeb8a49 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -7,7 +7,7 @@ import datetime import json -from typing import Any, AsyncIterable, List, Optional +from typing import Any, AsyncIterable, List from agent_framework import AgentRunResponseUpdate, BaseContent, FunctionApprovalRequestContent, FunctionResultContent from agent_framework._types import ( @@ -48,7 +48,7 @@ class _BaseStreamingState: """Base interface for streaming state handlers.""" - def convert_contents(self, contents: AsyncIterable[BaseContent]) -> AsyncIterable[ResponseStreamEvent]: # pylint: disable=unused-argument + async def convert_contents(self, contents: 
AsyncIterable[BaseContent]) -> AsyncIterable[ResponseStreamEvent]: # pylint: disable=unused-argument raise NotImplementedError @@ -126,7 +126,9 @@ class _FunctionCallStreamingState(_BaseStreamingState): def __init__(self, parent: AgentFrameworkOutputStreamingConverter): self._parent = parent - async def convert_contents(self, contents: AsyncIterable[FunctionCallContent]) -> AsyncIterable[ResponseStreamEvent]: + async def convert_contents( + self, contents: AsyncIterable[FunctionCallContent] + ) -> AsyncIterable[ResponseStreamEvent]: content_by_call_id = {} ids_by_call_id = {} @@ -149,18 +151,17 @@ async def convert_contents(self, contents: AsyncIterable[FunctionCallContent]) - arguments="", ), ) - continue else: content_by_call_id[content.call_id] = content_by_call_id[content.call_id] + content item_id, output_index = ids_by_call_id[content.call_id] - args_delta = content.arguments if isinstance(content.arguments, str) else "" - yield ResponseFunctionCallArgumentsDeltaEvent( - sequence_number=self._parent.next_sequence(), - item_id=item_id, - output_index=output_index, - delta=args_delta, - ) + args_delta = content.arguments if isinstance(content.arguments, str) else "" + yield ResponseFunctionCallArgumentsDeltaEvent( + sequence_number=self._parent.next_sequence(), + item_id=item_id, + output_index=output_index, + delta=args_delta, + ) for call_id, content in content_by_call_id.items(): item_id, output_index = ids_by_call_id[call_id] @@ -194,7 +195,9 @@ class _FunctionCallOutputStreamingState(_BaseStreamingState): def __init__(self, parent: AgentFrameworkOutputStreamingConverter): self._parent = parent - async def convert_contents(self, contents: AsyncIterable[FunctionResultContent]) -> AsyncIterable[ResponseStreamEvent]: + async def convert_contents( + self, contents: AsyncIterable[FunctionResultContent] + ) -> AsyncIterable[ResponseStreamEvent]: async for content in contents: item_id = self._parent.context.id_generator.generate_function_output_id() output_index = 
self._parent.next_output_index() @@ -281,7 +284,7 @@ async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> Async ) is_changed = ( - lambda a, b: a is not None and b is not None and a.message_id != b.message_id + lambda a, b: a is not None and b is not None and a.message_id != b.message_id # pylint: disable=unnecessary-lambda-assignment ) async for group in chunk_on_change(updates, is_changed): has_value, first, contents = await peek(self._read_updates(group)) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py index ef8525109554..fdf3b2fbb2a3 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py @@ -4,7 +4,7 @@ from __future__ import annotations from collections.abc import AsyncIterable, AsyncIterator, Callable -from typing import TypeVar, Optional, Tuple, Awaitable +from typing import TypeVar, Optional, Tuple TSource = TypeVar("TSource") TKey = TypeVar("TKey") @@ -19,9 +19,12 @@ async def chunk_on_change( Chunks an async iterable into groups based on when consecutive elements change. :param source: Async iterable of items. + :type source: AsyncIterable[TSource] :param is_changed: Function(prev, current) -> bool indicating if value changed. If None, uses != by default. + :type is_changed: Optional[Callable[[Optional[TSource], Optional[TSource]], bool]] :return: An async iterator of async iterables (chunks). + :rtype: AsyncIterator[AsyncIterable[TSource]] """ if is_changed is None: @@ -46,9 +49,13 @@ async def chunk_by_key( Chunks the async iterable into groups based on a key selector. :param source: Async iterable of items. 
+ :type source: AsyncIterable[TSource] :param key_selector: Function mapping item -> key. + :type key_selector: Callable[[TSource], TKey] :param key_equal: Optional equality function for keys. Defaults to '=='. + :type key_equal: Optional[Callable[[TKey, TKey], bool]] :return: An async iterator of async iterables (chunks). + :rtype: AsyncIterator[AsyncIterable[TSource]] """ if key_equal is None: @@ -104,7 +111,9 @@ async def peek( Peeks at the first element of an async iterable without consuming it. :param source: Async iterable. + :type source: AsyncIterable[T] :return: (has_value, first, full_sequence_including_first) + :rtype: Tuple[bool, Optional[T], AsyncIterable[T]] """ it = source.__aiter__() @@ -131,6 +140,6 @@ async def sequence() -> AsyncIterator[T]: async def _empty_async() -> AsyncIterator[T]: - if False: + if False: # pylint: disable=using-constant-test # This is just to make this an async generator for typing yield None # type: ignore[misc] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py index f28beb498fa8..ee56a4d44a94 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- - +# pylint: disable=protected-access from typing import Any, List, Mapping, Union from azure.core import PipelineClient from azure.core.pipeline import policies diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py index 277be7930df5..986e8756e1b6 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- - +# pylint: disable=protected-access,do-not-import-asyncio from typing import Any, List, Mapping, Union, TYPE_CHECKING from asyncio import gather from azure.core import AsyncPipelineClient diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index 65d76c6a5a03..e6bf10d0b5c2 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation,broad-exception-caught +# pylint: disable=logging-fstring-interpolation,broad-exception-caught,no-member # mypy: disable-error-code="assignment,arg-type" import os import re From b5b2086d793c77eaf0f44ee135c9b27c2f3c2d90 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 17 Nov 2025 11:18:04 -0800 Subject: [PATCH 29/94] fix mypy --- .../models/agent_framework_output_streaming_converter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 47aafbeb8a49..72aea41419a8 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -2,7 +2,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- # pylint: disable=attribute-defined-outside-init,protected-access -# mypy: disable-error-code="call-overload,assignment,arg-type" +# mypy: disable-error-code="call-overload,assignment,arg-type,override" from __future__ import annotations import datetime From 7d42f0db0be6d56d3b10502f861ed57b61a4d54f Mon Sep 17 00:00:00 2001 From: junanchen Date: Mon, 17 Nov 2025 13:23:17 -0800 Subject: [PATCH 30/94] remove DONE when getting error --- .../agentframework/agent_framework.py | 26 ------------------- ...nt_framework_output_streaming_converter.py | 17 +++++++----- .../azure/ai/agentserver/core/server/base.py | 1 - 3 files changed, 10 insertions(+), 34 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 81082ade7d7c..07142d888d37 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -225,32 +225,6 @@ async def stream_updates(): update_count += 1 yield event - # timeout_s = self._resolve_stream_timeout(context.request) - # logger.info("Starting streaming with idle-timeout=%.2fs", timeout_s) - # for ev in streaming_converter.initial_events(): - # yield ev - # - # # Iterate with per-update timeout; terminate if idle too long - # aiter = agent.run_stream(message).__aiter__() - # while True: - # try: - # update = await asyncio.wait_for(aiter.__anext__(), timeout=timeout_s) - # except StopAsyncIteration: - # logger.debug("Agent streaming iterator finished (StopAsyncIteration)") - # break - # except asyncio.TimeoutError: - # logger.warning( - # "Streaming idle timeout reached (%.1fs); terminating stream.", timeout_s - # ) - # for ev in 
streaming_converter.completion_events(): - # yield ev - # return - # update_count += 1 - # transformed = streaming_converter.transform_output_for_streaming(update) - # for event in transformed: - # yield event - # for ev in streaming_converter.completion_events(): - # yield ev logger.info("Streaming completed with %d updates", update_count) finally: # Close tool_client if it was created for this request diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 72aea41419a8..406c8191850a 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -299,13 +299,16 @@ async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> Async elif isinstance(first, FunctionResultContent): state = _FunctionCallOutputStreamingState(self) elif isinstance(first, ErrorContent): - yield ResponseErrorEvent( - sequence_number=self.next_sequence(), - code=getattr(first, "error_code", None) or "server_error", - message=getattr(first, "message", None) or "An error occurred", - param="", - ) - continue + code=getattr(first, "error_code", None) or "server_error" + message=getattr(first, "message", None) or "An error occurred" + raise ValueError(f"ErrorContent received: code={code}, message={message}") + # yield ResponseErrorEvent( + # sequence_number=self.next_sequence(), + # code=getattr(first, "error_code", None) or "server_error", + # message=getattr(first, "message", None) or "An error occurred", + # param="", + # ) + # continue if not state: continue diff --git 
a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 1724be6e1f3b..618bb13441e5 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -158,7 +158,6 @@ def gen(): logger.error("Error in non-async generator: %s\n%s", e, traceback.format_exc()) payload = {"error": err_msg} yield f"event: error\ndata: {json.dumps(payload)}\n\n" - yield "data: [DONE]\n\n" error_sent = True finally: logger.info("End of processing CreateResponse request.") From dfd1bd18803a7e5e71597745b087bd425dad5e1b Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 17 Nov 2025 13:59:31 -0800 Subject: [PATCH 31/94] fix pylint --- .../models/agent_framework_output_streaming_converter.py | 1 - 1 file changed, 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 406c8191850a..12cfab983643 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -30,7 +30,6 @@ ResponseContentPartAddedEvent, ResponseContentPartDoneEvent, ResponseCreatedEvent, - ResponseErrorEvent, ResponseFunctionCallArgumentsDeltaEvent, ResponseFunctionCallArgumentsDoneEvent, ResponseInProgressEvent, From c18d494dbdf492c14cc6c2df8d1f53c6a97094af Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 17 Nov 2025 15:34:15 -0800 Subject: [PATCH 32/94] do not add user oid to tracing --- 
.../azure/ai/agentserver/core/server/base.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 618bb13441e5..cf7d4567f37f 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -105,6 +105,8 @@ def set_user_info_to_context_var(self, request) -> Optional[UserInfo]: if user_info: ctx = request_context.get() or {} for key, value in user_info.to_dict().items(): + if key == "objectId": + continue # skip user objectId ctx[f"azure.ai.agentserver.user.{key}"] = str(value) request_context.set(ctx) return user_info From 30b78e9064d21ed83d1dc85c3c05f984d8e87af5 Mon Sep 17 00:00:00 2001 From: lusu-msft <68949729+lusu-msft@users.noreply.github.com> Date: Wed, 19 Nov 2025 10:57:33 -0800 Subject: [PATCH 33/94] [AgentServer][Agentframework] update agent framework version (#44102) * upgrade af version * update agent framework azure client * revert change on -core * async init af tracing * fix minor --- .../agentframework/agent_framework.py | 23 +++++++++++++++---- .../pyproject.toml | 4 ++-- .../azure/ai/agentserver/core/server/base.py | 3 ++- .../azure-ai-agentserver-core/pyproject.toml | 2 +- 4 files changed, 24 insertions(+), 8 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 07142d888d37..01929cd00040 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -9,7 +9,7 @@ import inspect from 
agent_framework import AgentProtocol, AIFunction -from agent_framework.azure import AzureAIAgentClient # pylint: disable=no-name-in-module +from agent_framework.azure import AzureAIClient # pylint: disable=no-name-in-module from opentelemetry import trace from azure.ai.agentserver.core.client.tools import OAuthConsentRequiredError @@ -182,11 +182,26 @@ def init_tracing(self): applicationinsights_connection_string=app_insights_conn_str, ) elif project_endpoint: - project_client = AIProjectClient(endpoint=project_endpoint, credential=DefaultAzureCredential()) - agent_client = AzureAIAgentClient(project_client=project_client) - agent_client.setup_azure_ai_observability() + self.setup_tracing_with_azure_ai_client(project_endpoint) self.tracer = trace.get_tracer(__name__) + def setup_tracing_with_azure_ai_client(self, project_endpoint: str): + async def setup_async(): + async with AzureAIClient( + project_endpoint=project_endpoint, async_credential=self.credentials + ) as agent_client: + await agent_client.setup_azure_ai_observability() + + import asyncio + + loop = asyncio.get_event_loop() + if loop.is_running(): + # If loop is already running, schedule as a task + asyncio.create_task(setup_async()) + else: + # Run in new event loop + loop.run_until_complete(setup_async()) + async def agent_run( # pylint: disable=too-many-statements self, context: AgentRunContext ) -> Union[ diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml index e7422ec02146..19840e57fadb 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml @@ -21,8 +21,8 @@ keywords = ["azure", "azure sdk"] dependencies = [ "azure-ai-agentserver-core>=1.0.0b5", - "agent-framework-azure-ai==1.0.0b251007", - "agent-framework-core==1.0.0b251007", + "agent-framework-azure-ai>=1.0.0b251112", + 
"agent-framework-core>=1.0.0b251112", "opentelemetry-exporter-otlp-proto-grpc>=1.36.0", ] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index cf7d4567f37f..2603fd997b0e 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -12,6 +12,7 @@ from typing import Any, AsyncGenerator, Generator, Optional, Union import uvicorn +from azure.identity.aio import DefaultAzureCredential as AsyncDefaultTokenCredential from opentelemetry import context as otel_context, trace from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from starlette.applications import Starlette @@ -117,7 +118,7 @@ class FoundryCBAgent: _cached_agent_name: Optional[str] = None def __init__(self, credentials: Optional["AsyncTokenCredential"] = None, **kwargs: Any) -> None: - self.credentials = credentials + self.credentials = credentials or AsyncDefaultTokenCredential() self.tools = kwargs.get("tools", []) async def runs_endpoint(request): diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index ad882b2ab596..76683e032cd0 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -20,7 +20,7 @@ keywords = ["azure", "azure sdk"] dependencies = [ "azure-monitor-opentelemetry>=1.5.0", - "azure-ai-projects==1.1.0b4", + "azure-ai-projects>=1.1.0b4", "azure-ai-agents==1.2.0b5", "azure-core>=1.35.0", "azure-identity", From 1137076289bbd10bed7e9ec4cf450a8a1fe30fa7 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Wed, 19 Nov 2025 16:44:33 -0800 Subject: [PATCH 34/94] fix dependency --- sdk/agentserver/azure-ai-agentserver-core/pyproject.toml | 1 + 1 file changed, 1 
insertion(+) diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index 76683e032cd0..4b2f295cec80 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -29,6 +29,7 @@ dependencies = [ "opentelemetry-exporter-otlp-proto-http", "starlette>=0.45.0", "uvicorn>=0.31.0", + "aiohttp", # used by azure-identity aio ] [build-system] From b7a2280fc87d2418e37d09602e529d0e14737b3d Mon Sep 17 00:00:00 2001 From: lusu-msft <68949729+lusu-msft@users.noreply.github.com> Date: Fri, 21 Nov 2025 16:29:28 -0800 Subject: [PATCH 35/94] [AgentServer] fix build pipelines (#44145) * fix mindependency for -core * fix langgraph min dependency * try test with mindependency * disable mindependency for af because of azure-ai-projects version * fix analyze build * upgrade version * fix pylint --- .../azure-ai-agentserver-agentframework/CHANGELOG.md | 11 ++++++++++- .../azure/ai/agentserver/agentframework/_version.py | 2 +- .../ai/agentserver/agentframework/agent_framework.py | 4 +--- .../azure-ai-agentserver-agentframework/cspell.json | 3 ++- .../pyproject.toml | 2 +- .../azure-ai-agentserver-core/CHANGELOG.md | 9 +++++++++ .../azure/ai/agentserver/core/_version.py | 2 +- .../azure/ai/agentserver/core/server/base.py | 6 ++++-- sdk/agentserver/azure-ai-agentserver-core/cspell.json | 3 ++- .../azure-ai-agentserver-core/pyproject.toml | 4 ++-- .../azure-ai-agentserver-langgraph/CHANGELOG.md | 8 ++++++++ .../azure/ai/agentserver/langgraph/_version.py | 2 +- .../azure-ai-agentserver-langgraph/pyproject.toml | 8 +++----- 13 files changed, 45 insertions(+), 19 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index a01bc1990909..2393a16515f1 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ 
b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -1,5 +1,13 @@ # Release History + +## 1.0.0b6 (2025-11-26) + +### Feature Added + +- Support Agent-framework greater than 251112 + + ## 1.0.0b5 (2025-11-16) ### Feature Added @@ -10,6 +18,7 @@ - Fixed streaming generation issues. + ## 1.0.0b4 (2025-11-13) ### Feature Added @@ -36,7 +45,7 @@ - Fixed Id generator format. -- Improved stream mode error message. +- Improved stream mode error messsage. - Updated application insights related configuration environment variables. diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py index c7d155d924dd..d17ec8abfb6f 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b5" +VERSION = "1.0.0b6" diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 01929cd00040..4a0a074bd635 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation,no-name-in-module,no-member +# pylint: disable=logging-fstring-interpolation,no-name-in-module,no-member,do-not-import-asyncio from __future__ import annotations import os @@ -21,8 +21,6 @@ Response as OpenAIResponse, ResponseStreamEvent, ) -from azure.ai.projects import AIProjectClient -from azure.identity import DefaultAzureCredential from .models.agent_framework_input_converters import AgentFrameworkInputConverter from .models.agent_framework_output_non_streaming_converter import ( diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json b/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json index 48c11927e406..951bfab2c88a 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json @@ -6,7 +6,8 @@ "envtemplate", "pysort", "redef", - "aifunction" + "aifunction", + "ainvoke" ], "ignorePaths": [ "*.csv", diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml index 19840e57fadb..2b5ddf89a8de 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml @@ -20,7 +20,7 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-ai-agentserver-core>=1.0.0b5", + "azure-ai-agentserver-core>=1.0.0b6", "agent-framework-azure-ai>=1.0.0b251112", "agent-framework-core>=1.0.0b251112", "opentelemetry-exporter-otlp-proto-grpc>=1.36.0", diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index 55a56fed54ca..2393a16515f1 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -1,5 +1,13 @@ # Release History + 
+## 1.0.0b6 (2025-11-26) + +### Feature Added + +- Support Agent-framework greater than 251112 + + ## 1.0.0b5 (2025-11-16) ### Feature Added @@ -10,6 +18,7 @@ - Fixed streaming generation issues. + ## 1.0.0b4 (2025-11-13) ### Feature Added diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py index c7d155d924dd..d17ec8abfb6f 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b5" +VERSION = "1.0.0b6" diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 2603fd997b0e..eeb4b85cdc34 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -12,7 +12,6 @@ from typing import Any, AsyncGenerator, Generator, Optional, Union import uvicorn -from azure.identity.aio import DefaultAzureCredential as AsyncDefaultTokenCredential from opentelemetry import context as otel_context, trace from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from starlette.applications import Starlette @@ -22,6 +21,9 @@ from starlette.responses import JSONResponse, Response, StreamingResponse from starlette.routing import Route from starlette.types import ASGIApp + +from azure.identity.aio import DefaultAzureCredential as AsyncDefaultTokenCredential + from ..models import projects as project_models from ..constants import Constants from ..logger import 
APPINSIGHT_CONNSTR_ENV_NAME, get_logger, request_context @@ -466,7 +468,7 @@ def _configure_endpoint() -> tuple[str, Optional[str]]: project_endpoint = os.getenv(Constants.AZURE_AI_PROJECT_ENDPOINT) if not tools_endpoint: - # project endpoint corrupted could have been an overridden enviornment variable + # project endpoint corrupted could have been an overridden environment variable # try to reconstruct tools endpoint from workspace endpoint # Robustly reconstruct project_endpoint from workspace_endpoint if needed. diff --git a/sdk/agentserver/azure-ai-agentserver-core/cspell.json b/sdk/agentserver/azure-ai-agentserver-core/cspell.json index 17fb91b1e58f..55131ced0609 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/cspell.json +++ b/sdk/agentserver/azure-ai-agentserver-core/cspell.json @@ -18,7 +18,8 @@ "SETFL", "Planifica", "mcptools", - "ainvoke" + "ainvoke", + "oauthreq" ], "ignorePaths": [ "*.csv", diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index 4b2f295cec80..9f3d01c09c88 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -23,13 +23,13 @@ dependencies = [ "azure-ai-projects>=1.1.0b4", "azure-ai-agents==1.2.0b5", "azure-core>=1.35.0", - "azure-identity", + "azure-identity>=1.25.1", "openai>=1.80.0", "opentelemetry-api>=1.35", "opentelemetry-exporter-otlp-proto-http", "starlette>=0.45.0", "uvicorn>=0.31.0", - "aiohttp", # used by azure-identity aio + "aiohttp>=3.13.0", # used by azure-identity aio ] [build-system] diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index 6b02bba9a0fd..2393a16515f1 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -1,5 +1,13 @@ # Release History + +## 1.0.0b6 (2025-11-26) + +### 
Feature Added + +- Support Agent-framework greater than 251112 + + ## 1.0.0b5 (2025-11-16) ### Feature Added diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py index c7d155d924dd..d17ec8abfb6f 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b5" +VERSION = "1.0.0b6" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml index 77492d1496dd..9abeff0d58d6 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml @@ -20,11 +20,9 @@ keywords = ["azure", "azure sdk"] dependencies = [ "azure-ai-agentserver-core>=1.0.0b5", - "langchain>0.3.5", + "langchain>0.3.20", "langchain-openai>0.3.10", - "langchain-azure-ai[opentelemetry]>=0.1.4", - "langgraph>0.5.0", - "opentelemetry-exporter-otlp-proto-http", + "langchain-azure-ai[opentelemetry]>=0.1.8", ] [build-system] @@ -66,5 +64,5 @@ breaking = false # incompatible python version pyright = false verifytypes = false # incompatible python version for -core verify_keywords = false -mindependency = false # depends on -core package +# mindependency = false # depends on -core package whl_no_aio = false From c7825d228f48823169da453d270a507e66b510ff Mon Sep 17 00:00:00 2001 From: Min Shi <39176492+Jasmin3q@users.noreply.github.com> Date: Wed, 3 Dec 2025 18:30:05 -0800 Subject: [PATCH 36/94] feat: Add created_by to responses (#44229) * add created_by * extract author name from message when 
non-streaming * add author_name to workflow agent test * extract author_name for every message --- ...ramework_output_non_streaming_converter.py | 43 ++++++++++----- ...nt_framework_output_streaming_converter.py | 52 +++++++++++++++---- .../workflow_agent_simple.py | 2 +- 3 files changed, 73 insertions(+), 24 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py index 823846f3ca7e..6e1fcdd4aba2 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py @@ -14,6 +14,8 @@ from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.models import Response as OpenAIResponse from azure.ai.agentserver.core.models.projects import ( + AgentId, + CreatedBy, ItemContentOutputText, ResponsesAssistantMessageItemResource, ) @@ -41,11 +43,19 @@ def _ensure_response_started(self) -> None: def _build_item_content_output_text(self, text: str) -> ItemContentOutputText: return ItemContentOutputText(text=text, annotations=[]) - def _new_assistant_message_item(self, message_text: str) -> ResponsesAssistantMessageItemResource: - item_content = self._build_item_content_output_text(message_text) - return ResponsesAssistantMessageItemResource( - id=self._context.id_generator.generate_message_id(), status="completed", content=[item_content] - ) + def _build_created_by(self, author_name: str) -> dict: + self._ensure_response_started() + + agent_dict = { + "type": "agent_id", + "name": author_name or "", + "version": "", # Default to empty string + } + + return { 
+ "agent": agent_dict, + "response_id": self._response_id, + } def transform_output_for_response(self, response: AgentRunResponse) -> OpenAIResponse: """Build an OpenAIResponse capturing all supported content types. @@ -72,9 +82,11 @@ def transform_output_for_response(self, response: AgentRunResponse) -> OpenAIRes contents = getattr(message, "contents", None) if not contents: continue + # Extract author_name from this message + msg_author_name = getattr(message, "author_name", None) or "" for j, content in enumerate(contents): logger.debug(" content index=%d in message=%d type=%s", j, i, type(content).__name__) - self._append_content_item(content, completed_items) + self._append_content_item(content, completed_items, msg_author_name) response_data = self._construct_response_data(completed_items) openai_response = OpenAIResponse(response_data) @@ -87,7 +99,7 @@ def transform_output_for_response(self, response: AgentRunResponse) -> OpenAIRes # ------------------------- helper append methods ------------------------- - def _append_content_item(self, content: Any, sink: List[dict]) -> None: + def _append_content_item(self, content: Any, sink: List[dict], author_name: str) -> None: """Dispatch a content object to the appropriate append helper. Adding this indirection keeps the main transform method compact and makes it @@ -97,20 +109,22 @@ def _append_content_item(self, content: Any, sink: List[dict]) -> None: :type content: Any :param sink: The list to append the converted content dict to. :type sink: List[dict] + :param author_name: The author name for the created_by field. 
+ :type author_name: str :return: None :rtype: None """ if isinstance(content, TextContent): - self._append_text_content(content, sink) + self._append_text_content(content, sink, author_name) elif isinstance(content, FunctionCallContent): - self._append_function_call_content(content, sink) + self._append_function_call_content(content, sink, author_name) elif isinstance(content, FunctionResultContent): - self._append_function_result_content(content, sink) + self._append_function_result_content(content, sink, author_name) else: logger.debug("unsupported content type skipped: %s", type(content).__name__) - def _append_text_content(self, content: TextContent, sink: List[dict]) -> None: + def _append_text_content(self, content: TextContent, sink: List[dict], author_name: str) -> None: text_value = getattr(content, "text", None) if not text_value: return @@ -129,11 +143,12 @@ def _append_text_content(self, content: TextContent, sink: List[dict]) -> None: "logprobs": [], } ], + "created_by": self._build_created_by(author_name), } ) logger.debug(" added message item id=%s text_len=%d", item_id, len(text_value)) - def _append_function_call_content(self, content: FunctionCallContent, sink: List[dict]) -> None: + def _append_function_call_content(self, content: FunctionCallContent, sink: List[dict], author_name: str) -> None: name = getattr(content, "name", "") or "" arguments = getattr(content, "arguments", "") if not isinstance(arguments, str): @@ -151,6 +166,7 @@ def _append_function_call_content(self, content: FunctionCallContent, sink: List "call_id": call_id, "name": name, "arguments": arguments or "", + "created_by": self._build_created_by(author_name), } ) logger.debug( @@ -161,7 +177,7 @@ def _append_function_call_content(self, content: FunctionCallContent, sink: List len(arguments or ""), ) - def _append_function_result_content(self, content: FunctionResultContent, sink: List[dict]) -> None: + def _append_function_result_content(self, content: FunctionResultContent, 
sink: List[dict], author_name: str) -> None: # Coerce the function result into a simple display string. result = [] raw = getattr(content, "result", None) @@ -179,6 +195,7 @@ def _append_function_result_content(self, content: FunctionResultContent, sink: "status": "completed", "call_id": call_id, "output": json.dumps(result) if len(result) > 0 else "", + "created_by": self._build_created_by(author_name), } ) logger.debug( diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 12cfab983643..769536dce9bc 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -22,6 +22,8 @@ ResponseStreamEvent, ) from azure.ai.agentserver.core.models.projects import ( + AgentId, + CreatedBy, FunctionToolCallItemResource, FunctionToolCallOutputItemResource, ItemContentOutputText, @@ -47,7 +49,7 @@ class _BaseStreamingState: """Base interface for streaming state handlers.""" - async def convert_contents(self, contents: AsyncIterable[BaseContent]) -> AsyncIterable[ResponseStreamEvent]: # pylint: disable=unused-argument + async def convert_contents(self, contents: AsyncIterable[BaseContent], author_name: str) -> AsyncIterable[ResponseStreamEvent]: # pylint: disable=unused-argument raise NotImplementedError @@ -57,7 +59,7 @@ class _TextContentStreamingState(_BaseStreamingState): def __init__(self, parent: AgentFrameworkOutputStreamingConverter): self._parent = parent - async def convert_contents(self, contents: AsyncIterable[TextContent]) -> AsyncIterable[ResponseStreamEvent]: + async def 
convert_contents(self, contents: AsyncIterable[TextContent], author_name: str) -> AsyncIterable[ResponseStreamEvent]: item_id = self._parent.context.id_generator.generate_message_id() output_index = self._parent.next_output_index() @@ -68,6 +70,7 @@ async def convert_contents(self, contents: AsyncIterable[TextContent]) -> AsyncI id=item_id, status="in_progress", content=[], + created_by=self._parent._build_created_by(author_name), ), ) @@ -109,7 +112,12 @@ async def convert_contents(self, contents: AsyncIterable[TextContent]) -> AsyncI part=content_part, ) - item = ResponsesAssistantMessageItemResource(id=item_id, status="completed", content=[content_part]) + item = ResponsesAssistantMessageItemResource( + id=item_id, + status="completed", + content=[content_part], + created_by=self._parent._build_created_by(author_name), + ) yield ResponseOutputItemDoneEvent( sequence_number=self._parent.next_sequence(), output_index=output_index, @@ -126,7 +134,7 @@ def __init__(self, parent: AgentFrameworkOutputStreamingConverter): self._parent = parent async def convert_contents( - self, contents: AsyncIterable[FunctionCallContent] + self, contents: AsyncIterable[FunctionCallContent], author_name: str ) -> AsyncIterable[ResponseStreamEvent]: content_by_call_id = {} ids_by_call_id = {} @@ -148,6 +156,7 @@ async def convert_contents( call_id=content.call_id, name=content.name, arguments="", + created_by=self._parent._build_created_by(author_name), ), ) else: @@ -178,6 +187,7 @@ async def convert_contents( call_id=call_id, name=content.name, arguments=args, + created_by=self._parent._build_created_by(author_name), ) yield ResponseOutputItemDoneEvent( sequence_number=self._parent.next_sequence(), @@ -195,7 +205,7 @@ def __init__(self, parent: AgentFrameworkOutputStreamingConverter): self._parent = parent async def convert_contents( - self, contents: AsyncIterable[FunctionResultContent] + self, contents: AsyncIterable[FunctionResultContent], author_name: str ) -> 
AsyncIterable[ResponseStreamEvent]: async for content in contents: item_id = self._parent.context.id_generator.generate_function_output_id() @@ -210,6 +220,7 @@ async def convert_contents( status="completed", call_id=content.call_id, output=output, + created_by=self._parent._build_created_by(author_name), ) yield ResponseOutputItemAddedEvent( @@ -286,10 +297,12 @@ async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> Async lambda a, b: a is not None and b is not None and a.message_id != b.message_id # pylint: disable=unnecessary-lambda-assignment ) async for group in chunk_on_change(updates, is_changed): - has_value, first, contents = await peek(self._read_updates(group)) + has_value, first_tuple, contents_with_author = await peek(self._read_updates(group)) if not has_value: continue + first, author_name = first_tuple # Extract content and author_name from tuple + state = None if isinstance(first, TextContent): state = _TextContentStreamingState(self) @@ -311,7 +324,12 @@ async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> Async if not state: continue - async for content in state.convert_contents(contents): + # Extract just the content from (content, author_name) tuples using async generator + async def extract_contents(): + async for content, _ in contents_with_author: + yield content + + async for content in state.convert_contents(extract_contents(), author_name): yield content yield ResponseCompletedEvent( @@ -319,12 +337,26 @@ async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> Async response=self._build_response(status="completed"), ) - @staticmethod - async def _read_updates(updates: AsyncIterable[AgentRunResponseUpdate]) -> AsyncIterable[BaseContent]: + def _build_created_by(self, author_name: str) -> dict: + agent_dict = { + "type": "agent_id", + "name": author_name or "", + "version": "", + } + + return { + "agent": agent_dict, + "response_id": self._response_id, + } + + async def 
_read_updates(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> AsyncIterable[tuple[BaseContent, str]]: async for update in updates: if not update.contents: continue + # Extract author_name from each update + author_name = getattr(update, "author_name", "") or "" + accepted_types = (TextContent, FunctionCallContent, FunctionApprovalRequestContent, @@ -332,7 +364,7 @@ async def _read_updates(updates: AsyncIterable[AgentRunResponseUpdate]) -> Async ErrorContent) for content in update.contents: if isinstance(content, accepted_types): - yield content + yield (content, author_name) def _ensure_response_started(self) -> None: if not self._response_created_at: diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_agent_simple/workflow_agent_simple.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_agent_simple/workflow_agent_simple.py index ce3cca956273..4d2569c38932 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_agent_simple/workflow_agent_simple.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_agent_simple/workflow_agent_simple.py @@ -219,7 +219,7 @@ async def handle_review_response( AgentRunUpdateEvent( self.id, data=AgentRunResponseUpdate( - contents=contents, role=ChatRole.ASSISTANT + contents=contents, role=ChatRole.ASSISTANT, author_name=self.id ), ) ) From 01fbe117738b1160d0fe9dc400950c7a1d6085f9 Mon Sep 17 00:00:00 2001 From: Min Shi <39176492+Jasmin3q@users.noreply.github.com> Date: Fri, 5 Dec 2025 09:52:47 -0800 Subject: [PATCH 37/94] update version (#44285) --- .../azure/ai/agentserver/agentframework/_version.py | 2 +- .../azure-ai-agentserver-agentframework/pyproject.toml | 2 +- .../azure/ai/agentserver/core/_version.py | 2 +- .../azure/ai/agentserver/langgraph/_version.py | 2 +- sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git 
a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py index d17ec8abfb6f..84058978c521 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b6" +VERSION = "1.0.0b7" diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml index 2b5ddf89a8de..67a15a4842f8 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml @@ -20,7 +20,7 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-ai-agentserver-core>=1.0.0b6", + "azure-ai-agentserver-core>=1.0.0b7", "agent-framework-azure-ai>=1.0.0b251112", "agent-framework-core>=1.0.0b251112", "opentelemetry-exporter-otlp-proto-grpc>=1.36.0", diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py index d17ec8abfb6f..84058978c521 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b6" +VERSION = "1.0.0b7" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py index d17ec8abfb6f..84058978c521 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b6" +VERSION = "1.0.0b7" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml index 9abeff0d58d6..a6fc7b3227db 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml @@ -19,7 +19,7 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-ai-agentserver-core>=1.0.0b5", + "azure-ai-agentserver-core>=1.0.0b7", "langchain>0.3.20", "langchain-openai>0.3.10", "langchain-azure-ai[opentelemetry]>=0.1.8", From f9b2cc0d0804ba0b7ea91361bc7d391118e056b1 Mon Sep 17 00:00:00 2001 From: Min Shi <39176492+Jasmin3q@users.noreply.github.com> Date: Fri, 5 Dec 2025 11:19:40 -0800 Subject: [PATCH 38/94] Update changelog for agentserver 1.0.0b7 (#44286) * Update changelog --- .../azure-ai-agentserver-agentframework/CHANGELOG.md | 7 +++++++ sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md | 7 +++++++ .../azure-ai-agentserver-langgraph/CHANGELOG.md | 7 +++++++ 3 files changed, 21 insertions(+) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index 
2393a16515f1..e4067f818fa7 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -1,6 +1,13 @@ # Release History +## 1.0.0b7 (2025-12-05) + +### Feature Added + +- Update response with created_by + + ## 1.0.0b6 (2025-11-26) ### Feature Added diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index 2393a16515f1..e4067f818fa7 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -1,6 +1,13 @@ # Release History +## 1.0.0b7 (2025-12-05) + +### Feature Added + +- Update response with created_by + + ## 1.0.0b6 (2025-11-26) ### Feature Added diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index 2393a16515f1..e4067f818fa7 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -1,6 +1,13 @@ # Release History +## 1.0.0b7 (2025-12-05) + +### Feature Added + +- Update response with created_by + + ## 1.0.0b6 (2025-11-26) ### Feature Added From 5d8905e513ac0e2cc5f6715485ffb8f648053457 Mon Sep 17 00:00:00 2001 From: Min Shi <39176492+Jasmin3q@users.noreply.github.com> Date: Fri, 5 Dec 2025 14:40:12 -0800 Subject: [PATCH 39/94] [Agent Server] Fix deploy issue about changelog, mypy and version (#44289) * fix mypy issue * update changelog * recover pyproject version to 1.0.0b5 --- .../azure-ai-agentserver-agentframework/CHANGELOG.md | 8 ++++---- .../models/agent_framework_output_streaming_converter.py | 2 +- .../azure-ai-agentserver-agentframework/pyproject.toml | 2 +- sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md | 8 ++++---- .../azure-ai-agentserver-langgraph/CHANGELOG.md | 2 +- .../azure-ai-agentserver-langgraph/pyproject.toml | 2 +- 6 files changed, 12 
insertions(+), 12 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index e4067f818fa7..9f55aa85b60e 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -3,21 +3,21 @@ ## 1.0.0b7 (2025-12-05) -### Feature Added +### Features Added - Update response with created_by ## 1.0.0b6 (2025-11-26) -### Feature Added +### Features Added - Support Agent-framework greater than 251112 ## 1.0.0b5 (2025-11-16) -### Feature Added +### Features Added - Support Tools Oauth @@ -28,7 +28,7 @@ ## 1.0.0b4 (2025-11-13) -### Feature Added +### Features Added - Adapters support tools diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 769536dce9bc..5c2b0ae552cb 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -298,7 +298,7 @@ async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> Async ) async for group in chunk_on_change(updates, is_changed): has_value, first_tuple, contents_with_author = await peek(self._read_updates(group)) - if not has_value: + if not has_value or first_tuple is None: continue first, author_name = first_tuple # Extract content and author_name from tuple diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml index 67a15a4842f8..19840e57fadb 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml @@ -20,7 +20,7 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-ai-agentserver-core>=1.0.0b7", + "azure-ai-agentserver-core>=1.0.0b5", "agent-framework-azure-ai>=1.0.0b251112", "agent-framework-core>=1.0.0b251112", "opentelemetry-exporter-otlp-proto-grpc>=1.36.0", diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index e4067f818fa7..9f55aa85b60e 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -3,21 +3,21 @@ ## 1.0.0b7 (2025-12-05) -### Feature Added +### Features Added - Update response with created_by ## 1.0.0b6 (2025-11-26) -### Feature Added +### Features Added - Support Agent-framework greater than 251112 ## 1.0.0b5 (2025-11-16) -### Feature Added +### Features Added - Support Tools Oauth @@ -28,7 +28,7 @@ ## 1.0.0b4 (2025-11-13) -### Feature Added +### Features Added - Adapters support tools diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index e4067f818fa7..da32978eb374 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -3,7 +3,7 @@ ## 1.0.0b7 (2025-12-05) -### Feature Added +### Features Added - Update response with created_by diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml index a6fc7b3227db..9abeff0d58d6 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml @@ -19,7 +19,7 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-ai-agentserver-core>=1.0.0b7", 
+ "azure-ai-agentserver-core>=1.0.0b5", "langchain>0.3.20", "langchain-openai>0.3.10", "langchain-azure-ai[opentelemetry]>=0.1.8", From a365fe67db77015c24fc28ce759f9acc97f14347 Mon Sep 17 00:00:00 2001 From: Jun'an Chen Date: Fri, 5 Dec 2025 22:51:01 -0800 Subject: [PATCH 40/94] [AgentServer] Fix error response in streaming & non-streaming (#44299) --- ...ramework_output_non_streaming_converter.py | 5 +- ...nt_framework_output_streaming_converter.py | 11 +-- .../azure/ai/agentserver/core/server/base.py | 69 +++++++------------ 3 files changed, 27 insertions(+), 58 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py index 6e1fcdd4aba2..fbece993305a 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py @@ -7,8 +7,7 @@ import json from typing import Any, List -from agent_framework import AgentRunResponse, FunctionResultContent -from agent_framework._types import FunctionCallContent, TextContent +from agent_framework import AgentRunResponse, FunctionCallContent, FunctionResultContent, ErrorContent, TextContent from azure.ai.agentserver.core import AgentRunContext from azure.ai.agentserver.core.logger import get_logger @@ -121,6 +120,8 @@ def _append_content_item(self, content: Any, sink: List[dict], author_name: str) self._append_function_call_content(content, sink, author_name) elif isinstance(content, FunctionResultContent): self._append_function_result_content(content, sink, author_name) + elif isinstance(content, ErrorContent): + raise 
ValueError(f"ErrorContent received: code={content.error_code}, message={content.message}") else: logger.debug("unsupported content type skipped: %s", type(content).__name__) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 5c2b0ae552cb..92f1cb983e08 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -311,16 +311,7 @@ async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> Async elif isinstance(first, FunctionResultContent): state = _FunctionCallOutputStreamingState(self) elif isinstance(first, ErrorContent): - code=getattr(first, "error_code", None) or "server_error" - message=getattr(first, "message", None) or "An error occurred" - raise ValueError(f"ErrorContent received: code={code}, message={message}") - # yield ResponseErrorEvent( - # sequence_number=self.next_sequence(), - # code=getattr(first, "error_code", None) or "server_error", - # message=getattr(first, "message", None) or "An error occurred", - # param="", - # ) - # continue + raise ValueError(f"ErrorContent received: code={first.error_code}, message={first.message}") if not state: continue diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index eeb4b85cdc34..5d25dea61be6 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ 
-7,7 +7,6 @@ import json import os import time -import traceback from abc import abstractmethod from typing import Any, AsyncGenerator, Generator, Optional, Union @@ -141,80 +140,58 @@ async def runs_endpoint(request): resp = await self.agent_run(context) if inspect.isgenerator(resp): - # Prefetch first event to allow 500 status if generation fails immediately - try: - first_event = next(resp) - except Exception as e: # noqa: BLE001 - err_msg = _format_error(e) - logger.error("Generator initialization failed: %s\n%s", e, traceback.format_exc()) - return JSONResponse({"error": err_msg}, status_code=500) - def gen(): ctx = TraceContextTextMapPropagator().extract(carrier=context_carrier) token = otel_context.attach(ctx) - error_sent = False + seq = 0 try: - # yield prefetched first event - yield _event_to_sse_chunk(first_event) for event in resp: + seq += 1 yield _event_to_sse_chunk(event) except Exception as e: # noqa: BLE001 - err_msg = _format_error(e) - logger.error("Error in non-async generator: %s\n%s", e, traceback.format_exc()) - payload = {"error": err_msg} - yield f"event: error\ndata: {json.dumps(payload)}\n\n" - error_sent = True + logger.error("Error in non-async generator: %s", e, exc_info=True) + err = project_models.ResponseErrorEvent( + sequence_number=seq + 1, + code=project_models.ResponseErrorCode.SERVER_ERROR, + message=_format_error(e), + param="") + yield _event_to_sse_chunk(err) finally: logger.info("End of processing CreateResponse request.") otel_context.detach(token) - if not error_sent: - yield "data: [DONE]\n\n" return StreamingResponse(gen(), media_type="text/event-stream") if inspect.isasyncgen(resp): - # Prefetch first async event to allow early 500 - try: - first_event = await resp.__anext__() - except StopAsyncIteration: - # No items produced; treat as empty successful stream - def empty_gen(): - yield "data: [DONE]\n\n" - - return StreamingResponse(empty_gen(), media_type="text/event-stream") - except Exception as e: # noqa: BLE001 - 
err_msg = _format_error(e) - logger.error("Async generator initialization failed: %s\n%s", e, traceback.format_exc()) - return JSONResponse({"error": err_msg}, status_code=500) - async def gen_async(): ctx = TraceContextTextMapPropagator().extract(carrier=context_carrier) token = otel_context.attach(ctx) - error_sent = False + seq = 0 try: - # yield prefetched first event - yield _event_to_sse_chunk(first_event) async for event in resp: + seq += 1 yield _event_to_sse_chunk(event) except Exception as e: # noqa: BLE001 - err_msg = _format_error(e) - logger.error("Error in async generator: %s\n%s", e, traceback.format_exc()) - payload = {"error": err_msg} - yield f"event: error\ndata: {json.dumps(payload)}\n\n" - yield "data: [DONE]\n\n" - error_sent = True + logger.error("Error in async generator: %s", e, exc_info=True) + err = project_models.ResponseErrorEvent( + sequence_number=seq + 1, + code=project_models.ResponseErrorCode.SERVER_ERROR, + message=_format_error(e), + param="") + yield _event_to_sse_chunk(err) finally: logger.info("End of processing CreateResponse request.") otel_context.detach(token) - if not error_sent: - yield "data: [DONE]\n\n" return StreamingResponse(gen_async(), media_type="text/event-stream") logger.info("End of processing CreateResponse request.") return JSONResponse(resp.as_dict()) except Exception as e: # TODO: extract status code from exception - logger.error(f"Error processing CreateResponse request: {traceback.format_exc()}") - return JSONResponse({"error": str(e)}, status_code=500) + logger.error(f"Error processing CreateResponse request: {e}", exc_info=True) + err = project_models.ResponseError( + code=project_models.ResponseErrorCode.SERVER_ERROR, + message=_format_error(e)) + return JSONResponse(err.as_dict()) async def liveness_endpoint(request): result = await self.agent_liveness(request) From 738f1a32f01260450a013fe2a40b9df94d53cbd6 Mon Sep 17 00:00:00 2001 From: junanchen Date: Fri, 5 Dec 2025 22:59:25 -0800 Subject: [PATCH 
41/94] add bugfix log in 1.0.0b7 --- .../azure-ai-agentserver-agentframework/CHANGELOG.md | 4 ++++ sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md | 4 ++++ sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md | 4 ++++ 3 files changed, 12 insertions(+) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index 9f55aa85b60e..ec53a2a60c03 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -7,6 +7,10 @@ - Update response with created_by +### Bugs Fixed + +- Fixed error response handling in stream and non-stream modes + ## 1.0.0b6 (2025-11-26) diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index 9f55aa85b60e..ec53a2a60c03 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -7,6 +7,10 @@ - Update response with created_by +### Bugs Fixed + +- Fixed error response handling in stream and non-stream modes + ## 1.0.0b6 (2025-11-26) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index da32978eb374..9c1e35949882 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -7,6 +7,10 @@ - Update response with created_by +### Bugs Fixed + +- Fixed error response handling in stream and non-stream modes + ## 1.0.0b6 (2025-11-26) From a2fed8dd550dad7aa6e52fd821700fd76e016977 Mon Sep 17 00:00:00 2001 From: junanchen Date: Fri, 5 Dec 2025 23:01:54 -0800 Subject: [PATCH 42/94] disable mindependency --- sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml index 9abeff0d58d6..90edfbceb523 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml @@ -64,5 +64,5 @@ breaking = false # incompatible python version pyright = false verifytypes = false # incompatible python version for -core verify_keywords = false -# mindependency = false # depends on -core package +mindependency = false # depends on -core package whl_no_aio = false From d7a1a9316d223c0f92c8cc43264fec37905b3fa1 Mon Sep 17 00:00:00 2001 From: Jun'an Chen Date: Tue, 9 Dec 2025 10:50:37 -0800 Subject: [PATCH 43/94] [AgentServer] Refine error handling (#44302) * [AgentServer] Fix error response in streaming & non-streaming * optimize error handling * cache error in tracing init * release -core 1.0.0b7 first * fix response iterator * fix tests * suppress error while testing * upgrade -core dep to 1.0.0b7 for -af & -lg * removed unused import & disable pyling on af * bypass checks for lg * add bugfix in changelog * enable tox checks --- .../CHANGELOG.md | 1 - .../agentframework/agent_framework.py | 31 ++++--- .../pyproject.toml | 8 +- .../azure-ai-agentserver-core/CHANGELOG.md | 1 - .../azure/ai/agentserver/core/server/base.py | 86 ++++++++----------- .../CHANGELOG.md | 1 - .../ai/agentserver/langgraph/langgraph.py | 4 +- .../pyproject.toml | 8 +- 8 files changed, 67 insertions(+), 73 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index ec53a2a60c03..84c4a76a27e5 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -11,7 +11,6 @@ - Fixed error response handling in stream and non-stream modes - ## 1.0.0b6 (2025-11-26) ### Features Added diff --git 
a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 4a0a074bd635..233436ac84ea 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -167,20 +167,23 @@ async def _resolve_agent_for_request(self, context: AgentRunContext): return agent, tool_client_wrapper def init_tracing(self): - exporter = os.environ.get(AdapterConstants.OTEL_EXPORTER_ENDPOINT) - app_insights_conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME) - project_endpoint = os.environ.get(AdapterConstants.AZURE_AI_PROJECT_ENDPOINT) - - if exporter or app_insights_conn_str: - from agent_framework.observability import setup_observability - - setup_observability( - enable_sensitive_data=True, - otlp_endpoint=exporter, - applicationinsights_connection_string=app_insights_conn_str, - ) - elif project_endpoint: - self.setup_tracing_with_azure_ai_client(project_endpoint) + try: + exporter = os.environ.get(AdapterConstants.OTEL_EXPORTER_ENDPOINT) + app_insights_conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME) + project_endpoint = os.environ.get(AdapterConstants.AZURE_AI_PROJECT_ENDPOINT) + + if exporter or app_insights_conn_str: + from agent_framework.observability import setup_observability + + setup_observability( + enable_sensitive_data=True, + otlp_endpoint=exporter, + applicationinsights_connection_string=app_insights_conn_str, + ) + elif project_endpoint: + self.setup_tracing_with_azure_ai_client(project_endpoint) + except Exception as e: + logger.warning(f"Failed to initialize tracing: {e}", exc_info=True) self.tracer = trace.get_tracer(__name__) def setup_tracing_with_azure_ai_client(self, project_endpoint: str): diff --git 
a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml index 19840e57fadb..a86c9eef2648 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml @@ -20,7 +20,7 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-ai-agentserver-core>=1.0.0b5", + "azure-ai-agentserver-core>=1.0.0b7", "agent-framework-azure-ai>=1.0.0b251112", "agent-framework-core>=1.0.0b251112", "opentelemetry-exporter-otlp-proto-grpc>=1.36.0", @@ -65,5 +65,9 @@ breaking = false # incompatible python version pyright = false verifytypes = false # incompatible python version for -core verify_keywords = false -mindependency = false # depends on -core package +#mindependency = false # depends on -core package +#latestdependency = false +#whl = false +#depends = false +#pylint = false whl_no_aio = false diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index ec53a2a60c03..84c4a76a27e5 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -11,7 +11,6 @@ - Fixed error response handling in stream and non-stream modes - ## 1.0.0b6 (2025-11-26) ### Features Added diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 5d25dea61be6..bc749a1fd782 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -14,6 +14,7 @@ from opentelemetry import context as otel_context, trace from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from starlette.applications import Starlette 
+from starlette.concurrency import iterate_in_threadpool from starlette.middleware.base import BaseHTTPMiddleware from starlette.middleware.cors import CORSMiddleware from starlette.requests import Request @@ -137,61 +138,46 @@ async def runs_endpoint(request): context_carrier = {} TraceContextTextMapPropagator().inject(context_carrier) + ex = None resp = await self.agent_run(context) - - if inspect.isgenerator(resp): - def gen(): - ctx = TraceContextTextMapPropagator().extract(carrier=context_carrier) - token = otel_context.attach(ctx) - seq = 0 - try: - for event in resp: - seq += 1 - yield _event_to_sse_chunk(event) - except Exception as e: # noqa: BLE001 - logger.error("Error in non-async generator: %s", e, exc_info=True) - err = project_models.ResponseErrorEvent( - sequence_number=seq + 1, - code=project_models.ResponseErrorCode.SERVER_ERROR, - message=_format_error(e), - param="") - yield _event_to_sse_chunk(err) - finally: - logger.info("End of processing CreateResponse request.") - otel_context.detach(token) - - return StreamingResponse(gen(), media_type="text/event-stream") - if inspect.isasyncgen(resp): - async def gen_async(): - ctx = TraceContextTextMapPropagator().extract(carrier=context_carrier) - token = otel_context.attach(ctx) - seq = 0 - try: - async for event in resp: - seq += 1 - yield _event_to_sse_chunk(event) - except Exception as e: # noqa: BLE001 - logger.error("Error in async generator: %s", e, exc_info=True) - err = project_models.ResponseErrorEvent( - sequence_number=seq + 1, - code=project_models.ResponseErrorCode.SERVER_ERROR, - message=_format_error(e), - param="") - yield _event_to_sse_chunk(err) - finally: - logger.info("End of processing CreateResponse request.") - otel_context.detach(token) - - return StreamingResponse(gen_async(), media_type="text/event-stream") - logger.info("End of processing CreateResponse request.") - return JSONResponse(resp.as_dict()) except Exception as e: # TODO: extract status code from exception 
logger.error(f"Error processing CreateResponse request: {e}", exc_info=True) - err = project_models.ResponseError( + ex = e + + if not context.stream: + logger.info("End of processing CreateResponse request.") + result = resp if not ex else project_models.ResponseError( code=project_models.ResponseErrorCode.SERVER_ERROR, - message=_format_error(e)) - return JSONResponse(err.as_dict()) + message=_format_error(ex)) + return JSONResponse(result.as_dict()) + + async def gen_async(ex): + ctx = TraceContextTextMapPropagator().extract(carrier=context_carrier) + token = otel_context.attach(ctx) + seq = 0 + try: + if ex: + return + it = iterate_in_threadpool(resp) if inspect.isgenerator(resp) else resp + async for event in it: + seq += 1 + yield _event_to_sse_chunk(event) + logger.info("End of processing CreateResponse request.") + except Exception as e: # noqa: BLE001 + logger.error("Error in async generator: %s", e, exc_info=True) + ex = e + finally: + if ex: + err = project_models.ResponseErrorEvent( + sequence_number=seq + 1, + code=project_models.ResponseErrorCode.SERVER_ERROR, + message=_format_error(ex), + param="") + yield _event_to_sse_chunk(err) + otel_context.detach(token) + + return StreamingResponse(gen_async(ex), media_type="text/event-stream") async def liveness_endpoint(request): result = await self.agent_liveness(request) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index 9c1e35949882..abea93ee106a 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -11,7 +11,6 @@ - Fixed error response handling in stream and non-stream modes - ## 1.0.0b6 (2025-11-26) ### Feature Added diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index 
e6bf10d0b5c2..51937fe31986 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -268,7 +268,7 @@ async def agent_run_non_stream(self, input_data: dict, context: AgentRunContext, output = self.state_converter.state_to_response(result, context) return output except Exception as e: - logger.error(f"Error during agent run: {e}") + logger.error(f"Error during agent run: {e}", exc_info=True) raise e async def agent_run_astream( @@ -301,7 +301,7 @@ async def agent_run_astream( async for result in self.state_converter.state_to_response_stream(stream, context): yield result except Exception as e: - logger.error(f"Error during streaming agent run: {e}") + logger.error(f"Error during streaming agent run: {e}", exc_info=True) raise e finally: # Close tool_client if provided diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml index 90edfbceb523..b970062738ee 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml @@ -19,7 +19,7 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-ai-agentserver-core>=1.0.0b5", + "azure-ai-agentserver-core>=1.0.0b7", "langchain>0.3.20", "langchain-openai>0.3.10", "langchain-azure-ai[opentelemetry]>=0.1.8", @@ -64,5 +64,9 @@ breaking = false # incompatible python version pyright = false verifytypes = false # incompatible python version for -core verify_keywords = false -mindependency = false # depends on -core package +#mindependency = false # depends on -core package +#latestdependency = false +#whl = false +#depends = false +#pylint = false whl_no_aio = false From 8be4d6d15f780ce045a0c5b32e2fd7c8be5b295a Mon Sep 17 00:00:00 2001 From: Min Shi <39176492+Jasmin3q@users.noreply.github.com> Date: Tue, 16 
Dec 2025 15:02:13 -0800 Subject: [PATCH 44/94] [AgentServer] Add keep alive (#44404) * add keep alive * user asyncio.shield --- .../azure/ai/agentserver/core/server/base.py | 61 ++++++++++++++++++- 1 file changed, 58 insertions(+), 3 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index bc749a1fd782..cf85b2fcea07 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -3,6 +3,7 @@ # --------------------------------------------------------- # pylint: disable=broad-exception-caught,unused-argument,logging-fstring-interpolation,too-many-statements,too-many-return-statements # mypy: ignore-errors +import asyncio import inspect import json import os @@ -38,6 +39,7 @@ logger = get_logger() DEBUG_ERRORS = os.environ.get(Constants.AGENT_DEBUG_ERRORS, "false").lower() == "true" +KEEP_ALIVE_INTERVAL = 15.0 # seconds class AgentRunContextMiddleware(BaseHTTPMiddleware): def __init__(self, app: ASGIApp, agent: Optional['FoundryCBAgent'] = None): @@ -160,9 +162,14 @@ async def gen_async(ex): if ex: return it = iterate_in_threadpool(resp) if inspect.isgenerator(resp) else resp - async for event in it: - seq += 1 - yield _event_to_sse_chunk(event) + # Wrap iterator with keep-alive mechanism + async for event in _iter_with_keep_alive(it): + if event is None: + # Keep-alive signal + yield _keep_alive_comment() + else: + seq += 1 + yield _event_to_sse_chunk(event) logger.info("End of processing CreateResponse request.") except Exception as e: # noqa: BLE001 logger.error("Error in async generator: %s", e, exc_info=True) @@ -517,6 +524,54 @@ def _event_to_sse_chunk(event: ResponseStreamEvent) -> str: return f"data: {event_data}\n\n" +def _keep_alive_comment() -> str: + """Generate a keep-alive SSE comment to 
maintain connection.""" + return ": keep-alive\n\n" + + +async def _iter_with_keep_alive( + it: AsyncGenerator[ResponseStreamEvent, None] +) -> AsyncGenerator[Optional[ResponseStreamEvent], None]: + """Wrap an async iterator with keep-alive mechanism. + + If no event is received within KEEP_ALIVE_INTERVAL seconds, + yields None as a signal to send a keep-alive comment. + The original iterator is protected with asyncio.shield to ensure + it continues running even when timeout occurs. + """ + it_anext = it.__anext__ + pending_task: Optional[asyncio.Task] = None + + while True: + try: + # If there's a pending task from previous timeout, wait for it first + if pending_task is not None: + event = await pending_task + pending_task = None + yield event + continue + + # Create a task for the next event + next_event_task = asyncio.create_task(it_anext()) + + try: + # Shield the task and wait with timeout + event = await asyncio.wait_for( + asyncio.shield(next_event_task), + timeout=KEEP_ALIVE_INTERVAL + ) + yield event + except asyncio.TimeoutError: + # Timeout occurred, but task continues due to shield + # Save task to check in next iteration + pending_task = next_event_task + yield None + + except StopAsyncIteration: + # Iterator exhausted + break + + def _format_error(exc: Exception) -> str: message = str(exc) if message: From 99484aaf0b084b492136a4c344e7250b41b66b03 Mon Sep 17 00:00:00 2001 From: lusu-msft <68949729+lusu-msft@users.noreply.github.com> Date: Wed, 7 Jan 2026 10:40:37 -0800 Subject: [PATCH 45/94] [agentserver][langgraph] support human in the loop (#44486) * implementing human in the loop helper * human in the loop happy path for non-streaming * refined response converter * reorginazed and runnable non stream * stream convert working * move constant to -core package * refined converter * refine human-in-the-loop sample * updated reserved function name * make hitl helper as kwarg for stream converter * fix comments * move validate to parent class * updated 
return hint --- .../core/server/common/constants.py | 6 + .../ai/agentserver/langgraph/langgraph.py | 91 ++++---- .../agentserver/langgraph/models/__init__.py | 12 -- .../models/human_in_the_loop_helper.py | 119 ++++++++++ .../models/human_in_the_loop_json_helper.py | 85 ++++++++ .../models/langgraph_state_converter.py | 143 ------------ .../langgraph_stream_response_converter.py | 74 ------- .../models/response_api_converter.py | 94 ++++++++ .../models/response_api_default_converter.py | 127 +++++++++++ ...onse_api_non_stream_response_converter.py} | 70 ++++-- ...r.py => response_api_request_converter.py} | 18 +- .../response_api_stream_response_converter.py | 108 ++++++++++ .../item_resource_helpers.py | 27 ++- ..._function_call_argument_event_generator.py | 53 +++-- .../response_output_item_event_generator.py | 29 ++- .../response_stream_event_generator.py | 5 +- .../samples/human_in_the_loop/.env-template | 4 + .../samples/human_in_the_loop/README.md | 133 ++++++++++++ .../samples/human_in_the_loop/main.py | 204 ++++++++++++++++++ .../human_in_the_loop/requirements.txt | 3 + 20 files changed, 1082 insertions(+), 323 deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/constants.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/langgraph_state_converter.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/langgraph_stream_response_converter.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py create mode 100644 
sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/{langgraph_response_converter.py => response_api_non_stream_response_converter.py} (71%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/{langgraph_request_converter.py => response_api_request_converter.py} (93%) create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_stream_response_converter.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/samples/human_in_the_loop/.env-template create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/samples/human_in_the_loop/README.md create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/samples/human_in_the_loop/main.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/samples/human_in_the_loop/requirements.txt diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/constants.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/constants.py new file mode 100644 index 000000000000..7d21ee7a31ff --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/constants.py @@ -0,0 +1,6 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +# Reserved function name for HITL. 
+HUMAN_IN_THE_LOOP_FUNCTION_NAME = "__hosted_agent_adapter_hitl__" \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index 51937fe31986..23a144c62cc0 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -17,10 +17,8 @@ from azure.ai.agentserver.core.server.base import FoundryCBAgent from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext -from .models import ( - LanggraphMessageStateConverter, - LanggraphStateConverter, -) +from .models.response_api_converter import ResponseAPIConverter, GraphInputArguments +from .models.response_api_default_converter import ResponseAPIDefaultConverter from .models.utils import is_state_schema_valid from .tool_client import ToolClient @@ -57,7 +55,7 @@ def __init__( self, graph: Union[CompiledStateGraph, GraphFactory], credentials: "Optional[AsyncTokenCredential]" = None, - state_converter: "Optional[LanggraphStateConverter]" = None, + converter: "Optional[ResponseAPIConverter]" = None, **kwargs: Any ) -> None: """ @@ -68,8 +66,8 @@ def __init__( :type graph: Union[CompiledStateGraph, GraphFactory] :param credentials: Azure credentials for authentication. :type credentials: Optional[AsyncTokenCredential] - :param state_converter: custom state converter. Required if graph state is not MessagesState. - :type state_converter: Optional[LanggraphStateConverter] + :param converter: custom response converter. 
+ :type converter: Optional[ResponseAPIConverter] """ super().__init__(credentials=credentials, **kwargs) # pylint: disable=unexpected-keyword-arg self._graph_or_factory: Union[CompiledStateGraph, GraphFactory] = graph @@ -79,16 +77,16 @@ def __init__( # If graph is already compiled, validate and set up state converter if isinstance(graph, CompiledStateGraph): self._resolved_graph = graph - if not state_converter: + if not converter: if is_state_schema_valid(self._resolved_graph.builder.state_schema): - self.state_converter = LanggraphMessageStateConverter() + self.converter = ResponseAPIDefaultConverter(graph=self._resolved_graph) else: - raise ValueError("state_converter is required for non-MessagesState graph.") + raise ValueError("converter is required for non-MessagesState graph.") else: - self.state_converter = state_converter + self.converter = converter else: # Defer validation until graph is resolved - self.state_converter = state_converter + self.converter = converter @property def graph(self) -> "Optional[CompiledStateGraph]": @@ -113,11 +111,11 @@ async def agent_run(self, context: AgentRunContext): else: graph = self._resolved_graph - input_data = self.state_converter.request_to_state(context) - logger.debug(f"Converted input data: {input_data}") + input_arguments = await self.converter.convert_request(context) + self.ensure_runnable_config(context, input_arguments) if not context.stream: try: - response = await self.agent_run_non_stream(input_data, context, graph) + response = await self.agent_run_non_stream(input_arguments, context, graph) return response finally: # Close tool_client for non-streaming requests @@ -129,7 +127,7 @@ async def agent_run(self, context: AgentRunContext): logger.warning(f"Error closing tool_client: {e}") # For streaming, pass tool_client to be closed after streaming completes - return self.agent_run_astream(input_data, context, graph, tool_client) + return self.agent_run_astream(input_arguments, context, graph, tool_client) 
except OAuthConsentRequiredError as e: # Clean up tool_client if OAuth error occurs before streaming starts if tool_client is not None: @@ -148,7 +146,7 @@ async def agent_run(self, context: AgentRunContext): async def _resolve_graph(self, context: AgentRunContext): """Resolve the graph if it's a factory function (for single-use/first-time resolution). Creates a ToolClient and calls the factory function with it. - This is used for the initial resolution to set up state_converter. + This is used for the initial resolution to set up converter. :param context: The context for the agent run. :type context: AgentRunContext @@ -156,7 +154,6 @@ async def _resolve_graph(self, context: AgentRunContext): if callable(self._graph_or_factory): logger.debug("Resolving graph from factory function") - # Create ToolClient with credentials tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) # pylint: disable=no-member tool_client_wrapper = ToolClient(tool_client) @@ -171,12 +168,11 @@ async def _resolve_graph(self, context: AgentRunContext): self._resolved_graph = result # Validate and set up state converter if not already set from initialization - if not self.state_converter and self._resolved_graph is not None: + if not self.converter and self._resolved_graph is not None: if is_state_schema_valid(self._resolved_graph.builder.state_schema): - self.state_converter = LanggraphMessageStateConverter() + self.converter = ResponseAPIDefaultConverter(graph=self._resolved_graph) else: - raise ValueError("state_converter is required for non-MessagesState graph.") - + raise ValueError("converter is required for non-MessagesState graph.") logger.debug("Graph resolved successfully") else: # Should not reach here, but just in case @@ -209,12 +205,11 @@ async def _resolve_graph_for_request(self, context: AgentRunContext): graph = result # Ensure state converter is set up (use existing one or create new) - if not self.state_converter: + if not 
self.converter: if is_state_schema_valid(graph.builder.state_schema): - self.state_converter = LanggraphMessageStateConverter() + self.converter = ResponseAPIDefaultConverter(graph=graph) else: - raise ValueError("state_converter is required for non-MessagesState graph.") - + raise ValueError("converter is required for non-MessagesState graph.") logger.debug("Fresh graph resolved successfully for request") return graph, tool_client_wrapper @@ -246,7 +241,7 @@ def get_trace_attributes(self): attrs["service.namespace"] = "azure.ai.agentserver.langgraph" return attrs - async def agent_run_non_stream(self, input_data: dict, context: AgentRunContext, graph: CompiledStateGraph): + async def agent_run_non_stream(self, input_arguments: GraphInputArguments, context: AgentRunContext, graph: CompiledStateGraph): """ Run the agent with non-streaming response. @@ -262,10 +257,8 @@ async def agent_run_non_stream(self, input_data: dict, context: AgentRunContext, """ try: - config = self.create_runnable_config(context) - stream_mode = self.state_converter.get_stream_mode(context) - result = await graph.ainvoke(input_data, config=config, stream_mode=stream_mode) - output = self.state_converter.state_to_response(result, context) + result = await graph.ainvoke(**input_arguments) + output = await self.converter.convert_response_non_stream(result, context) return output except Exception as e: logger.error(f"Error during agent run: {e}", exc_info=True) @@ -273,7 +266,7 @@ async def agent_run_non_stream(self, input_data: dict, context: AgentRunContext, async def agent_run_astream( self, - input_data: dict, + input_arguments: GraphInputArguments, context: AgentRunContext, graph: CompiledStateGraph, tool_client: "Optional[ToolClient]" = None @@ -295,11 +288,9 @@ async def agent_run_astream( """ try: logger.info(f"Starting streaming agent run {context.response_id}") - config = self.create_runnable_config(context) - stream_mode = self.state_converter.get_stream_mode(context) - stream = 
graph.astream(input=input_data, config=config, stream_mode=stream_mode) - async for result in self.state_converter.state_to_response_stream(stream, context): - yield result + stream = graph.astream(**input_arguments) + async for output_event in self.converter.convert_response_stream(stream, context): + yield output_event except Exception as e: logger.error(f"Error during streaming agent run: {e}", exc_info=True) raise e @@ -312,23 +303,25 @@ async def agent_run_astream( except Exception as e: logger.warning(f"Error closing tool_client in stream: {e}") - def create_runnable_config(self, context: AgentRunContext) -> RunnableConfig: + def ensure_runnable_config(self, context: AgentRunContext, input_arguments: GraphInputArguments): """ - Create a RunnableConfig from the converted request data. + Ensure the RunnableConfig is set in the input arguments. :param context: The context for the agent run. :type context: AgentRunContext - - :return: The RunnableConfig for the agent run. - :rtype: RunnableConfig + :param input_arguments: The input arguments for the agent run. 
+ :type input_arguments: GraphInputArguments """ - config = RunnableConfig( - configurable={ - "thread_id": context.conversation_id, - }, - callbacks=[self.azure_ai_tracer] if self.azure_ai_tracer else None, - ) - return config + config = input_arguments.get("config", {}) + configurable = config.get("configurable", {}) + configurable["thread_id"] = context.conversation_id + config["configurable"] = configurable + + callbacks = config.get("callbacks", []) + if self.azure_ai_tracer and self.azure_ai_tracer not in callbacks: + callbacks.append(self.azure_ai_tracer) + config["callbacks"] = callbacks + input_arguments["config"] = config def format_otlp_endpoint(self, endpoint: str) -> str: m = re.match(r"^(https?://[^/]+)", endpoint) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/__init__.py index eb6285a6279b..d540fd20468c 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/__init__.py @@ -1,15 +1,3 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -from .langgraph_request_converter import LangGraphRequestConverter -from .langgraph_response_converter import LangGraphResponseConverter -from .langgraph_state_converter import LanggraphMessageStateConverter, LanggraphStateConverter -from .langgraph_stream_response_converter import LangGraphStreamResponseConverter - -__all__ = [ - "LangGraphRequestConverter", - "LangGraphResponseConverter", - "LangGraphStreamResponseConverter", - "LanggraphStateConverter", - "LanggraphMessageStateConverter", -] diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py new file mode 100644 index 000000000000..8c4a453180ed --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py @@ -0,0 +1,119 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
INTERRUPT_NODE_NAME = "__interrupt__"
logger = get_logger()


class HumanInTheLoopHelper:
    """Base helper for human-in-the-loop (HITL) interactions in LangGraph.

    Subclasses implement the two conversion hooks:

    * :meth:`convert_interrupt` -- LangGraph ``Interrupt`` -> response item resource
    * :meth:`convert_input_item_to_command` -- feedback item -> LangGraph ``Command``
    """

    def __init__(self, context: Optional[AgentRunContext] = None):
        # Context is optional so subclasses can be built for pure conversion tests.
        self.context = context

    def has_interrupt(self, state: StateSnapshot) -> bool:
        """Return True when *state* is a ``StateSnapshot`` carrying at least one interrupt."""
        if not state or not isinstance(state, StateSnapshot):
            return False
        return state.interrupts is not None and len(state.interrupts) > 0

    def convert_interrupts(self, interrupts: tuple) -> list[project_models.ItemResource]:
        """Convert LangGraph interrupts to ItemResource objects, skipping invalid entries."""
        if not interrupts or not isinstance(interrupts, tuple):
            return []
        result = []
        # should be only one interrupt for now
        for interrupt_info in interrupts:
            if not isinstance(interrupt_info, Interrupt):
                # skip invalid interrupt
                continue
            item = self.convert_interrupt(interrupt_info)
            if item:
                result.append(item)
        return result

    def convert_interrupt(self, interrupt_info: Interrupt) -> project_models.ItemResource:
        """Convert a single LangGraph Interrupt to an ItemResource object.

        :param interrupt_info: The interrupt information from LangGraph.
        :type interrupt_info: Interrupt

        :return: The corresponding ItemResource object.
        :rtype: project_models.ItemResource
        """
        raise NotImplementedError("Subclasses must implement convert_interrupt method.")

    def validate_and_convert_human_feedback(
        self, state: StateSnapshot, input: Union[str, ResponseInputParam]
    ) -> Optional[Command]:
        """Validate if the human feedback input corresponds to the interrupt in state.
        If valid, convert the input to a LangGraph Command.

        :param state: The current LangGraph state snapshot.
        :type state: StateSnapshot
        :param input: The human feedback input from the request.
        :type input: Union[str, ResponseInputParam]

        :return: Command if valid feedback is provided, else None.
        :rtype: Optional[Command]
        """
        if not self.has_interrupt(state):
            # No interrupt pending -> nothing to resume; caller falls back to normal input.
            logger.info("No interrupt found in state.")
            return None
        interrupt_obj = state.interrupts[0]  # Assume single interrupt for simplicity
        if not interrupt_obj or not isinstance(interrupt_obj, Interrupt):
            logger.warning("No interrupt object found in state")
            return None

        logger.info("Retrieved interrupt from state, validating and converting human feedback.")
        if isinstance(input, str):
            # Feedback must arrive as a function_call_output item, never raw text.
            logger.warning(f"Expecting function call output item, got string: {input}")
            return None
        if isinstance(input, list):
            if len(input) != 1:
                # Exactly one feedback item may answer the single pending interrupt.
                logger.warning(f"Expected exactly one interrupt input item, got {len(input)} items.")
                return None
            item = input[0]
            # validate item type
            item_type = item.get("type", None)
            if item_type != project_models.ItemType.FUNCTION_CALL_OUTPUT:
                logger.warning(f"Invalid interrupt input item type: {item_type}, expected FUNCTION_CALL_OUTPUT.")
                return None

            # validate call_id matches; item is a mapping, so use .get() here --
            # the previous `item.call_id` raised AttributeError on this path.
            if item.get("call_id") != interrupt_obj.id:
                logger.warning(
                    f"Interrupt input call_id {item.get('call_id')} does not match interrupt id {interrupt_obj.id}."
                )
                return None

            return self.convert_input_item_to_command(item)
        logger.error(f"Unsupported interrupt input type: {type(input)}, {input}")
        return None

    def convert_input_item_to_command(self, input: ResponseInputItemParam) -> Union[Command, None]:
        """Convert ItemParams to a LangGraph Command for interrupt handling.

        :param input: The item parameters containing interrupt information.
        :type input: ResponseInputItemParam
        :return: The LangGraph Command.
        :rtype: Union[Command, None]
        """
        raise NotImplementedError("Subclasses must implement convert_input_item_to_command method.")
class HumanInTheLoopJsonHelper(HumanInTheLoopHelper):
    """
    Helper class for managing human-in-the-loop interactions in LangGraph.
    Interrupts are converted to FunctionToolCallItemResource objects.
    Human feedback will be sent back as FunctionCallOutputItemParam.
    All values are serialized as JSON strings.
    """

    def convert_interrupt(self, interrupt_info: Interrupt) -> Union[project_models.ItemResource, None]:
        """Convert a LangGraph Interrupt into a function tool-call item (None if invalid)."""
        if not isinstance(interrupt_info, Interrupt):
            logger.warning(f"Interrupt is not of type Interrupt: {interrupt_info}")
            return None
        name, call_id, arguments = self.interrupt_to_function_call(interrupt_info)
        return project_models.FunctionToolCallItemResource(
            call_id=call_id,
            name=name,
            arguments=arguments,
            id=self.context.id_generator.generate_function_call_id(),
            # NOTE(review): other Responses-API code typically uses "in_progress";
            # confirm "inprogress" is the value the service expects here.
            status="inprogress",
        )

    def interrupt_to_function_call(self, interrupt: Interrupt):
        """
        Convert an Interrupt to a function call tuple.

        :param interrupt: The Interrupt object to convert.
        :type interrupt: Interrupt

        :return: A tuple of (name, call_id, argument).
        :rtype: tuple[str | None, str | None, str | None]
        """
        if isinstance(interrupt.value, str):
            # Already a string payload -- pass through untouched.
            arguments = interrupt.value
        else:
            try:
                arguments = json.dumps(interrupt.value)
            except Exception as e:  # pragma: no cover - fallback # pylint: disable=broad-exception-caught
                logger.error(f"Failed to serialize interrupt value to JSON: {interrupt.value}, error: {e}")
                arguments = str(interrupt.value)
        return HUMAN_IN_THE_LOOP_FUNCTION_NAME, interrupt.id, arguments

    def convert_input_item_to_command(self, input: ResponseInputItemParam) -> Union[Command, None]:
        """Parse the function-call-output *input* item into a LangGraph Command.

        The "output" field must be a JSON object; any of its "resume", "update"
        or "goto" keys are forwarded to the Command. Returns None on any
        malformed payload instead of raising.
        """
        output_str = input.get("output")
        try:
            output = json.loads(output_str)
        except (json.JSONDecodeError, TypeError) as e:
            # TypeError covers a missing "output" field (json.loads(None)),
            # which previously escaped as an unhandled exception.
            logger.error(f"Invalid JSON in function call output: {input}, error: {e}")
            return None
        if not isinstance(output, dict):
            # A bare JSON scalar/array has no Command fields; previously this
            # crashed below on `output.get`.
            logger.warning(f"Function call output is not a JSON object: {output_str}")
            return None
        resume = output.get("resume", None)
        update = output.get("update", None)
        goto = output.get("goto", None)
        if resume is None and update is None and goto is None:
            logger.warning(f"No valid Command fields found in function call output: {input}")
            return None
        return Command(
            resume=resume,
            update=update,
            goto=goto,
        )
a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/langgraph_state_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/langgraph_state_converter.py deleted file mode 100644 index a1bc2181f919..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/langgraph_state_converter.py +++ /dev/null @@ -1,143 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# --------------------------------------------------------- -# mypy: disable-error-code="call-overload,override" -"""Base interface for converting between LangGraph internal state and OpenAI-style responses. - -A LanggraphStateConverter implementation bridges: - 1. Incoming CreateResponse (wrapped in AgentRunContext) -> initial graph state - 2. Internal graph state -> final non-streaming Response - 3. Streaming graph state events -> ResponseStreamEvent sequence - 4. Declares which stream mode (if any) is supported for a given run context - -Concrete implementations should: - * Decide and document the shape of the state dict they return in request_to_state - * Handle aggregation, error mapping, and metadata propagation in state_to_response - * Incrementally translate async stream_state items in state_to_response_stream - -Do NOT perform network I/O directly inside these methods (other than awaiting the -provided async iterator). Keep them pure transformation layers so they are testable. 
-""" - -from __future__ import annotations - -import time -from abc import ABC, abstractmethod -from typing import Any, AsyncGenerator, AsyncIterator, Dict - -from azure.ai.agentserver.core.models import Response, ResponseStreamEvent -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext - -from .langgraph_request_converter import LangGraphRequestConverter -from .langgraph_response_converter import LangGraphResponseConverter -from .langgraph_stream_response_converter import LangGraphStreamResponseConverter - - -class LanggraphStateConverter(ABC): - """ - Abstract base class for LangGraph state <-> response conversion. - - :meta private: - """ - - @abstractmethod - def get_stream_mode(self, context: AgentRunContext) -> str: - """Return a string indicating streaming mode for this run. - - Examples: "values", "updates", "messages", "custom", "debug". - Implementations may inspect context.request.stream or other flags. - Must be fast and side-effect free. - - :param context: The context for the agent run. - :type context: AgentRunContext - - :return: The streaming mode as a string. - :rtype: str - """ - - @abstractmethod - def request_to_state(self, context: AgentRunContext) -> Dict[str, Any]: - """Convert the incoming request (via context) to an initial LangGraph state. - - Return a serializable dict that downstream graph execution expects. - Should not mutate the context. Raise ValueError on invalid input. - - :param context: The context for the agent run. - :type context: AgentRunContext - - :return: The initial LangGraph state as a dictionary. - :rtype: Dict[str, Any] - """ - - @abstractmethod - def state_to_response(self, state: Any, context: AgentRunContext) -> Response: - """Convert a completed LangGraph state into a final non-streaming Response object. - - Implementations must construct and return an models.Response. 
- The returned object should include output items, usage (if available), - and reference the agent / conversation from context. - - :param state: The completed LangGraph state. - :type state: Any - :param context: The context for the agent run. - :type context: AgentRunContext - - :return: The final non-streaming Response object. - :rtype: Response - """ - - @abstractmethod - async def state_to_response_stream( - self, stream_state: AsyncIterator[Dict[str, Any] | Any], context: AgentRunContext - ) -> AsyncGenerator[ResponseStreamEvent, None]: - """Convert an async iterator of partial state updates into stream events. - - Yield ResponseStreamEvent objects in the correct order. Implementations - are responsible for emitting lifecycle events (created, in_progress, deltas, - completed, errors) consistent with the OpenAI Responses streaming contract. - - :param stream_state: An async iterator of partial LangGraph state updates. - :type stream_state: AsyncIterator[Dict[str, Any] | Any] - :param context: The context for the agent run. - :type context: AgentRunContext - - :return: An async generator yielding ResponseStreamEvent objects. 
- :rtype: AsyncGenerator[ResponseStreamEvent, None] - """ - - -class LanggraphMessageStateConverter(LanggraphStateConverter): - """Converter implementation for langgraph built-in MessageState.""" - - def get_stream_mode(self, context: AgentRunContext) -> str: - if context.request.get("stream"): - return "messages" - return "updates" - - def request_to_state(self, context: AgentRunContext) -> Dict[str, Any]: - converter = LangGraphRequestConverter(context.request) - return converter.convert() - - def state_to_response(self, state: Any, context: AgentRunContext) -> Response: - converter = LangGraphResponseConverter(context, state) - output = converter.convert() - - agent_id = context.get_agent_id_object() - conversation = context.get_conversation_object() - response = Response( - object="response", - id=context.response_id, - agent=agent_id, - conversation=conversation, - metadata=context.request.get("metadata"), - created_at=int(time.time()), - output=output, - ) - return response - - async def state_to_response_stream( - self, stream_state: AsyncIterator[Dict[str, Any] | Any], context: AgentRunContext - ) -> AsyncGenerator[ResponseStreamEvent, None]: - response_converter = LangGraphStreamResponseConverter(stream_state, context) - async for result in response_converter.convert(): - yield result diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/langgraph_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/langgraph_stream_response_converter.py deleted file mode 100644 index cba1db014ed8..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/langgraph_stream_response_converter.py +++ /dev/null @@ -1,74 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation -# mypy: disable-error-code="assignment,valid-type" -from typing import List - -from langchain_core.messages import AnyMessage - -from azure.ai.agentserver.core.logger import get_logger -from azure.ai.agentserver.core.models import ResponseStreamEvent -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext - -from .response_event_generators import ( - ResponseEventGenerator, - ResponseStreamEventGenerator, - StreamEventState, -) - -logger = get_logger() - - -class LangGraphStreamResponseConverter: - def __init__(self, stream, context: AgentRunContext): - self.stream = stream - self.context = context - - self.stream_state = StreamEventState() - self.current_generator: ResponseEventGenerator = None - - async def convert(self): - async for message, _ in self.stream: - try: - if self.current_generator is None: - self.current_generator = ResponseStreamEventGenerator(logger, None) - - converted = self.try_process_message(message, self.context) - for event in converted: - yield event # yield each event separately - except Exception as e: - logger.error(f"Error converting message {message}: {e}") - raise ValueError(f"Error converting message {message}") from e - - logger.info("Stream ended, finalizing response.") - # finalize the stream - converted = self.try_process_message(None, self.context) - for event in converted: - yield event # yield each event separately - - def try_process_message(self, event: AnyMessage, context: AgentRunContext) -> List[ResponseStreamEvent]: - if event and not self.current_generator: - self.current_generator = ResponseStreamEventGenerator(logger, None) - - is_processed = False - next_processor = self.current_generator - returned_events = [] - while not is_processed: - is_processed, next_processor, processed_events = self.current_generator.try_process_message( - event, context, self.stream_state - ) - 
returned_events.extend(processed_events) - if not is_processed and next_processor == self.current_generator: - logger.warning( - f"Message can not be processed by current generator {type(self.current_generator).__name__}:" - + f" {type(event)}: {event}" - ) - break - if next_processor != self.current_generator: - logger.info( - f"Switching processor from {type(self.current_generator).__name__} " - + f"to {type(next_processor).__name__}" - ) - self.current_generator = next_processor - return returned_events diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py new file mode 100644 index 000000000000..c93d922a97de --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py @@ -0,0 +1,94 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +# mypy: disable-error-code="call-overload,override" +"""Base interface for converting between LangGraph internal state and OpenAI-style responses. + +A ResponseAPIConverter implementation bridges: + 1. Incoming CreateResponse (wrapped in AgentRunContext) -> GraphInputArguments to invoke graph + 2. Graph output -> final non-streaming Response + 3. Streaming graph output events -> ResponseStreamEvent sequence + +Concrete implementations should: + * Decide and document the shape of input arguments they return in convert_request + * Handle aggregation, error mapping, and metadata propagation in convert_response_non_stream + * Incrementally translate async stream_state items in convert_response_stream + +Do NOT perform network I/O directly inside these methods (other than awaiting the +provided async iterator). 
class GraphInputArguments(TypedDict, total=False):
    """Keyword arguments passed to ``graph.ainvoke`` / ``graph.astream``.

    ``total=False`` because converters legitimately populate only a subset
    (e.g. ``input`` and ``stream_mode``); the adapter fills ``config`` in later
    via ``ensure_runnable_config``.
    """

    # Graph input state, or a Command when resuming a human-in-the-loop interrupt.
    input: Union[Dict[str, Any], Command, None]
    # RunnableConfig-shaped mapping (configurable, callbacks, ...).
    config: Dict[str, Any]
    context: Dict[str, Any]
    # LangGraph stream mode, e.g. "messages" or "updates".
    stream_mode: str


class ResponseAPIConverter(ABC):
    """
    Abstract base class for LangGraph input/output <-> response conversion.

    Orchestrates the conversions

    :meta private:
    """

    @abstractmethod
    async def convert_request(self, context: AgentRunContext) -> GraphInputArguments:
        """Convert the incoming request into arguments for invoking the graph.

        Implementations decide the shape of the returned ``input`` (state dict
        or resume Command) and the stream mode. Must not mutate *context*.

        :param context: The context for the agent run.
        :type context: AgentRunContext

        :return: The initial LangGraph arguments
        :rtype: GraphInputArguments
        """

    @abstractmethod
    async def convert_response_non_stream(self, output: Any, context: AgentRunContext) -> Response:
        """Convert the completed graph output into a final non-streaming Response.

        :param output: The final output returned by ``graph.ainvoke``.
        :type output: Any
        :param context: The context for the agent run.
        :type context: AgentRunContext

        :return: The final non-streaming Response object.
        :rtype: Response
        """

    @abstractmethod
    async def convert_response_stream(
        self,
        output: AsyncIterator[Dict[str, Any] | Any],
        context: AgentRunContext,
    ) -> AsyncGenerator[ResponseStreamEvent, None]:
        """Convert an async iterator of LangGraph stream events into stream events.

        Implementations are responsible for emitting lifecycle events in the
        order the OpenAI Responses streaming contract requires.

        :param output: An async iterator yielding LangGraph stream events
        :type output: AsyncIterator[Dict[str, Any] | Any]
        :param context: The context for the agent run.
        :type context: AgentRunContext

        :return: An async generator yielding ResponseStreamEvent objects.
        :rtype: AsyncGenerator[ResponseStreamEvent, None]
        """
class ResponseAPIDefaultConverter(ResponseAPIConverter):
    """
    Default implementation of ResponseAPIConverter for LangGraph input/output <-> Response API.
    Orchestrates the conversions using default converters and HITL helper.
    """

    def __init__(self,
                 graph: CompiledStateGraph,
                 create_request_converter=None,
                 create_stream_response_converter=None,
                 create_non_stream_response_converter=None,
                 create_human_in_the_loop_helper=None):
        """Store the compiled graph and optionally override factory hooks.

        Each ``create_*`` argument, when provided, is assigned as an instance
        attribute that shadows the corresponding ``_create_*`` method, letting
        callers customize individual converters without subclassing.
        """
        self._graph = graph
        if create_request_converter:
            self._create_request_converter = create_request_converter
        if create_stream_response_converter:
            self._create_stream_response_converter = create_stream_response_converter
        if create_non_stream_response_converter:
            self._create_non_stream_response_converter = create_non_stream_response_converter
        if create_human_in_the_loop_helper:
            self._create_human_in_the_loop_helper = create_human_in_the_loop_helper

    async def convert_request(self, context: AgentRunContext) -> GraphInputArguments:
        """Build graph-invocation arguments; resumes via Command when HITL feedback matches."""
        prev_state = await self._aget_state(context)
        input_data = self._convert_request_input(context, prev_state)
        stream_mode = self.get_stream_mode(context)
        return GraphInputArguments(input=input_data, stream_mode=stream_mode)

    async def convert_response_non_stream(self, output: Any, context: AgentRunContext) -> Response:
        """Convert the final graph output into a non-streaming Response."""
        converter = self._create_non_stream_response_converter(context)
        # Distinct local name: the old code rebound the `output` parameter here.
        output_items = converter.convert(output)

        agent_id = context.get_agent_id_object()
        conversation = context.get_conversation_object()
        response = Response(
            object="response",
            id=context.response_id,
            agent=agent_id,
            conversation=conversation,
            metadata=context.request.get("metadata"),
            created_at=int(time.time()),
            output=output_items,
        )
        return response

    async def convert_response_stream(
        self,
        output: AsyncIterator[Dict[str, Any] | Any],
        context: AgentRunContext,
    ) -> AsyncGenerator[ResponseStreamEvent, None]:
        """Translate graph stream events into ResponseStreamEvents, then finalize from state."""
        converter = self._create_stream_response_converter(context)
        async for event in output:
            # Fresh local per event; previously `output` (the iterator being
            # consumed) was rebound inside this loop, which was error-prone.
            for stream_event in converter.convert(event):
                yield stream_event

        state = await self._aget_state(context)
        # finalize the response with graph state after stream
        for stream_event in converter.finalize(state):
            yield stream_event

    def get_stream_mode(self, context: AgentRunContext) -> str:
        """Return "messages" for streaming runs, "updates" otherwise."""
        if context.stream:
            return "messages"
        return "updates"

    def _create_request_converter(self, context: AgentRunContext) -> ResponseAPIRequestConverter:
        # Default hook: message-state request converter over the raw request body.
        data = context.request
        return ResponseAPIMessageRequestConverter(data)

    def _create_stream_response_converter(self, context: AgentRunContext) -> ResponseAPIMessagesStreamResponseConverter:
        # Default hook: stream converter with a JSON-based HITL helper attached.
        hitl_helper = self._create_human_in_the_loop_helper(context)
        return ResponseAPIMessagesStreamResponseConverter(context, hitl_helper=hitl_helper)

    def _create_non_stream_response_converter(self, context: AgentRunContext) -> ResponseAPINonStreamResponseConverter:
        # Default hook: non-stream converter with a JSON-based HITL helper attached.
        hitl_helper = self._create_human_in_the_loop_helper(context)
        return ResponseAPIMessagesNonStreamResponseConverter(context, hitl_helper)

    def _create_human_in_the_loop_helper(self, context: AgentRunContext) -> HumanInTheLoopHelper:
        # Default hook: JSON serialization of interrupt values / feedback.
        return HumanInTheLoopJsonHelper(context)

    def _convert_request_input(self, context: AgentRunContext, prev_state: StateSnapshot) -> Union[Dict[str, Any], Command]:
        """
        Convert the CreateResponse input to LangGraph input format, handling HITL if needed.

        :param context: The context for the agent run.
        :type context: AgentRunContext
        :param prev_state: The previous LangGraph state snapshot.
        :type prev_state: StateSnapshot

        :return: The converted LangGraph input data or Command for HITL.
        :rtype: Union[Dict[str, Any], Command]
        """
        hitl_helper = self._create_human_in_the_loop_helper(context)
        command = hitl_helper.validate_and_convert_human_feedback(
            prev_state, context.request.get("input")
        )
        if command is not None:
            # Pending interrupt answered by the request -> resume instead of restart.
            return command
        converter = self._create_request_converter(context)
        return converter.convert()

    async def _aget_state(self, context: AgentRunContext) -> StateSnapshot:
        """Fetch the graph's checkpointed state for this conversation's thread."""
        config = RunnableConfig(
            configurable={"thread_id": context.conversation_id},
        )
        state = await self._graph.aget_state(config=config)
        return state
import Interrupt from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.models import projects as project_models from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext +from .human_in_the_loop_helper import ( + HumanInTheLoopHelper, + INTERRUPT_NODE_NAME, +) from .utils import extract_function_call logger = get_logger() -class LangGraphResponseConverter: - def __init__(self, context: AgentRunContext, output): +class ResponseAPINonStreamResponseConverter(ABC): + """ + Abstract base class for converting Langgraph output to items in Response format. + One converter instance handles one response. + """ + @abstractmethod + def convert(self, output: dict[str, Any]) -> list[project_models.ItemResource]: + """ + Convert the Langgraph output to a list of ItemResource objects. + + :param output: The Langgraph output to be converted. + :type output: dict[str, Any] + + :return: A list of ItemResource objects representing the converted output. + :rtype: list[project_models.ItemResource] + """ + pass + + +class ResponseAPIMessagesNonStreamResponseConverter(ResponseAPINonStreamResponseConverter): + """ + Convert Langgraph MessageState output to ItemResource objects. 
+ """ + def __init__(self, + context: AgentRunContext, + hitl_helper: HumanInTheLoopHelper): self.context = context - self.output = output + self.hitl_helper = hitl_helper - def convert(self) -> list[project_models.ItemResource]: + def convert(self, output: dict[str, Any]) -> list[project_models.ItemResource]: res = [] - for step in self.output: + for step in output: for node_name, node_output in step.items(): - message_arr = node_output.get("messages") - if not message_arr: - logger.warning(f"No messages found in node {node_name} output: {node_output}") - continue - for message in message_arr: - try: - converted = self.convert_output_message(message) - res.append(converted) - except Exception as e: - logger.error(f"Error converting message {message}: {e}") + if node_name == INTERRUPT_NODE_NAME: + interrupt_messages = self.hitl_helper.convert_interrupts(node_output) + res.extend(interrupt_messages) + else: + message_arr = node_output.get("messages") + if not message_arr or not isinstance(message_arr, Collection): + logger.warning(f"No messages found in node {node_name} output: {node_output}") + continue + for message in message_arr: + try: + converted = self.convert_output_message(message) + if converted: + res.append(converted) + except Exception as e: + logger.error(f"Error converting message {message}: {e}") return res def convert_output_message(self, output_message: AnyMessage): # pylint: disable=inconsistent-return-statements @@ -87,6 +123,7 @@ def convert_output_message(self, output_message: AnyMessage): # pylint: disable output=output_message.content, id=self.context.id_generator.generate_function_output_id(), ) + logger.warning(f"Unsupported message type: {type(output_message)}, {output_message}") def convert_MessageContent( self, content, role: project_models.ResponsesMessageRole @@ -134,3 +171,4 @@ def convert_MessageContentItem( content_dict["annotations"] = [] # annotation is required for output_text return project_models.ItemContent(content_dict) + 
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/langgraph_request_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py similarity index 93% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/langgraph_request_converter.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py index d29a346b192b..afc4af4048c6 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/langgraph_request_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py @@ -3,6 +3,7 @@ # --------------------------------------------------------- # pylint: disable=logging-fstring-interpolation # mypy: ignore-errors +from abc import ABC, abstractmethod import json from typing import Dict, List @@ -37,7 +38,22 @@ } -class LangGraphRequestConverter: +class ResponseAPIRequestConverter(ABC): + """ + Convert CreateResponse to LangGraph request format. + """ + @abstractmethod + def convert(self) -> dict: + """ + Convert the CreateResponse to a LangGraph request format. + + :return: The converted LangGraph request. 
+ :rtype: dict + """ + pass + + +class ResponseAPIMessageRequestConverter(ResponseAPIRequestConverter): def __init__(self, data: CreateResponse): self.data: CreateResponse = data diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_stream_response_converter.py new file mode 100644 index 000000000000..85d01a656a67 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_stream_response_converter.py @@ -0,0 +1,108 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +# pylint: disable=logging-fstring-interpolation +# mypy: disable-error-code="assignment,valid-type" +from abc import ABC, abstractmethod +from typing import Any, List, Union + +from langchain_core.messages import AnyMessage + +from azure.ai.agentserver.core.logger import get_logger +from azure.ai.agentserver.core.models import ResponseStreamEvent +from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext + +from .human_in_the_loop_helper import HumanInTheLoopHelper +from .response_event_generators import ( + ResponseEventGenerator, + ResponseStreamEventGenerator, + StreamEventState, +) + +logger = get_logger() + + +class ResponseAPIStreamResponseConverter(ABC): + """ + Abstract base class for converting Langgraph streamed output to ResponseStreamEvent objects. + One converter instance handles one response stream. + """ + @abstractmethod + async def convert(self, event: Union[AnyMessage, dict, Any, None]): + """ + Convert the Langgraph streamed output to ResponseStreamEvent objects. + + :return: An asynchronous generator yielding ResponseStreamEvent objects. 
+ :rtype: AsyncGenerator[ResponseStreamEvent, None] + """ + pass + + @abstractmethod + async def finalize(self, args=None): + """ + Finalize the conversion process after the stream ends. + + :return: An asynchronous generator yielding final ResponseStreamEvent objects. + :rtype: AsyncGenerator[ResponseStreamEvent, None] + """ + pass + + +class ResponseAPIMessagesStreamResponseConverter(ResponseAPIStreamResponseConverter): + def __init__(self, context: AgentRunContext, *, hitl_helper: HumanInTheLoopHelper): + # self.stream = stream + self.context = context + self.hitl_helper = hitl_helper + + self.stream_state = StreamEventState() + self.current_generator: ResponseEventGenerator = None + + def convert(self, output_event: Union[AnyMessage, dict, Any, None]): + try: + if self.current_generator is None: + self.current_generator = ResponseStreamEventGenerator(logger, None, hitl_helper=self.hitl_helper) + message= output_event[0] # expect a tuple + converted = self.try_process_message(message, self.context) + return converted + except Exception as e: + logger.error(f"Error converting message {message}: {e}") + raise ValueError(f"Error converting message {message}") from e + + def finalize(self, graph_state=None): + logger.info("Stream ended, finalizing response.") + res = [] + # check and convert interrupts + if self.hitl_helper.has_interrupt(graph_state): + interrupt = graph_state.interrupts[0] # should have only one interrupt + converted = self.try_process_message(interrupt, self.context) + res.extend(converted) + # finalize the stream + converted = self.try_process_message(None, self.context) + res.extend(converted) + return res + + def try_process_message(self, event: Union[AnyMessage, Any, None], context: AgentRunContext) -> List[ResponseStreamEvent]: + if event and not self.current_generator: + self.current_generator = ResponseStreamEventGenerator(logger, None, hitl_helper=self.hitl_helper) + + is_processed = False + next_processor = self.current_generator + 
returned_events = [] + while not is_processed: + is_processed, next_processor, processed_events = self.current_generator.try_process_message( + event, context, self.stream_state + ) + returned_events.extend(processed_events) + if not is_processed and next_processor == self.current_generator: + logger.warning( + f"Message can not be processed by current generator {type(self.current_generator).__name__}:" + + f" {type(event)}: {event}" + ) + break + if next_processor != self.current_generator: + logger.info( + f"Switching processor from {type(self.current_generator).__name__} " + + f"to {type(next_processor).__name__}" + ) + self.current_generator = next_processor + return returned_events diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py index a1c97423d5ae..6c3ef5505aa9 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py @@ -2,11 +2,14 @@ # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- # mypy: disable-error-code="assignment" +from langgraph.types import Interrupt + from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.server.common.constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME +from ..human_in_the_loop_helper import HumanInTheLoopHelper from ..utils import extract_function_call - class ItemResourceHelper: def __init__(self, item_type: str, item_id: str = None): self.item_type = item_type @@ -58,6 +61,28 @@ def get_aggregated_content(self): return self.create_item_resource(is_done=True) +class FunctionCallInterruptItemResourceHelper(ItemResourceHelper): + def __init__(self, + item_id: str = None, + hitl_helper: HumanInTheLoopHelper = None, + interrupt: Interrupt = None): + super().__init__(project_models.ItemType.FUNCTION_CALL, item_id) + self.hitl_helper = hitl_helper + self.interrupt = interrupt + + def create_item_resource(self, is_done: bool): + item_resource = self.hitl_helper.convert_interrupt(self.interrupt) + if not is_done: + item_resource.arguments = "" + return item_resource + + def add_aggregate_content(self, item): + pass + + def get_aggregated_content(self): + return self.create_item_resource(is_done=True) + + class FunctionCallOutputItemResourceHelper(ItemResourceHelper): def __init__(self, item_id: str = None, call_id: str = None): super().__init__(project_models.ItemType.FUNCTION_CALL_OUTPUT, item_id) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py index dbaed3ac9258..ef4e14c0ba95 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py @@ -3,25 +3,37 @@ # --------------------------------------------------------- # pylint: disable=unused-argument,name-too-long # mypy: ignore-errors -from typing import List +from typing import List, Union from langchain_core import messages as langgraph_messages from langchain_core.messages import AnyMessage +from langgraph.types import Interrupt from azure.ai.agentserver.core.models import projects as project_models from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext +from ..human_in_the_loop_helper import HumanInTheLoopHelper from ..utils import extract_function_call from . import ResponseEventGenerator, StreamEventState class ResponseFunctionCallArgumentEventGenerator(ResponseEventGenerator): - def __init__(self, logger, parent: ResponseEventGenerator, item_id, message_id, output_index: int): + def __init__( + self, + logger, + parent: ResponseEventGenerator, + item_id, + message_id, + output_index: int, + *, + hitl_helper: HumanInTheLoopHelper = None, + ): super().__init__(logger, parent) self.item_id = item_id self.output_index = output_index self.aggregated_content = "" self.message_id = message_id + self.hitl_helper = hitl_helper def try_process_message( self, message, context: AgentRunContext, stream_state: StreamEventState @@ -55,21 +67,28 @@ def on_start( return True, [] def process( - self, message: AnyMessage, run_details, stream_state: StreamEventState + self, message: Union[langgraph_messages.AnyMessage, Interrupt], run_details, stream_state: StreamEventState ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: - tool_call = self.get_tool_call_info(message) - if 
tool_call: - _, _, argument = extract_function_call(tool_call) - if argument: - argument_delta_event = project_models.ResponseFunctionCallArgumentsDeltaEvent( - item_id=self.item_id, - output_index=self.output_index, - delta=argument, - sequence_number=stream_state.sequence_number, - ) - stream_state.sequence_number += 1 - self.aggregated_content += argument - return True, self, [argument_delta_event] + if self.should_end(message): + return False, self, [] + + argument = None + if isinstance(message, Interrupt): + _, _, argument = self.hitl_helper.interrupt_to_function_call(message) if self.hitl_helper else (None, None, None) + else: + tool_call = self.get_tool_call_info(message) + if tool_call: + _, _, argument = extract_function_call(tool_call) + if argument: + argument_delta_event = project_models.ResponseFunctionCallArgumentsDeltaEvent( + item_id=self.item_id, + output_index=self.output_index, + delta=argument, + sequence_number=stream_state.sequence_number, + ) + stream_state.sequence_number += 1 + self.aggregated_content += argument + return True, self, [argument_delta_event] return False, self, [] def has_finish_reason(self, message: AnyMessage) -> bool: @@ -106,7 +125,7 @@ def on_end( self.parent.aggregate_content(self.aggregated_content) # pass aggregated content to parent return [done_event] - def get_tool_call_info(self, message: langgraph_messages.AnyMessage): + def get_tool_call_info(self, message: Union[langgraph_messages.AnyMessage, Interrupt]): if isinstance(message, langgraph_messages.AIMessageChunk): if message.tool_call_chunks: if len(message.tool_call_chunks) > 1: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py index a2606d1541c1..8394854cd493 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py @@ -3,29 +3,34 @@ # --------------------------------------------------------- # pylint: disable=unused-argument # mypy: ignore-errors -from typing import List +from typing import List, Union from langchain_core import messages as langgraph_messages from langchain_core.messages import AnyMessage +from langgraph.types import Interrupt from azure.ai.agentserver.core.models import projects as project_models from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext from azure.ai.agentserver.core.server.common.id_generator.id_generator import IdGenerator +from ..human_in_the_loop_helper import HumanInTheLoopHelper from . import ResponseEventGenerator, StreamEventState, item_resource_helpers from .response_content_part_event_generator import ResponseContentPartEventGenerator from .response_function_call_argument_event_generator import ResponseFunctionCallArgumentEventGenerator class ResponseOutputItemEventGenerator(ResponseEventGenerator): - def __init__(self, logger, parent: ResponseEventGenerator, output_index: int, message_id: str = None): + def __init__(self, logger, parent: ResponseEventGenerator, + output_index: int, message_id: str = None, + *, hitl_helper: HumanInTheLoopHelper = None): super().__init__(logger, parent) self.output_index = output_index self.message_id = message_id self.item_resource_helper = None + self.hitl_helper = hitl_helper def try_process_message( - self, message: AnyMessage, context: AgentRunContext, stream_state: StreamEventState + self, message: Union[AnyMessage, Interrupt, None], context: AgentRunContext, stream_state: StreamEventState ) -> tuple[bool, ResponseEventGenerator, 
List[project_models.ResponseStreamEvent]]: is_processed = False next_processor = self @@ -65,7 +70,7 @@ def try_process_message( return is_processed, next_processor, events def on_start( - self, event: AnyMessage, context: AgentRunContext, stream_state: StreamEventState + self, event: Union[AnyMessage, Interrupt], context: AgentRunContext, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: if self.started: return True, [] @@ -83,7 +88,7 @@ def on_start( self.started = True return True, [item_added_event] - def should_end(self, event: AnyMessage) -> bool: + def should_end(self, event: Union[AnyMessage, Interrupt]) -> bool: if event is None: self.logger.info("Received None event, ending processor.") return True @@ -92,7 +97,7 @@ def should_end(self, event: AnyMessage) -> bool: return False def on_end( - self, message: AnyMessage, context: AgentRunContext, stream_state: StreamEventState + self, message: Union[AnyMessage, Interrupt], context: AgentRunContext, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: if not self.started: # should not happen return [] @@ -112,7 +117,7 @@ def aggregate_content(self, content): # aggregate content from child processor self.item_resource_helper.add_aggregate_content(content) - def try_create_item_resource_helper(self, event: AnyMessage, id_generator: IdGenerator): # pylint: disable=too-many-return-statements + def try_create_item_resource_helper(self, event: Union[AnyMessage, Interrupt], id_generator: IdGenerator): # pylint: disable=too-many-return-statements if isinstance(event, langgraph_messages.AIMessageChunk) and event.tool_call_chunks: self.item_resource_helper = item_resource_helpers.FunctionCallItemResourceHelper( item_id=id_generator.generate_function_call_id(), tool_call=event.tool_call_chunks[0] @@ -143,9 +148,16 @@ def try_create_item_resource_helper(self, event: AnyMessage, id_generator: IdGen 
item_id=id_generator.generate_function_output_id(), call_id=event.tool_call_id ) return True + if isinstance(event, Interrupt): + self.item_resource_helper = item_resource_helpers.FunctionCallInterruptItemResourceHelper( + item_id=id_generator.generate_function_output_id(), + hitl_helper=self.hitl_helper, + interrupt=event, + ) + return True return False - def create_child_processor(self, message: AnyMessage): + def create_child_processor(self, message: Union[AnyMessage, Interrupt]): if self.item_resource_helper is None: return None if self.item_resource_helper.item_type == project_models.ItemType.FUNCTION_CALL: @@ -155,6 +167,7 @@ def create_child_processor(self, message: AnyMessage): item_id=self.item_resource_helper.item_id, message_id=message.id, output_index=self.output_index, + hitl_helper=self.hitl_helper, ) if self.item_resource_helper.item_type == project_models.ItemType.MESSAGE: return ResponseContentPartEventGenerator( diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py index a6ad1cba7396..a99e541cff11 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py @@ -24,8 +24,9 @@ class ResponseStreamEventGenerator(ResponseEventGenerator): Response stream event generator. 
""" - def __init__(self, logger, parent): + def __init__(self, logger, parent, *, hitl_helper=None): super().__init__(logger, parent) + self.hitl_helper = hitl_helper self.aggregated_contents: List[project_models.ItemResource] = [] def on_start( @@ -87,7 +88,7 @@ def try_process_message( if message: # create a child processor next_processor = ResponseOutputItemEventGenerator( - self.logger, self, len(self.aggregated_contents), message.id + self.logger, self, len(self.aggregated_contents), message.id, hitl_helper=self.hitl_helper ) return is_processed, next_processor, events diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/human_in_the_loop/.env-template b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/human_in_the_loop/.env-template new file mode 100644 index 000000000000..92b9c812a686 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/human_in_the_loop/.env-template @@ -0,0 +1,4 @@ +AZURE_OPENAI_API_KEY= +AZURE_OPENAI_ENDPOINT=https://.cognitiveservices.azure.com/ +OPENAI_API_VERSION=2025-03-01-preview +AZURE_OPENAI_CHAT_DEPLOYMENT_NAME= diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/human_in_the_loop/README.md b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/human_in_the_loop/README.md new file mode 100644 index 000000000000..e5c5aa3734a2 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/human_in_the_loop/README.md @@ -0,0 +1,133 @@ +# LangGraph Human-in-the-Loop Sample + +This sample demonstrates how to create an intelligent agent with human-in-the-loop capabilities using LangGraph and Azure AI Agent Server. The agent can interrupt its execution to ask for human input when needed, making it ideal for scenarios requiring human judgment or additional information. 
+ +## Overview + +The sample consists of several key components: + +- **LangGraph Agent**: An AI agent that can intelligently decide when to ask humans for input during task execution +- **Human Interrupt Mechanism**: Uses LangGraph's `interrupt()` function to pause execution and wait for human feedback +- **Azure AI Agent Server Adapter**: Hosts the LangGraph agent as an HTTP service + +## Files Description + +- `main.py` - The main LangGraph agent implementation with human-in-the-loop capabilities +- `requirements.txt` - Python dependencies for the sample + + + +## Setup + +1. **Environment Configuration** + + Create a `.env` file in this directory with your Azure OpenAI configuration: + ```env + AZURE_OPENAI_API_KEY=your_api_key_here + AZURE_OPENAI_ENDPOINT=your_endpoint_here + AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=gpt-4o + ``` + + Alternatively, if you're using Azure Identity (without API key), ensure your Azure credentials are configured. + +2. **Install Dependencies** + + Install the required Python packages: + ```bash + pip install python-dotenv + pip install azure-ai-agentserver[langgraph] + ``` + +## Usage + +### Running the Agent Server + +1. Start the agent server: + ```bash + python main.py + ``` + The server will start on `http://localhost:8088` + +### Making Requests + +#### Initial Request (Triggering Human Input) + +Send a request that will cause the agent to ask for human input: + +```bash +curl -X POST http://localhost:8088/responses \ + -H "Content-Type: application/json" \ + -d '{ + "agent": { + "name": "local_agent", + "type": "agent_reference" + }, + "stream": false, + "input": "Ask the user where they are, then look up the weather there." + }' +``` + +**Response Structure:** + +The agent will respond with an interrupt request: + +```json +{ + "conversation": { + "id": "conv_abc123..." 
+ }, + "output": [ + { + "type": "function_call", + "name": "__hosted_agent_adapter_interrupt__", + "call_id": "call_xyz789...", + "arguments": "{\"question\": \"Where are you located?\"}" + } + ] +} +``` + +#### Providing Human Feedback + +Resume the conversation by providing the human's response: + +```bash +curl -X POST http://localhost:8088/responses \ + -H "Content-Type: application/json" \ + -d '{ + "agent": { + "name": "local_agent", + "type": "agent_reference" + }, + "stream": false, + "input": [ + { + "type": "function_call_output", + "call_id": "call_xyz789...", + "output": "{\"resume\": \"San Francisco\"}" + } + ], + "conversation": { + "id": "conv_abc123..." + } + }' +``` + +**Final Response:** + +The agent will continue execution and provide the final result: + +```json +{ + "conversation": { + "id": "conv_abc123..." + }, + "output": [ + { + "type": "message", + "role": "assistant", + "content": "I looked up the weather in San Francisco. Result: It's sunny in San Francisco." + } + ] +} +``` diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/human_in_the_loop/main.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/human_in_the_loop/main.py new file mode 100644 index 000000000000..aed1ff51049e --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/human_in_the_loop/main.py @@ -0,0 +1,204 @@ +""" +Human-in-the-Loop Agent Example + +This sample demonstrates how to create a LangGraph agent that can interrupt +execution to ask for human input when needed. The agent uses Azure OpenAI +and includes a custom tool for asking human questions. 
+""" + +import os + +from dotenv import load_dotenv +from pydantic import BaseModel + +from azure.identity import DefaultAzureCredential, get_bearer_token_provider +from langchain.chat_models import init_chat_model +from langchain_core.messages import ToolMessage +from langchain_core.tools import tool +from langgraph.checkpoint.memory import InMemorySaver +from langgraph.graph import END, START, MessagesState, StateGraph +from langgraph.prebuilt import ToolNode +from langgraph.types import interrupt + +from azure.ai.agentserver.langgraph import from_langgraph + +# Load environment variables +load_dotenv() + + +# ============================================================================= +# Configuration +# ============================================================================= + +DEPLOYMENT_NAME = os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", "gpt-4o") +API_KEY = os.getenv("AZURE_OPENAI_API_KEY", "") + + +# ============================================================================= +# Model Initialization +# ============================================================================= + +def initialize_llm(): + """Initialize the language model with Azure OpenAI credentials.""" + if API_KEY: + return init_chat_model(f"azure_openai:{DEPLOYMENT_NAME}") + else: + credential = DefaultAzureCredential() + token_provider = get_bearer_token_provider( + credential, "https://cognitiveservices.azure.com/.default" + ) + return init_chat_model( + f"azure_openai:{DEPLOYMENT_NAME}", + azure_ad_token_provider=token_provider, + ) + + +llm = initialize_llm() + +# ============================================================================= +# Tools and Models +# ============================================================================= + +@tool +def search(query: str) -> str: + """ + Call to search the web for information. 
+ + Args: + query: The search query string + + Returns: + Search results as a string + """ + # This is a placeholder for the actual implementation + return f"I looked up: {query}. Result: It's sunny in San Francisco." + + +class AskHuman(BaseModel): + """Schema for asking the human a question.""" + question: str + + +# Initialize tools and bind to model +tools = [search] +tool_node = ToolNode(tools) +model = llm.bind_tools(tools + [AskHuman]) + + +# ============================================================================= +# Graph Nodes +# ============================================================================= + +def call_model(state: MessagesState) -> dict: + """ + Call the language model with the current conversation state. + + Args: + state: The current messages state + + Returns: + Dictionary with the model's response message + """ + messages = state["messages"] + response = model.invoke(messages) + return {"messages": [response]} + + +def ask_human(state: MessagesState) -> dict: + """ + Interrupt execution to ask the human for input. + + Args: + state: The current messages state + + Returns: + Dictionary with the human's response as a tool message + """ + last_message = state["messages"][-1] + tool_call_id = last_message.tool_calls[0]["id"] + ask = AskHuman.model_validate(last_message.tool_calls[0]["args"]) + + # Interrupt and wait for human input + location = interrupt(ask.question) + + tool_message = ToolMessage(tool_call_id=tool_call_id, content=location) + return {"messages": [tool_message]} + + +# ============================================================================= +# Graph Logic +# ============================================================================= + +def should_continue(state: MessagesState) -> str: + """ + Determine the next step in the graph based on the last message. 
+ + Args: + state: The current messages state + + Returns: + The name of the next node to execute, or END to finish + """ + messages = state["messages"] + last_message = messages[-1] + + # If there's no function call, we're done + if not last_message.tool_calls: + return END + + # If asking for human input, route to ask_human node + if last_message.tool_calls[0]["name"] == "AskHuman": + return "ask_human" + + # Otherwise, execute the tool call + return "action" + + +# ============================================================================= +# Graph Construction +# ============================================================================= + +def build_graph() -> StateGraph: + """ + Build and compile the LangGraph workflow. + + Returns: + Compiled StateGraph with checkpointing enabled + """ + workflow = StateGraph(MessagesState) + + # Add nodes + workflow.add_node("agent", call_model) + workflow.add_node("action", tool_node) + workflow.add_node("ask_human", ask_human) + + # Set entry point + workflow.add_edge(START, "agent") + + # Add conditional routing from agent + workflow.add_conditional_edges( + "agent", + should_continue, + path_map=["ask_human", "action", END], + ) + + # Add edges back to agent + workflow.add_edge("action", "agent") + workflow.add_edge("ask_human", "agent") + + # Compile with memory checkpointer + memory = InMemorySaver() + return workflow.compile(checkpointer=memory) + + +app = build_graph() + + +# ============================================================================= +# Main Entry Point +# ============================================================================= + +if __name__ == "__main__": + adapter = from_langgraph(app) + adapter.run() + diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/human_in_the_loop/requirements.txt b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/human_in_the_loop/requirements.txt new file mode 100644 index 000000000000..8c3bb2198ef1 --- /dev/null +++ 
b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/human_in_the_loop/requirements.txt @@ -0,0 +1,3 @@ +python-dotenv>=1.0.0 +azure-ai-agentserver-core +azure-ai-agentserver-langgraph From 61eeff1b76d7d5353f749b29281523adf522aa58 Mon Sep 17 00:00:00 2001 From: lusu-msft <68949729+lusu-msft@users.noreply.github.com> Date: Wed, 14 Jan 2026 14:01:01 -0800 Subject: [PATCH 46/94] [agentserver][agentframework] support human in the loop (#44560) * add hitl sample * updated af hitl sample. implementing non stream resp converter * debugging stream converter * request converter * remove unused code * updated sample * refine human feedback convert * hitl working with thread * check checkpoint status * remove unused code * add checkpoint storage * add thread persistent classes * update storage api * updated agent thread repository * updated thread repo * updated api for thread repo base class * update sample --- .../agentframework/agent_framework.py | 78 ++++++++-- .../agent_framework_input_converters.py | 73 ++++++++- ...ramework_output_non_streaming_converter.py | 31 +++- ...nt_framework_output_streaming_converter.py | 139 ++++++++++++----- .../models/human_in_the_loop_helper.py | 119 ++++++++++++++ .../agentframework/persistence/__init__.py | 13 ++ .../persistence/agent_thread_repository.py | 146 ++++++++++++++++++ .../samples/human_in_the_loop/.envtemplate | 3 + .../samples/human_in_the_loop/README.md | 112 ++++++++++++++ .../samples/human_in_the_loop/main.py | 121 +++++++++++++++ .../human_in_the_loop/requirements.txt | 5 + .../workflow_as_agent_reflection_pattern.py | 125 +++++++++++++++ .../.envtemplate | 3 + .../human_in_the_loop_ai_function/README.md | 46 ++++++ .../human_in_the_loop_ai_function/main.py | 87 +++++++++++ .../requirements.txt | 5 + 16 files changed, 1049 insertions(+), 57 deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py create mode 100644 
sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/.envtemplate create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/README.md create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/main.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/requirements.txt create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/workflow_as_agent_reflection_pattern.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/.envtemplate create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/README.md create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/main.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/requirements.txt diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 233436ac84ea..7607fd392513 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -8,8 +8,9 @@ from typing import TYPE_CHECKING, Any, AsyncGenerator, Awaitable, Optional, Protocol, Union, List import inspect -from agent_framework import AgentProtocol, AIFunction +from 
agent_framework import AgentProtocol, AIFunction, InMemoryCheckpointStorage from agent_framework.azure import AzureAIClient # pylint: disable=no-name-in-module +from agent_framework._workflows import get_checkpoint_summary from opentelemetry import trace from azure.ai.agentserver.core.client.tools import OAuthConsentRequiredError @@ -27,7 +28,9 @@ AgentFrameworkOutputNonStreamingConverter, ) from .models.agent_framework_output_streaming_converter import AgentFrameworkOutputStreamingConverter +from .models.human_in_the_loop_helper import HumanInTheLoopHelper from .models.constants import Constants +from .persistence import AgentThreadRepository from .tool_client import ToolClient if TYPE_CHECKING: @@ -73,7 +76,10 @@ class AgentFrameworkCBAgent(FoundryCBAgent): def __init__(self, agent: Union[AgentProtocol, AgentFactory], credentials: "Optional[AsyncTokenCredential]" = None, - **kwargs: Any): + *, + thread_repository: AgentThreadRepository = None, + **kwargs: Any, + ): """Initialize the AgentFrameworkCBAgent with an AgentProtocol or a factory function. :param agent: The Agent Framework agent to adapt, or a callable that takes ToolClient @@ -81,10 +87,16 @@ def __init__(self, agent: Union[AgentProtocol, AgentFactory], :type agent: Union[AgentProtocol, AgentFactory] :param credentials: Azure credentials for authentication. :type credentials: Optional[AsyncTokenCredential] + :param thread_repository: An optional AgentThreadRepository instance for managing thread messages. 
+ :type thread_repository: Optional[AgentThreadRepository] """ super().__init__(credentials=credentials, **kwargs) # pylint: disable=unexpected-keyword-arg self._agent_or_factory: Union[AgentProtocol, AgentFactory] = agent self._resolved_agent: "Optional[AgentProtocol]" = None + self._hitl_helper = HumanInTheLoopHelper() + self._checkpoint_storage = InMemoryCheckpointStorage() + self._thread_repository = thread_repository + # If agent is already instantiated, use it directly if isinstance(agent, AgentProtocol): self._resolved_agent = agent @@ -187,9 +199,13 @@ def init_tracing(self): self.tracer = trace.get_tracer(__name__) def setup_tracing_with_azure_ai_client(self, project_endpoint: str): + logger.info("Setting up tracing with AzureAIClient") + logger.info(f"Project endpoint for tracing credential: {self.credentials}") async def setup_async(): async with AzureAIClient( - project_endpoint=project_endpoint, async_credential=self.credentials + project_endpoint=project_endpoint, + async_credential=self.credentials, + credential=self.credentials, ) as agent_client: await agent_client.setup_azure_ai_observability() @@ -223,24 +239,54 @@ async def agent_run( # pylint: disable=too-many-statements logger.info(f"Starting agent_run with stream={context.stream}") request_input = context.request.get("input") - - input_converter = AgentFrameworkInputConverter() - message = input_converter.transform_input(request_input) + # TODO: load agent thread from storage and deserialize + agent_thread = None + if self._thread_repository: + agent_thread = await self._thread_repository.get(context.conversation_id) + if agent_thread: + logger.info(f"Loaded agent thread for conversation: {context.conversation_id}") + else: + agent_thread = agent.get_new_thread() + + last_checkpoint = None + if self._checkpoint_storage: + checkpoints = await self._checkpoint_storage.list_checkpoints() + last_checkpoint = checkpoints[-1] if len(checkpoints) > 0 else None + if last_checkpoint: + summary = 
get_checkpoint_summary(last_checkpoint) + logger.info(f"Last checkpoint summary status: {summary.status}") + if summary.status == "completed": + last_checkpoint = None # Do not resume from completed checkpoints + + input_converter = AgentFrameworkInputConverter(hitl_helper=self._hitl_helper) + message = await input_converter.transform_input( + request_input, + agent_thread=agent_thread, + checkpoint=last_checkpoint) logger.debug(f"Transformed input message type: {type(message)}") # Use split converters if context.stream: logger.info("Running agent in streaming mode") - streaming_converter = AgentFrameworkOutputStreamingConverter(context) + streaming_converter = AgentFrameworkOutputStreamingConverter(context, hitl_helper=self._hitl_helper) async def stream_updates(): try: update_count = 0 - updates = agent.run_stream(message) + updates = agent.run_stream( + message, + thread=agent_thread, + checkpoint_storage=self._checkpoint_storage, + checkpoint_id=last_checkpoint.checkpoint_id if last_checkpoint else None, + ) async for event in streaming_converter.convert(updates): update_count += 1 yield event - + + if agent_thread and self._thread_repository: + await self._thread_repository.set(context.conversation_id, agent_thread) + logger.info(f"Saved agent thread for conversation: {context.conversation_id}") + logger.info("Streaming completed with %d updates", update_count) finally: # Close tool_client if it was created for this request @@ -255,9 +301,16 @@ async def stream_updates(): # Non-streaming path logger.info("Running agent in non-streaming mode") - non_streaming_converter = AgentFrameworkOutputNonStreamingConverter(context) - result = await agent.run(message) - logger.debug(f"Agent run completed, result type: {type(result)}") + non_streaming_converter = AgentFrameworkOutputNonStreamingConverter(context, hitl_helper=self._hitl_helper) + result = await agent.run(message, + thread=agent_thread, + checkpoint_storage=self._checkpoint_storage, + 
checkpoint_id=last_checkpoint.checkpoint_id if last_checkpoint else None, + ) + + if agent_thread and self._thread_repository: + await self._thread_repository.set(context.conversation_id, agent_thread) + logger.info(f"Saved agent thread for conversation: {context.conversation_id}") transformed_result = non_streaming_converter.transform_output_for_response(result) logger.info("Agent run and transformation completed successfully") return transformed_result @@ -279,3 +332,4 @@ async def oauth_consent_stream(error=e): logger.debug("Closed tool_client after request processing") except Exception as ex: # pylint: disable=broad-exception-caught logger.warning(f"Error closing tool_client: {ex}") + diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py index 993be43e85c8..28cc76b51c32 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py @@ -5,9 +5,15 @@ # mypy: disable-error-code="no-redef" from __future__ import annotations -from typing import Dict, List - -from agent_framework import ChatMessage, Role as ChatRole +from typing import Dict, List, Optional + +from agent_framework import ( + AgentThread, + ChatMessage, + RequestInfoEvent, + Role as ChatRole, + WorkflowCheckpoint, +) from agent_framework._types import TextContent from azure.ai.agentserver.core.logger import get_logger @@ -21,10 +27,14 @@ class AgentFrameworkInputConverter: Accepts: str | List | None Returns: None | str | ChatMessage | list[str] | list[ChatMessage] """ + def __init__(self, *, hitl_helper=None) -> None: + self._hitl_helper = hitl_helper - def 
transform_input( + async def transform_input( self, input: str | List[Dict] | None, + agent_thread: Optional[AgentThread] = None, + checkpoint: Optional[WorkflowCheckpoint] = None, ) -> str | ChatMessage | list[str] | list[ChatMessage] | None: logger.debug("Transforming input of type: %s", type(input)) @@ -33,7 +43,28 @@ def transform_input( if isinstance(input, str): return input - + + if self._hitl_helper: + # load pending requests from checkpoint and thread messages if available + thread_messages = [] + if agent_thread: + thread_messages = await agent_thread.message_store.list_messages() + logger.info(f"Thread messages count: {len(thread_messages)}") + pending_hitl_requests = self._hitl_helper.get_pending_hitl_request(thread_messages, checkpoint) + logger.info(f"Pending HitL requests: {list(pending_hitl_requests.keys())}") + hitl_response = self._hitl_helper.validate_and_convert_hitl_response( + input, + pending_requests=pending_hitl_requests) + logger.info(f"HitL response validation result: {[m.to_dict() for m in hitl_response]}") + if hitl_response: + return hitl_response + + return self._transform_input_internal(input) + + def _transform_input_internal( + self, + input: str | List[Dict] | None, + ) -> str | ChatMessage | list[str] | list[ChatMessage] | None: try: if isinstance(input, list): messages: list[str | ChatMessage] = [] @@ -118,3 +149,35 @@ def _extract_input_text(self, content_item: Dict) -> str: if isinstance(text_content, str): return text_content return None # type: ignore + + def _validate_and_convert_hitl_response( + self, + pending_request: Dict, + input: List[Dict], + ) -> Optional[List[ChatMessage]]: + if not self._hitl_helper: + logger.warning("HitL helper not provided; cannot validate HitL response.") + return None + if isinstance(input, str): + logger.warning("Expected list input for HitL response validation, got str.") + return None + if not isinstance(input, list) or len(input) != 1: + logger.warning("Expected single-item list input for 
HitL response validation.") + return None + + item = input[0] + if item.get("type") != "function_call_output": + logger.warning("Expected function_call_output type for HitL response validation.") + return None + call_id = item.get("call_id", None) + if not call_id or call_id not in pending_request: + logger.warning("Function call output missing valid call_id for HitL response validation.") + return None + request_info = pending_request[call_id] + if isinstance(request_info, dict): + request_info = RequestInfoEvent.from_dict(request_info) + if not isinstance(request_info, RequestInfoEvent): + logger.warning("No valid pending request info found for call_id: %s", call_id) + return None + + return self._hitl_helper.convert_response(request_info, item) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py index fbece993305a..08db24adfae0 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py @@ -7,7 +7,14 @@ import json from typing import Any, List -from agent_framework import AgentRunResponse, FunctionCallContent, FunctionResultContent, ErrorContent, TextContent +from agent_framework import ( + AgentRunResponse, + FunctionCallContent, + FunctionResultContent, + ErrorContent, + TextContent, +) +from agent_framework._types import UserInputRequestContents from azure.ai.agentserver.core import AgentRunContext from azure.ai.agentserver.core.logger import get_logger @@ -21,6 +28,7 @@ from .agent_id_generator import AgentIdGenerator from .constants import Constants +from 
.human_in_the_loop_helper import HumanInTheLoopHelper logger = get_logger() @@ -28,10 +36,11 @@ class AgentFrameworkOutputNonStreamingConverter: # pylint: disable=name-too-long """Non-streaming converter: AgentRunResponse -> OpenAIResponse.""" - def __init__(self, context: AgentRunContext): + def __init__(self, context: AgentRunContext, *, hitl_helper: HumanInTheLoopHelper): self._context = context self._response_id = None self._response_created_at = None + self._hitl_helper = hitl_helper def _ensure_response_started(self) -> None: if not self._response_id: @@ -120,6 +129,8 @@ def _append_content_item(self, content: Any, sink: List[dict], author_name: str) self._append_function_call_content(content, sink, author_name) elif isinstance(content, FunctionResultContent): self._append_function_result_content(content, sink, author_name) + elif isinstance(content, UserInputRequestContents): + self._append_user_input_request_contents(content, sink, author_name) elif isinstance(content, ErrorContent): raise ValueError(f"ErrorContent received: code={content.error_code}, message={content.message}") else: @@ -205,6 +216,22 @@ def _append_function_result_content(self, content: FunctionResultContent, sink: call_id, len(result), ) + + def _append_user_input_request_contents(self, content: UserInputRequestContents, sink: List[dict], author_name: str) -> None: + item_id = self._context.id_generator.generate_function_call_id() + content = self._hitl_helper.convert_user_input_request_content(content) + sink.append( + { + "id": item_id, + "type": "function_call", + "status": "in_progress", + "call_id": content["call_id"], + "name": content["name"], + "arguments": content["arguments"], + "created_by": self._build_created_by(author_name), + } + ) + logger.debug(" added user_input_request item id=%s call_id=%s", item_id, content["call_id"]) # ------------- simple normalization helper ------------------------- def _coerce_result_text(self, value: Any) -> str | dict: diff --git 
a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 92f1cb983e08..23d8702e38ec 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -5,15 +5,17 @@ # mypy: disable-error-code="call-overload,assignment,arg-type,override" from __future__ import annotations +from ast import arguments import datetime import json -from typing import Any, AsyncIterable, List +from typing import Any, AsyncIterable, List, Union from agent_framework import AgentRunResponseUpdate, BaseContent, FunctionApprovalRequestContent, FunctionResultContent from agent_framework._types import ( ErrorContent, FunctionCallContent, TextContent, + UserInputRequestContents, ) from azure.ai.agentserver.core import AgentRunContext @@ -43,6 +45,7 @@ ) from .agent_id_generator import AgentIdGenerator +from .human_in_the_loop_helper import HumanInTheLoopHelper from .utils.async_iter import chunk_on_change, peek @@ -130,46 +133,56 @@ async def convert_contents(self, contents: AsyncIterable[TextContent], author_na class _FunctionCallStreamingState(_BaseStreamingState): """State handler for function_call content during streaming.""" - def __init__(self, parent: AgentFrameworkOutputStreamingConverter): + def __init__(self, + parent: AgentFrameworkOutputStreamingConverter, + hitl_helper: HumanInTheLoopHelper): self._parent = parent + self._hitl_helper = hitl_helper async def convert_contents( - self, contents: AsyncIterable[FunctionCallContent], author_name: str + self, contents: AsyncIterable[Union[FunctionCallContent, 
UserInputRequestContents]], author_name: str ) -> AsyncIterable[ResponseStreamEvent]: content_by_call_id = {} ids_by_call_id = {} + hitl_contents = [] async for content in contents: - if content.call_id not in content_by_call_id: - item_id = self._parent.context.id_generator.generate_function_call_id() - output_index = self._parent.next_output_index() - - content_by_call_id[content.call_id] = content - ids_by_call_id[content.call_id] = (item_id, output_index) - - yield ResponseOutputItemAddedEvent( - sequence_number=self._parent.next_sequence(), - output_index=output_index, - item=FunctionToolCallItemResource( - id=item_id, - status="in_progress", - call_id=content.call_id, - name=content.name, - arguments="", - created_by=self._parent._build_created_by(author_name), - ), - ) - else: - content_by_call_id[content.call_id] = content_by_call_id[content.call_id] + content - item_id, output_index = ids_by_call_id[content.call_id] - - args_delta = content.arguments if isinstance(content.arguments, str) else "" - yield ResponseFunctionCallArgumentsDeltaEvent( - sequence_number=self._parent.next_sequence(), - item_id=item_id, - output_index=output_index, - delta=args_delta, - ) + if isinstance(content, FunctionCallContent): + if content.call_id not in content_by_call_id: + item_id = self._parent.context.id_generator.generate_function_call_id() + output_index = self._parent.next_output_index() + + content_by_call_id[content.call_id] = content + ids_by_call_id[content.call_id] = (item_id, output_index) + + yield ResponseOutputItemAddedEvent( + sequence_number=self._parent.next_sequence(), + output_index=output_index, + item=FunctionToolCallItemResource( + id=item_id, + status="in_progress", + call_id=content.call_id, + name=content.name, + arguments="", + created_by=self._parent._build_created_by(author_name), + ), + ) + else: + content_by_call_id[content.call_id] = content_by_call_id[content.call_id] + content + item_id, output_index = ids_by_call_id[content.call_id] + + 
args_delta = content.arguments if isinstance(content.arguments, str) else "" + yield ResponseFunctionCallArgumentsDeltaEvent( + sequence_number=self._parent.next_sequence(), + item_id=item_id, + output_index=output_index, + delta=args_delta, + ) + + elif isinstance(content, UserInputRequestContents): + converted_hitl = self._hitl_helper.convert_user_input_request_content(content) + if converted_hitl: + hitl_contents.append(converted_hitl) for call_id, content in content_by_call_id.items(): item_id, output_index = ids_by_call_id[call_id] @@ -196,6 +209,51 @@ async def convert_contents( ) self._parent.add_completed_output_item(item) # pylint: disable=protected-access + + # process HITL contents after function calls + for content in hitl_contents: + item_id = self._parent.context.id_generator.generate_function_call_id() + output_index = self._parent.next_output_index() + + yield ResponseOutputItemAddedEvent( + sequence_number=self._parent.next_sequence(), + output_index=output_index, + item=FunctionToolCallItemResource( + id=item_id, + status="in_progress", + call_id=content["call_id"], + name=content["name"], + arguments="", + created_by=self._parent._build_created_by(author_name), + ), + ) + yield ResponseFunctionCallArgumentsDeltaEvent( + sequence_number=self._parent.next_sequence(), + item_id=item_id, + output_index=output_index, + delta=content["arguments"], + ) + + yield ResponseFunctionCallArgumentsDoneEvent( + sequence_number=self._parent.next_sequence(), + item_id=item_id, + output_index=output_index, + arguments=content["arguments"], + ) + item = FunctionToolCallItemResource( + id=item_id, + status="in_progress", + call_id=content["call_id"], + name=content["name"], + arguments=content["arguments"], + created_by=self._parent._build_created_by(author_name), + ) + yield ResponseOutputItemDoneEvent( + sequence_number=self._parent.next_sequence(), + output_index=output_index, + item=item, + ) + self._parent.add_completed_output_item(item) class 
_FunctionCallOutputStreamingState(_BaseStreamingState): @@ -255,7 +313,7 @@ def _to_output(cls, result: Any) -> str: class AgentFrameworkOutputStreamingConverter: """Streaming converter using content-type-specific state handlers.""" - def __init__(self, context: AgentRunContext) -> None: + def __init__(self, context: AgentRunContext, *, hitl_helper: HumanInTheLoopHelper=None) -> None: self._context = context # sequence numbers must start at 0 for first emitted event self._sequence = -1 @@ -263,6 +321,7 @@ def __init__(self, context: AgentRunContext) -> None: self._response_id = self._context.response_id self._response_created_at = None self._completed_output_items: List[ItemResource] = [] + self._hitl_helper = hitl_helper def next_sequence(self) -> int: self._sequence += 1 @@ -294,8 +353,12 @@ async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> Async ) is_changed = ( - lambda a, b: a is not None and b is not None and a.message_id != b.message_id # pylint: disable=unnecessary-lambda-assignment + lambda a, b: a is not None \ + and b is not None \ + and (a.message_id != b.message_id \ + or type(a.content[0]) != type(b.content[0])) # pylint: disable=unnecessary-lambda-assignment ) + async for group in chunk_on_change(updates, is_changed): has_value, first_tuple, contents_with_author = await peek(self._read_updates(group)) if not has_value or first_tuple is None: @@ -306,8 +369,8 @@ async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> Async state = None if isinstance(first, TextContent): state = _TextContentStreamingState(self) - elif isinstance(first, (FunctionCallContent, FunctionApprovalRequestContent)): - state = _FunctionCallStreamingState(self) + elif isinstance(first, (FunctionCallContent, UserInputRequestContents)): + state = _FunctionCallStreamingState(self, self._hitl_helper) elif isinstance(first, FunctionResultContent): state = _FunctionCallOutputStreamingState(self) elif isinstance(first, ErrorContent): @@ -350,7 
+413,7 @@ async def _read_updates(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> accepted_types = (TextContent, FunctionCallContent, - FunctionApprovalRequestContent, + UserInputRequestContents, FunctionResultContent, ErrorContent) for content in update.contents: diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py new file mode 100644 index 000000000000..30bb3aa8d9c5 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py @@ -0,0 +1,119 @@ +from typing import Any, List, Dict, Optional, Union +import json + +from agent_framework import ( + ChatMessage, + FunctionResultContent, + FunctionApprovalResponseContent, + RequestInfoEvent, + WorkflowCheckpoint, +) +from agent_framework._types import UserInputRequestContents + +from azure.ai.agentserver.core.logger import get_logger +from azure.ai.agentserver.core.server.common.constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME + +logger = get_logger() + +class HumanInTheLoopHelper: + + def get_pending_hitl_request(self, + thread_messages: List[ChatMessage] = None, + checkpoint: Optional[WorkflowCheckpoint] = None, + ) -> dict[str, Union[RequestInfoEvent, Any]]: + res = {} + # if has checkpoint (WorkflowAgent), find pending request info from checkpoint + if checkpoint and checkpoint.pending_request_info_events: + for call_id, request in checkpoint.pending_request_info_events.items(): + # find if the request is already responded in the thread messages + if isinstance(request, dict): + request_obj = RequestInfoEvent.from_dict(request) + res[call_id] = request_obj + return res + + if not thread_messages: + return res + + # if no checkpoint (Agent), find user input request and pair the feedbacks + for message in thread_messages: + 
for content in message.contents: + print(f" Content {type(content)}: {content.to_dict()}") + if isinstance(content, UserInputRequestContents): + # is a human input request + function_call = content.function_call + call_id = getattr(function_call, "call_id", "") + if call_id: + res[call_id] = RequestInfoEvent( + source_executor_id="agent", + request_id=call_id, + response_type=None, + request_data=function_call, + ) + elif isinstance(content, FunctionResultContent): + if content.call_id and content.call_id in res: + # remove requests that already got feedback + res.pop(content.call_id) + elif isinstance(content, FunctionApprovalResponseContent): + function_call = content.function_call + call_id = getattr(function_call, "call_id", "") + if call_id and call_id in res: + res.pop(call_id) + return res + + def convert_user_input_request_content(self, content: UserInputRequestContents) -> dict: + function_call = content.function_call + call_id = getattr(function_call, "call_id", "") + arguments = self.convert_request_arguments(getattr(function_call, "arguments", "")) + return { + "call_id": call_id, + "name": HUMAN_IN_THE_LOOP_FUNCTION_NAME, + "arguments": arguments or "", + } + + def convert_request_arguments(self, arguments: Any) -> str: + # convert data to payload if possible + if isinstance(arguments, dict): + data = arguments.get("data") + if data and hasattr(data, "convert_to_payload"): + return data.convert_to_payload() + + if not isinstance(arguments, str): + if hasattr(arguments, "to_dict"): # agentframework models have to_dict method + arguments = arguments.to_dict() + try: + arguments = json.dumps(arguments) + except Exception: # pragma: no cover - fallback # pylint: disable=broad-exception-caught + arguments = str(arguments) + return arguments + + def validate_and_convert_hitl_response(self, + input: str | List[Dict] | None, + pending_requests: Dict[str, RequestInfoEvent], + ) -> List[ChatMessage] | None: + + if input is None or isinstance(input, str): + 
logger.warning("Expected list input for HitL response validation, got str.") + return None + + res = [] + for item in input: + if item.get("type") != "function_call_output": + logger.warning("Expected function_call_output type for HitL response validation.") + return None + call_id = item.get("call_id", None) + if call_id and call_id in pending_requests: + res.append(self.convert_response(pending_requests[call_id], item)) + return res + + def convert_response(self, hitl_request: RequestInfoEvent, input: Dict) -> ChatMessage: + response_type = hitl_request.response_type + response_result = input.get("output", "") + logger.info(f"response_type {type(response_type)}: %s", response_type) + if response_type and hasattr(response_type, "convert_from_payload"): + response_result = response_type.convert_from_payload(input.get("output", "")) + logger.info(f"response_result {type(response_result)}: %s", response_result) + response_content = FunctionResultContent( + call_id=hitl_request.request_id, + result=response_result, + ) + return ChatMessage(role="tool", contents=[response_content]) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/__init__.py new file mode 100644 index 000000000000..cd8687e03856 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/__init__.py @@ -0,0 +1,13 @@ +from .agent_thread_repository import ( + AgentThreadRepository, + InMemoryAgentThreadRepository, + SerializedAgentThreadRepository, + JsonLocalFileAgentThreadRepository, +) + +__all__ = [ + "AgentThreadRepository", + "InMemoryAgentThreadRepository", + "SerializedAgentThreadRepository", + "JsonLocalFileAgentThreadRepository", +] \ No newline at end of file diff --git 
a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py new file mode 100644 index 000000000000..ea3de29385e1 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py @@ -0,0 +1,146 @@ +from abc import ABC, abstractmethod +import json +import os +from typing import Any, Optional + +from agent_framework import AgentThread, AgentProtocol + + +class AgentThreadRepository(ABC): + """AgentThread repository to manage saved thread messages of agent threads and workflows.""" + + @abstractmethod + async def get(self, conversation_id: str) -> Optional[AgentThread]: + """Retrieve the savedt thread for a given conversation ID. + + :param conversation_id: The conversation ID. + :type conversation_id: str + + :return: The saved AgentThread if available, None otherwise. + :rtype: Optional[AgentThread] + """ + + @abstractmethod + async def set(self, conversation_id: str, thread: AgentThread) -> None: + """Save the thread for a given conversation ID. + + :param conversation_id: The conversation ID. + :type conversation_id: str + :param thread: The thread to save. + :type thread: AgentThread + """ + + +class InMemoryAgentThreadRepository(AgentThreadRepository): + """In-memory implementation of AgentThreadRepository.""" + def __init__(self) -> None: + self._inventory: dict[str, AgentThread] = {} + + async def get(self, conversation_id: str) -> Optional[AgentThread]: + """Retrieve the saved thread for a given conversation ID. + + :param conversation_id: The conversation ID. + :type conversation_id: str + + :return: The saved AgentThread if available, None otherwise. 
+ :rtype: Optional[AgentThread] + """ + if conversation_id in self._inventory: + return self._inventory[conversation_id] + return None + + async def set(self, conversation_id: str, thread: AgentThread) -> None: + """Save the thread for a given conversation ID. + + :param conversation_id: The conversation ID. + :type conversation_id: str + :param thread: The thread to save. + :type thread: AgentThread + """ + if conversation_id and thread: + self._inventory[conversation_id] = thread + + +class SerializedAgentThreadRepository(AgentThreadRepository): + """Implementation of AgentThreadRepository with AgentThread serialization.""" + def __init__(self, agent: AgentProtocol) -> None: + """ + Initialize the repository with the given agent. + + :param agent: The agent instance. + :type agent: AgentProtocol + """ + self._agent = agent + + async def get(self, conversation_id: str) -> Optional[AgentThread]: + """Retrieve the saved thread for a given conversation ID. + + :param conversation_id: The conversation ID. + :type conversation_id: str + + :return: The saved AgentThread if available, None otherwise. + :rtype: Optional[AgentThread] + """ + serialized_thread = await self.read_from_storage(conversation_id) + if serialized_thread: + thread = await self._agent.deserialize_thread(serialized_thread) + return thread + return None + + async def set(self, conversation_id: str, thread: AgentThread) -> None: + """Save the thread for a given conversation ID. + + :param conversation_id: The conversation ID. + :type conversation_id: str + :param thread: The thread to save. + :type thread: AgentThread + """ + serialized_thread = await thread.serialize() + await self.write_to_storage(conversation_id, serialized_thread) + + async def read_from_storage(self, conversation_id: str) -> Optional[Any]: + """Read the serialized thread from storage. + + :param conversation_id: The conversation ID. + :type conversation_id: str + + :return: The serialized thread if available, None otherwise. 
+ :rtype: Optional[Any] + """ + raise NotImplementedError("read_from_storage is not implemented.") + + async def write_to_storage(self, conversation_id: str, serialized_thread: Any) -> None: + """Write the serialized thread to storage. + + :param conversation_id: The conversation ID. + :type conversation_id: str + :param serialized_thread: The serialized thread to save. + :type serialized_thread: Any + """ + raise NotImplementedError("write_to_storage is not implemented.") + + +class JsonLocalFileAgentThreadRepository(SerializedAgentThreadRepository): + """Json based implementation of AgentThreadRepository using local file storage.""" + def __init__(self, agent: AgentProtocol, storage_path: str) -> None: + super().__init__(agent) + self._storage_path = storage_path + os.makedirs(self._storage_path, exist_ok=True) + + async def read_from_storage(self, conversation_id: str) -> Optional[Any]: + file_path = self._get_file_path(conversation_id) + if os.path.exists(file_path): + with open(file_path, "r", encoding="utf-8") as f: + serialized_thread = f.read() + if serialized_thread: + return json.loads(serialized_thread) + return None + + async def write_to_storage(self, conversation_id: str, serialized_thread: Any) -> None: + serialized_str = json.dumps(serialized_thread) + file_path = self._get_file_path(conversation_id) + with open(file_path, "w", encoding="utf-8") as f: + f.write(serialized_str) + + def _get_file_path(self, conversation_id: str) -> str: + return os.path.join(self._storage_path, f"{conversation_id}.json") \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/.envtemplate b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/.envtemplate new file mode 100644 index 000000000000..bd646f163bb7 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/.envtemplate @@ -0,0 +1,3 @@ 
+AZURE_OPENAI_ENDPOINT=https://.cognitiveservices.azure.com/ +OPENAI_API_VERSION=2025-03-01-preview +AZURE_OPENAI_CHAT_DEPLOYMENT_NAME= \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/README.md b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/README.md new file mode 100644 index 000000000000..19f0335895e3 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/README.md @@ -0,0 +1,112 @@ +# Human-in-the-Loop Agent Framework Sample + +This sample shows how to host a Microsoft Agent Framework workflow inside Azure AI Agent Server while escalating responses to a real human when the reviewer executor decides that manual approval is required. + +## Prerequisites + +- Python 3.10+ and `pip` +- Azure CLI logged in with `az login` (used by `AzureCliCredential`) +- An Azure OpenAI chat deployment + +### Environment configuration + +1. Copy `.envtemplate` to `.env` and fill in your Azure OpenAI details: + + ``` + AZURE_OPENAI_ENDPOINT=https://.cognitiveservices.azure.com/ + OPENAI_API_VERSION=2025-03-01-preview + AZURE_OPENAI_CHAT_DEPLOYMENT_NAME= + ``` + +2. Create a virtual environment (optional but recommended) and install the sample dependencies: + + ```powershell + python -m venv .venv + . .venv/Scripts/Activate.ps1 + pip install -r requirements.txt + ``` + +`main.py` automatically loads the `.env` file before spinning up the server. + +## Run the workflow-hosted agent + +From this directory start the adapter host (defaults to `http://0.0.0.0:8088`): + +```powershell +python main.py +``` + +The worker executor produces answers, the reviewer executor always escalates to a person, and the adapter exposes the whole workflow through the `/responses` endpoint. + +For Human-in-the-loop scenario, the `HumanReviewRequest` and `ReviewResponse` are classes provided by user. 
User should provide functions for these classes that allow adapter convert the data to request payloads. + + +## Send a user request + +Save the following payload to `request.json` (adjust the prompt as needed): + +```json +{ + "input": "Plan a 2-day Seattle trip that balances food and museums.", + "stream": false +} +``` + +Then call the server (PowerShell example): + +```pwsh +$body = Get-Content .\request.json -Raw +Invoke-RestMethod -Uri http://localhost:8088/responses -Method Post -ContentType "application/json" -Body $body ` + | ConvertTo-Json -Depth 8 +``` + +A human-review interrupt looks like this (formatted for clarity): + +```json +{ + "conversation": {"id": "conv_xxx"}, + "output": [ + { + "type": "function_call", + "name": "__hosted_agent_adapter_hitl__", + "call_id": "call_xxx", + "arguments": "{\"agent_request\":{\"request_id\":\"req_xxx\",...}}" + } + ] +} +``` + +Capture three values from the response: + +- `conversation.id` +- The `call_id` of the `__hosted_agent_adapter_hitl__` function call +- The `request_id` inside the serialized `agent_request` + +## Provide human feedback + +Respond by sending a `function_call_output` message that carries your review decision. Replace the placeholders before running the command: + +```pwsh +$payload = @{ + stream = $false + conversation = @{ id = "" } + input = @( + @{ + type = "function_call_output" + call_id = "" + output = '{"request_id":"","feedback":"Approved","approved":true}' + } + ) +} | ConvertTo-Json -Depth 5 + +Invoke-RestMethod -Uri http://localhost:8088/responses -Method Post -ContentType "application/json" -Body $payload ` + | ConvertTo-Json -Depth 8 +``` + +Update the JSON string in `output` to reject a response: + +```json +{"request_id":"","feedback":"Missing safety disclaimers.","approved":false} +``` + +Once the reviewer accepts the human feedback, the worker emits the approved assistant response and the HTTP call returns the final output. 
diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/main.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/main.py new file mode 100644 index 000000000000..93ac31ffc4f3 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/main.py @@ -0,0 +1,121 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import json +from dataclasses import dataclass +from pathlib import Path +from typing import Any + +from agent_framework.azure import AzureOpenAIChatClient +from azure.identity import AzureCliCredential +from dotenv import load_dotenv + +from agent_framework import ( # noqa: E402 + Executor, + InMemoryCheckpointStorage, + WorkflowAgent, + WorkflowBuilder, + WorkflowContext, + handler, + response_handler, +) +from workflow_as_agent_reflection_pattern import ( # noqa: E402 + ReviewRequest, + ReviewResponse, + Worker, +) + +from azure.ai.agentserver.agentframework import from_agent_framework + +load_dotenv() + +@dataclass +class HumanReviewRequest: + """A request message type for escalation to a human reviewer.""" + + agent_request: ReviewRequest | None = None + + def convert_to_payload(self) -> str: + """Convert the HumanReviewRequest to a payload string.""" + request = self.agent_request + payload: dict[str, Any] = {"agent_request": None} + + if request: + payload["agent_request"] = { + "request_id": request.request_id, + "user_messages": [msg.to_dict() for msg in request.user_messages], + "agent_messages": [msg.to_dict() for msg in request.agent_messages], + } + + return json.dumps(payload, indent=2) + + +class ReviewerWithHumanInTheLoop(Executor): + """Executor that always escalates reviews to a human manager.""" + + def __init__(self, worker_id: str, reviewer_id: str | None = None) -> None: + unique_id = reviewer_id or f"{worker_id}-reviewer" + super().__init__(id=unique_id) + self._worker_id = worker_id + + @handler + async def 
review(self, request: ReviewRequest, ctx: WorkflowContext) -> None: + # In this simplified example, we always escalate to a human manager. + # See workflow_as_agent_reflection.py for an implementation + # using an automated agent to make the review decision. + print(f"Reviewer: Evaluating response for request {request.request_id[:8]}...") + print("Reviewer: Escalating to human manager...") + + # Forward the request to a human manager by sending a HumanReviewRequest. + await ctx.request_info( + request_data=HumanReviewRequest(agent_request=request), + response_type=ReviewResponse, + ) + + @response_handler + async def accept_human_review( + self, + original_request: HumanReviewRequest, + response: ReviewResponse, + ctx: WorkflowContext[ReviewResponse], + ) -> None: + # Accept the human review response and forward it back to the Worker. + print(f"Reviewer: Accepting human review for request {response.request_id[:8]}...") + print(f"Reviewer: Human feedback: {response.feedback}") + print(f"Reviewer: Human approved: {response.approved}") + print("Reviewer: Forwarding human review back to worker...") + await ctx.send_message(response, target_id=self._worker_id) + + +def build_agent(): + # Build a workflow with bidirectional communication between Worker and Reviewer, + # and escalation paths for human review. 
+ agent = ( + WorkflowBuilder() + .register_executor( + lambda: Worker( + id="sub-worker", + chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + ), + name="worker", + ) + .register_executor( + lambda: ReviewerWithHumanInTheLoop(worker_id="sub-worker"), + name="reviewer", + ) + .add_edge("worker", "reviewer") # Worker sends requests to Reviewer + .add_edge("reviewer", "worker") # Reviewer sends feedback to Worker + .set_start_executor("worker") + .build() + .as_agent() # Convert workflow into an agent interface + ) + return agent + + +async def run_agent() -> None: + """Run the workflow inside the agent server adapter.""" + agent = build_agent() + await from_agent_framework(agent).run_async() + +if __name__ == "__main__": + asyncio.run(run_agent()) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/requirements.txt b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/requirements.txt new file mode 100644 index 000000000000..c044abf99eb1 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/requirements.txt @@ -0,0 +1,5 @@ +python-dotenv>=1.0.0 +azure-identity +agent-framework-azure-ai +azure-ai-agentserver-core +azure-ai-agentserver-agentframework diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/workflow_as_agent_reflection_pattern.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/workflow_as_agent_reflection_pattern.py new file mode 100644 index 000000000000..168d90cdd93d --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/workflow_as_agent_reflection_pattern.py @@ -0,0 +1,125 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +from dataclasses import dataclass +import json +from uuid import uuid4 + +from agent_framework import ( + AgentRunResponseUpdate, + AgentRunUpdateEvent, + ChatClientProtocol, + ChatMessage, + Contents, + Executor, + Role, + WorkflowContext, + handler, +) + +@dataclass +class ReviewRequest: + """Structured request passed from Worker to Reviewer for evaluation.""" + + request_id: str + user_messages: list[ChatMessage] + agent_messages: list[ChatMessage] + + +@dataclass +class ReviewResponse: + """Structured response from Reviewer back to Worker.""" + + request_id: str + feedback: str + approved: bool + + @staticmethod + def convert_from_payload(payload: str) -> "ReviewResponse": + """Convert a JSON payload string to a ReviewResponse instance.""" + data = json.loads(payload) + return ReviewResponse( + request_id=data["request_id"], + feedback=data["feedback"], + approved=data["approved"], + ) + + +PendingReviewState = tuple[ReviewRequest, list[ChatMessage]] + + +class Worker(Executor): + """Executor that generates responses and incorporates feedback when necessary.""" + + def __init__(self, id: str, chat_client: ChatClientProtocol) -> None: + super().__init__(id=id) + self._chat_client = chat_client + self._pending_requests: dict[str, PendingReviewState] = {} + + @handler + async def handle_user_messages(self, user_messages: list[ChatMessage], ctx: WorkflowContext[ReviewRequest]) -> None: + print("Worker: Received user messages, generating response...") + + # Initialize chat with system prompt. + messages = [ChatMessage(role=Role.SYSTEM, text="You are a helpful assistant.")] + messages.extend(user_messages) + + print("Worker: Calling LLM to generate response...") + response = await self._chat_client.get_response(messages=messages) + print(f"Worker: Response generated: {response.messages[-1].text}") + + # Add agent messages to context. + messages.extend(response.messages) + + # Create review request and send to Reviewer. 
+ request = ReviewRequest(request_id=str(uuid4()), user_messages=user_messages, agent_messages=response.messages) + print(f"Worker: Sending response for review (ID: {request.request_id[:8]})") + await ctx.send_message(request) + + # Track request for possible retry. + self._pending_requests[request.request_id] = (request, messages) + + @handler + async def handle_review_response(self, review: ReviewResponse, ctx: WorkflowContext[ReviewRequest]) -> None: + print(f"Worker: Received review for request {review.request_id[:8]} - Approved: {review.approved}") + + if review.request_id not in self._pending_requests: + raise ValueError(f"Unknown request ID in review: {review.request_id}") + + request, messages = self._pending_requests.pop(review.request_id) + + if review.approved: + print("Worker: Response approved. Emitting to external consumer...") + contents: list[Contents] = [] + for message in request.agent_messages: + contents.extend(message.contents) + + # Emit approved result to external consumer via AgentRunUpdateEvent. + await ctx.add_event( + AgentRunUpdateEvent(self.id, data=AgentRunResponseUpdate(contents=contents, role=Role.ASSISTANT)) + ) + return + + print(f"Worker: Response not approved. Feedback: {review.feedback}") + print("Worker: Regenerating response with feedback...") + + # Incorporate review feedback. + messages.append(ChatMessage(role=Role.SYSTEM, text=review.feedback)) + messages.append( + ChatMessage(role=Role.SYSTEM, text="Please incorporate the feedback and regenerate the response.") + ) + messages.extend(request.user_messages) + + # Retry with updated prompt. + response = await self._chat_client.get_response(messages=messages) + print(f"Worker: New response generated: {response.messages[-1].text}") + + messages.extend(response.messages) + + # Send updated request for re-review. 
+ new_request = ReviewRequest( + request_id=review.request_id, user_messages=request.user_messages, agent_messages=response.messages + ) + await ctx.send_message(new_request) + + # Track new request for further evaluation. + self._pending_requests[new_request.request_id] = (new_request, messages) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/.envtemplate b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/.envtemplate new file mode 100644 index 000000000000..bd646f163bb7 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/.envtemplate @@ -0,0 +1,3 @@ +AZURE_OPENAI_ENDPOINT=https://.cognitiveservices.azure.com/ +OPENAI_API_VERSION=2025-03-01-preview +AZURE_OPENAI_CHAT_DEPLOYMENT_NAME= \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/README.md b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/README.md new file mode 100644 index 000000000000..6a07f73e03c7 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/README.md @@ -0,0 +1,46 @@ +pip install -e src/adapter/python +# Agent Framework Sample + +This sample demonstrates how to use the agents hosting adapter with Microsoft Agent Framework. + +## Prerequisites + +> **Azure sign-in:** Run `az login` before starting the sample so `DefaultAzureCredential` can acquire a CLI token. 
+ +### Environment Variables + +Copy `.envtemplate` to `.env` and supply: + +``` +AZURE_OPENAI_ENDPOINT=https://.cognitiveservices.azure.com/ +OPENAI_API_VERSION=2025-03-01-preview +AZURE_OPENAI_CHAT_DEPLOYMENT_NAME= +``` + +## Running the Sample + +Follow these steps from this folder: + +1) Start the agent server (defaults to 0.0.0.0:8088): + +```bash +python main.py +``` + +2) Send a non-streaming request (returns a single JSON response): + +```bash +curl -sS \ + -H "Content-Type: application/json" \ + -X POST http://localhost:8088/responses \ + -d "{\"input\":\"Add a dentist appointment on March 15th\",\"stream\":false}" +``` + +3) Send a streaming request (server-sent events). Use -N to disable curl buffering: + +```bash +curl -N \ + -H "Content-Type: application/json" \ + -X POST http://localhost:8088/responses \ + -d "{\"input\":\"Add a dentist appointment on March 15th\",\"stream\":true}" +``` diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/main.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/main.py new file mode 100644 index 000000000000..094a22826c20 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/main.py @@ -0,0 +1,87 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio +from typing import Annotated, Any, Collection +from dotenv import load_dotenv +import json + +load_dotenv() + +from agent_framework import ChatAgent, ChatMessage, ChatMessageStoreProtocol, FunctionResultContent, ai_function +from agent_framework._threads import ChatMessageStoreState +from agent_framework._types import UserInputRequestContents +from agent_framework.azure import AzureOpenAIChatClient + +from azure.ai.agentserver.agentframework import from_agent_framework +from azure.ai.agentserver.agentframework.persistence.agent_thread_repository import JsonLocalFileAgentThreadRepository + +""" +Tool Approvals with Threads + +This sample demonstrates using tool approvals with threads. +With threads, you don't need to manually pass previous messages - +the thread stores and retrieves them automatically. +""" + + +class CustomChatMessageStore(ChatMessageStoreProtocol): + """Implementation of custom chat message store. + In real applications, this can be an implementation of relational database or vector store.""" + + def __init__(self, messages: Collection[ChatMessage] | None = None) -> None: + self._messages: list[ChatMessage] = [] + if messages: + self._messages.extend(messages) + + async def add_messages(self, messages: Collection[ChatMessage]) -> None: + self._messages.extend(messages) + + async def list_messages(self) -> list[ChatMessage]: + return self._messages + + @classmethod + async def deserialize(cls, serialized_store_state: Any, **kwargs: Any) -> "CustomChatMessageStore": + """Create a new instance from serialized state.""" + store = cls() + await store.update_from_state(serialized_store_state, **kwargs) + return store + + async def update_from_state(self, serialized_store_state: Any, **kwargs: Any) -> None: + """Update this instance from serialized state.""" + if serialized_store_state: + state = ChatMessageStoreState.from_dict(serialized_store_state, **kwargs) + if state.messages: + self._messages.extend(state.messages) + + async def 
serialize(self, **kwargs: Any) -> Any: + """Serialize this store's state.""" + state = ChatMessageStoreState(messages=self._messages) + return state.to_dict(**kwargs) + + +@ai_function(approval_mode="always_require") +def add_to_calendar( + event_name: Annotated[str, "Name of the event"], date: Annotated[str, "Date of the event"] +) -> str: + """Add an event to the calendar (requires approval).""" + print(f">>> EXECUTING: add_to_calendar(event_name='{event_name}', date='{date}')") + return f"Added '{event_name}' to calendar on {date}" + + +def build_agent(): + return ChatAgent( + chat_client=AzureOpenAIChatClient(), + name="CalendarAgent", + instructions="You are a helpful calendar assistant.", + tools=[add_to_calendar], + chat_message_store_factory=CustomChatMessageStore, + ) + + +async def main() -> None: + agent = build_agent() + thread_repository = JsonLocalFileAgentThreadRepository(agent=agent, storage_path="./thread_storage") + await from_agent_framework(agent, thread_repository=thread_repository).run_async() + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/requirements.txt b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/requirements.txt new file mode 100644 index 000000000000..c044abf99eb1 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/requirements.txt @@ -0,0 +1,5 @@ +python-dotenv>=1.0.0 +azure-identity +agent-framework-azure-ai +azure-ai-agentserver-core +azure-ai-agentserver-agentframework From e65bbcdf60a4cb0b6ceba131b0c1a54fa68a513b Mon Sep 17 00:00:00 2001 From: lusu-msft <68949729+lusu-msft@users.noreply.github.com> Date: Wed, 14 Jan 2026 14:46:24 -0800 Subject: [PATCH 47/94] [agentserver][agentframework] update for AF observability breaking change (#44616) * update for AF breaking changes, make observability setup compatible for both newer 
and earlier versions * set upper version limit for agent-framework packages * get otlp protocol from env var --- .../agentframework/agent_framework.py | 89 +++++++++++++++---- .../pyproject.toml | 4 +- .../azure/ai/agentserver/core/constants.py | 1 + 3 files changed, 76 insertions(+), 18 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 7607fd392513..ed0eb699be92 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -180,34 +180,91 @@ async def _resolve_agent_for_request(self, context: AgentRunContext): def init_tracing(self): try: - exporter = os.environ.get(AdapterConstants.OTEL_EXPORTER_ENDPOINT) + otel_exporter_endpoint = os.environ.get(AdapterConstants.OTEL_EXPORTER_ENDPOINT) + otel_exporter_protocol = os.environ.get(AdapterConstants.OTEL_EXPORTER_OTLP_PROTOCOL) app_insights_conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME) project_endpoint = os.environ.get(AdapterConstants.AZURE_AI_PROJECT_ENDPOINT) - if exporter or app_insights_conn_str: - from agent_framework.observability import setup_observability - - setup_observability( - enable_sensitive_data=True, - otlp_endpoint=exporter, - applicationinsights_connection_string=app_insights_conn_str, - ) + exporters = [] + if otel_exporter_endpoint: + otel_exporter = self._create_otlp_exporter(otel_exporter_endpoint, protocol=otel_exporter_protocol) + if otel_exporter: + exporters.append(otel_exporter) + if app_insights_conn_str: + appinsight_exporter = self._create_application_insights_exporter(app_insights_conn_str) + if appinsight_exporter: + exporters.append(appinsight_exporter) + + if exporters and 
self._setup_observability(exporters): + logger.info("Observability setup completed with provided exporters.") elif project_endpoint: - self.setup_tracing_with_azure_ai_client(project_endpoint) + self._setup_tracing_with_azure_ai_client(project_endpoint) except Exception as e: logger.warning(f"Failed to initialize tracing: {e}", exc_info=True) self.tracer = trace.get_tracer(__name__) - def setup_tracing_with_azure_ai_client(self, project_endpoint: str): - logger.info("Setting up tracing with AzureAIClient") - logger.info(f"Project endpoint for tracing credential: {self.credentials}") + def _create_application_insights_exporter(self, connection_string): + try: + from azure.monitor.opentelemetry.exporter import AzureMonitorTraceExporter + + return AzureMonitorTraceExporter.from_connection_string(connection_string) + except Exception as e: + logger.error(f"Failed to create Application Insights exporter: {e}", exc_info=True) + return None + + def _create_otlp_exporter(self, endpoint, protocol=None): + try: + if protocol and protocol.lower() in ("http", "http/protobuf", "http/json"): + from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter + + return OTLPSpanExporter(endpoint=endpoint) + else: + from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter + + return OTLPSpanExporter(endpoint=endpoint) + except Exception as e: + logger.error(f"Failed to create OTLP exporter: {e}", exc_info=True) + return None + + def _setup_observability(self, exporters) -> bool: + setup_function = self._try_import_configure_otel_providers() + if not setup_function: # fallback to early version with setup_observability + setup_function = self._try_import_setup_observability() + if setup_function: + setup_function( + enable_sensitive_data=True, + exporters=exporters, + ) + return True + return False + + def _try_import_setup_observability(self): + try: + from agent_framework.observability import setup_observability + return 
setup_observability + except ImportError as e: + logger.warning(f"Failed to import setup_observability: {e}") + return None + + def _try_import_configure_otel_providers(self): + try: + from agent_framework.observability import configure_otel_providers + return configure_otel_providers + except ImportError as e: + logger.warning(f"Failed to import configure_otel_providers: {e}") + return None + + def _setup_tracing_with_azure_ai_client(self, project_endpoint: str): async def setup_async(): async with AzureAIClient( project_endpoint=project_endpoint, - async_credential=self.credentials, - credential=self.credentials, + async_credential=self.credentials, + credential=self.credentials, # Af breaking change, keep both for compatibility ) as agent_client: - await agent_client.setup_azure_ai_observability() + try: + await agent_client.configure_azure_monitor() + except AttributeError: + await agent_client.setup_azure_ai_observability() import asyncio diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml index a86c9eef2648..47ffdce2c23e 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml @@ -21,8 +21,8 @@ keywords = ["azure", "azure sdk"] dependencies = [ "azure-ai-agentserver-core>=1.0.0b7", - "agent-framework-azure-ai>=1.0.0b251112", - "agent-framework-core>=1.0.0b251112", + "agent-framework-azure-ai>=1.0.0b251112,<=1.0.0b260107", + "agent-framework-core>=1.0.0b251112,<=1.0.0b260107", "opentelemetry-exporter-otlp-proto-grpc>=1.36.0", ] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py index 7844eee8d155..ae6c04235ff1 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py +++ 
b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py @@ -8,6 +8,7 @@ class Constants: AGENT_NAME = "AGENT_NAME" AGENT_PROJECT_RESOURCE_ID = "AGENT_PROJECT_NAME" OTEL_EXPORTER_ENDPOINT = "OTEL_EXPORTER_ENDPOINT" + OTEL_EXPORTER_OTLP_PROTOCOL = "OTEL_EXPORTER_OTLP_PROTOCOL" AGENT_LOG_LEVEL = "AGENT_LOG_LEVEL" AGENT_DEBUG_ERRORS = "AGENT_DEBUG_ERRORS" ENABLE_APPLICATION_INSIGHTS_LOGGER = "AGENT_APP_INSIGHTS_ENABLED" From 1e58922b1519f26f5fa5683f4ccc54ca40fbf587 Mon Sep 17 00:00:00 2001 From: lusu-msft <68949729+lusu-msft@users.noreply.github.com> Date: Mon, 19 Jan 2026 11:09:08 -0800 Subject: [PATCH 48/94] [AgentServer][agent-frameowork] Add CheckpointRepository and refined samples (#44688) * refined human in the loop with workflowagent sampel * updated readme for agent framework samples * add agent framework checkpoint repository * refined checkpoint loading * fix ignore * updated sample readme * fix unit tests --- .../agentframework/agent_framework.py | 66 +++++--- .../agent_framework_input_converters.py | 3 +- .../agentframework/persistence/__init__.py | 8 + .../persistence/checkpoint_repository.py | 62 ++++++++ .../dev_requirements.txt | 3 +- .../samples/human_in_the_loop/README.md | 112 -------------- .../human_in_the_loop_ai_function/.gitignore | 1 + .../human_in_the_loop_ai_function/README.md | 94 +++++++++--- .../human_in_the_loop_ai_function/main.py | 5 +- .../.envtemplate | 0 .../README.md | 142 ++++++++++++++++++ .../main.py | 13 +- .../requirements.txt | 0 .../workflow_as_agent_reflection_pattern.py | 14 ++ .../test_agent_framework_input_converter.py | 45 +++--- 15 files changed, 384 insertions(+), 184 deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/checkpoint_repository.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/README.md create mode 100644 
sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/.gitignore rename sdk/agentserver/azure-ai-agentserver-agentframework/samples/{human_in_the_loop => human_in_the_loop_workflow_agent}/.envtemplate (100%) create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/README.md rename sdk/agentserver/azure-ai-agentserver-agentframework/samples/{human_in_the_loop => human_in_the_loop_workflow_agent}/main.py (91%) rename sdk/agentserver/azure-ai-agentserver-agentframework/samples/{human_in_the_loop => human_in_the_loop_workflow_agent}/requirements.txt (100%) rename sdk/agentserver/azure-ai-agentserver-agentframework/samples/{human_in_the_loop => human_in_the_loop_workflow_agent}/workflow_as_agent_reflection_pattern.py (89%) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index ed0eb699be92..ed0b8b2ea64c 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -8,7 +8,7 @@ from typing import TYPE_CHECKING, Any, AsyncGenerator, Awaitable, Optional, Protocol, Union, List import inspect -from agent_framework import AgentProtocol, AIFunction, InMemoryCheckpointStorage +from agent_framework import AgentProtocol, AIFunction, CheckpointStorage, InMemoryCheckpointStorage, WorkflowCheckpoint from agent_framework.azure import AzureAIClient # pylint: disable=no-name-in-module from agent_framework._workflows import get_checkpoint_summary from opentelemetry import trace @@ -30,7 +30,7 @@ from .models.agent_framework_output_streaming_converter import AgentFrameworkOutputStreamingConverter from .models.human_in_the_loop_helper import 
HumanInTheLoopHelper from .models.constants import Constants -from .persistence import AgentThreadRepository +from .persistence import AgentThreadRepository, CheckpointRepository from .tool_client import ToolClient if TYPE_CHECKING: @@ -78,6 +78,7 @@ def __init__(self, agent: Union[AgentProtocol, AgentFactory], credentials: "Optional[AsyncTokenCredential]" = None, *, thread_repository: AgentThreadRepository = None, + checkpoint_repository: CheckpointRepository = None, **kwargs: Any, ): """Initialize the AgentFrameworkCBAgent with an AgentProtocol or a factory function. @@ -94,7 +95,7 @@ def __init__(self, agent: Union[AgentProtocol, AgentFactory], self._agent_or_factory: Union[AgentProtocol, AgentFactory] = agent self._resolved_agent: "Optional[AgentProtocol]" = None self._hitl_helper = HumanInTheLoopHelper() - self._checkpoint_storage = InMemoryCheckpointStorage() + self._checkpoint_repository = checkpoint_repository self._thread_repository = thread_repository # If agent is already instantiated, use it directly @@ -296,24 +297,28 @@ async def agent_run( # pylint: disable=too-many-statements logger.info(f"Starting agent_run with stream={context.stream}") request_input = context.request.get("input") - # TODO: load agent thread from storage and deserialize + agent_thread = None + checkpoint_storage = None + last_checkpoint = None if self._thread_repository: agent_thread = await self._thread_repository.get(context.conversation_id) if agent_thread: logger.info(f"Loaded agent thread for conversation: {context.conversation_id}") else: agent_thread = agent.get_new_thread() - - last_checkpoint = None - if self._checkpoint_storage: - checkpoints = await self._checkpoint_storage.list_checkpoints() - last_checkpoint = checkpoints[-1] if len(checkpoints) > 0 else None + + if self._checkpoint_repository: + checkpoint_storage = await self._checkpoint_repository.get_or_create(context.conversation_id) + last_checkpoint = await self._get_latest_checkpoint(checkpoint_storage) if 
last_checkpoint: summary = get_checkpoint_summary(last_checkpoint) - logger.info(f"Last checkpoint summary status: {summary.status}") if summary.status == "completed": + logger.warning("Last checkpoint is completed. Will not resume from it.") last_checkpoint = None # Do not resume from completed checkpoints + if last_checkpoint: + await self._load_checkpoint(agent, last_checkpoint, checkpoint_storage) + logger.info(f"Loaded checkpoint with ID: {last_checkpoint.checkpoint_id}") input_converter = AgentFrameworkInputConverter(hitl_helper=self._hitl_helper) message = await input_converter.transform_input( @@ -333,15 +338,14 @@ async def stream_updates(): updates = agent.run_stream( message, thread=agent_thread, - checkpoint_storage=self._checkpoint_storage, - checkpoint_id=last_checkpoint.checkpoint_id if last_checkpoint else None, + checkpoint_storage=checkpoint_storage, ) async for event in streaming_converter.convert(updates): update_count += 1 yield event if agent_thread and self._thread_repository: - await self._thread_repository.set(context.conversation_id, agent_thread) + await self._thread_repository.set(context.conversation_id, agent_thread, checkpoint_storage) logger.info(f"Saved agent thread for conversation: {context.conversation_id}") logger.info("Streaming completed with %d updates", update_count) @@ -359,11 +363,11 @@ async def stream_updates(): # Non-streaming path logger.info("Running agent in non-streaming mode") non_streaming_converter = AgentFrameworkOutputNonStreamingConverter(context, hitl_helper=self._hitl_helper) - result = await agent.run(message, + result = await agent.run( + message, thread=agent_thread, - checkpoint_storage=self._checkpoint_storage, - checkpoint_id=last_checkpoint.checkpoint_id if last_checkpoint else None, - ) + checkpoint_storage=checkpoint_storage, + ) if agent_thread and self._thread_repository: await self._thread_repository.set(context.conversation_id, agent_thread) @@ -389,4 +393,30 @@ async def 
oauth_consent_stream(error=e): logger.debug("Closed tool_client after request processing") except Exception as ex: # pylint: disable=broad-exception-caught logger.warning(f"Error closing tool_client: {ex}") - + + async def _get_latest_checkpoint(self, + checkpoint_storage: CheckpointStorage) -> Optional[Any]: + """Load the latest checkpoint from the given storage. + + :param checkpoint_storage: The checkpoint storage to load from. + :type checkpoint_storage: CheckpointStorage + + :return: The latest checkpoint if available, None otherwise. + :rtype: Optional[Any] + """ + checkpoints = await checkpoint_storage.list_checkpoints() + if checkpoints: + latest_checkpoint = max(checkpoints, key=lambda cp: cp.timestamp) + return latest_checkpoint + return None + + async def _load_checkpoint(self, agent: AgentProtocol, + checkpoint: WorkflowCheckpoint, + checkpoint_storage: CheckpointStorage) -> None: + """Load the checkpoint data from the given WorkflowCheckpoint. + + :param checkpoint: The WorkflowCheckpoint to load data from. 
+ :type checkpoint: WorkflowCheckpoint + """ + await agent.run(checkpoint_id=checkpoint.checkpoint_id, + checkpoint_storage=checkpoint_storage) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py index 28cc76b51c32..a6eefb2b1740 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py @@ -47,9 +47,8 @@ async def transform_input( if self._hitl_helper: # load pending requests from checkpoint and thread messages if available thread_messages = [] - if agent_thread: + if agent_thread and agent_thread.message_store: thread_messages = await agent_thread.message_store.list_messages() - logger.info(f"Thread messages count: {len(thread_messages)}") pending_hitl_requests = self._hitl_helper.get_pending_hitl_request(thread_messages, checkpoint) logger.info(f"Pending HitL requests: {list(pending_hitl_requests.keys())}") hitl_response = self._hitl_helper.validate_and_convert_hitl_response( diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/__init__.py index cd8687e03856..40ce839556bd 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/__init__.py @@ -4,10 +4,18 @@ SerializedAgentThreadRepository, JsonLocalFileAgentThreadRepository, ) +from .checkpoint_repository 
import ( + CheckpointRepository, + InMemoryCheckpointRepository, + FileCheckpointRepository, +) __all__ = [ "AgentThreadRepository", "InMemoryAgentThreadRepository", "SerializedAgentThreadRepository", "JsonLocalFileAgentThreadRepository", + "CheckpointRepository", + "InMemoryCheckpointRepository", + "FileCheckpointRepository", ] \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/checkpoint_repository.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/checkpoint_repository.py new file mode 100644 index 000000000000..0bc89a4b5377 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/checkpoint_repository.py @@ -0,0 +1,62 @@ +from abc import ABC, abstractmethod +import os +from typing import Any, Optional + +from agent_framework import ( + CheckpointStorage, + InMemoryCheckpointStorage, + FileCheckpointStorage, +) + +class CheckpointRepository(ABC): + """Repository interface for storing and retrieving checkpoints.""" + @abstractmethod + async def get_or_create(self, conversation_id: str) -> Optional[CheckpointStorage]: + """Retrieve or create a checkpoint storage by conversation ID. + + :param conversation_id: The unique identifier for the checkpoint. + :type conversation_id: str + :return: The CheckpointStorage if found or created, None otherwise. + :rtype: Optional[CheckpointStorage] + """ + + +class InMemoryCheckpointRepository(CheckpointRepository): + """In-memory implementation of CheckpointRepository.""" + def __init__(self) -> None: + self._inventory: dict[str, CheckpointStorage] = {} + + async def get_or_create(self, conversation_id: str) -> Optional[CheckpointStorage]: + """Retrieve or create a checkpoint storage by conversation ID. + + :param conversation_id: The unique identifier for the checkpoint. 
+ :type conversation_id: str + :return: The CheckpointStorage if found or created, None otherwise. + :rtype: Optional[CheckpointStorage] + """ + if conversation_id not in self._inventory: + self._inventory[conversation_id] = InMemoryCheckpointStorage() + return self._inventory[conversation_id] + + +class FileCheckpointRepository(CheckpointRepository): + """File-based implementation of CheckpointRepository.""" + def __init__(self, storage_path: str) -> None: + self._storage_path = storage_path + self._inventory: dict[str, CheckpointStorage] = {} + os.makedirs(self._storage_path, exist_ok=True) + + async def get_or_create(self, conversation_id: str) -> Optional[CheckpointStorage]: + """Retrieve or create a checkpoint storage by conversation ID. + + :param conversation_id: The unique identifier for the checkpoint. + :type conversation_id: str + :return: The CheckpointStorage if found or created, None otherwise. + :rtype: Optional[CheckpointStorage] + """ + if conversation_id not in self._inventory: + self._inventory[conversation_id] = FileCheckpointStorage(self._get_dir_path(conversation_id)) + return self._inventory[conversation_id] + + def _get_dir_path(self, conversation_id: str) -> str: + return os.path.join(self._storage_path, conversation_id) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/dev_requirements.txt b/sdk/agentserver/azure-ai-agentserver-agentframework/dev_requirements.txt index 6c036d7fb4e0..8bf2c1695f7f 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/dev_requirements.txt +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/dev_requirements.txt @@ -1,4 +1,5 @@ -e ../../../eng/tools/azure-sdk-tools ../azure-ai-agentserver-core python-dotenv -pywin32; sys_platform == 'win32' \ No newline at end of file +pywin32; sys_platform == 'win32' +pytest-asyncio \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/README.md 
b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/README.md deleted file mode 100644 index 19f0335895e3..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/README.md +++ /dev/null @@ -1,112 +0,0 @@ -# Human-in-the-Loop Agent Framework Sample - -This sample shows how to host a Microsoft Agent Framework workflow inside Azure AI Agent Server while escalating responses to a real human when the reviewer executor decides that manual approval is required. - -## Prerequisites - -- Python 3.10+ and `pip` -- Azure CLI logged in with `az login` (used by `AzureCliCredential`) -- An Azure OpenAI chat deployment - -### Environment configuration - -1. Copy `.envtemplate` to `.env` and fill in your Azure OpenAI details: - - ``` - AZURE_OPENAI_ENDPOINT=https://.cognitiveservices.azure.com/ - OPENAI_API_VERSION=2025-03-01-preview - AZURE_OPENAI_CHAT_DEPLOYMENT_NAME= - ``` - -2. Create a virtual environment (optional but recommended) and install the sample dependencies: - - ```powershell - python -m venv .venv - . .venv/Scripts/Activate.ps1 - pip install -r requirements.txt - ``` - -`main.py` automatically loads the `.env` file before spinning up the server. - -## Run the workflow-hosted agent - -From this directory start the adapter host (defaults to `http://0.0.0.0:8088`): - -```powershell -python main.py -``` - -The worker executor produces answers, the reviewer executor always escalates to a person, and the adapter exposes the whole workflow through the `/responses` endpoint. - -For Human-in-the-loop scenario, the `HumanReviewRequest` and `ReviewResponse` are classes provided by user. User should provide functions for these classes that allow adapter convert the data to request payloads. 
- - -## Send a user request - -Save the following payload to `request.json` (adjust the prompt as needed): - -```json -{ - "input": "Plan a 2-day Seattle trip that balances food and museums.", - "stream": false -} -``` - -Then call the server (PowerShell example): - -```pwsh -$body = Get-Content .\request.json -Raw -Invoke-RestMethod -Uri http://localhost:8088/responses -Method Post -ContentType "application/json" -Body $body ` - | ConvertTo-Json -Depth 8 -``` - -A human-review interrupt looks like this (formatted for clarity): - -```json -{ - "conversation": {"id": "conv_xxx"}, - "output": [ - { - "type": "function_call", - "name": "__hosted_agent_adapter_hitl__", - "call_id": "call_xxx", - "arguments": "{\"agent_request\":{\"request_id\":\"req_xxx\",...}}" - } - ] -} -``` - -Capture three values from the response: - -- `conversation.id` -- The `call_id` of the `__hosted_agent_adapter_hitl__` function call -- The `request_id` inside the serialized `agent_request` - -## Provide human feedback - -Respond by sending a `function_call_output` message that carries your review decision. Replace the placeholders before running the command: - -```pwsh -$payload = @{ - stream = $false - conversation = @{ id = "" } - input = @( - @{ - type = "function_call_output" - call_id = "" - output = '{"request_id":"","feedback":"Approved","approved":true}' - } - ) -} | ConvertTo-Json -Depth 5 - -Invoke-RestMethod -Uri http://localhost:8088/responses -Method Post -ContentType "application/json" -Body $payload ` - | ConvertTo-Json -Depth 8 -``` - -Update the JSON string in `output` to reject a response: - -```json -{"request_id":"","feedback":"Missing safety disclaimers.","approved":false} -``` - -Once the reviewer accepts the human feedback, the worker emits the approved assistant response and the HTTP call returns the final output. 
diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/.gitignore b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/.gitignore new file mode 100644 index 000000000000..c21dfd88c196 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/.gitignore @@ -0,0 +1 @@ +thread_storage \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/README.md b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/README.md index 6a07f73e03c7..165dac4eb483 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/README.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/README.md @@ -1,15 +1,16 @@ -pip install -e src/adapter/python -# Agent Framework Sample +# Human-in-the-Loop Agent Framework Sample -This sample demonstrates how to use the agents hosting adapter with Microsoft Agent Framework. +This sample demonstrates how to host a Microsoft Agent Framework agent inside Azure AI Agent Server and escalate function-call responses to a human reviewer whenever approval is required. It is adapted from the [agent-framework sample](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/tools/ai_function_with_approval_and_threads.py). ## Prerequisites -> **Azure sign-in:** Run `az login` before starting the sample so `DefaultAzureCredential` can acquire a CLI token. 
+- Python 3.10+ and `pip` +- Azure CLI logged in with `az login` (used by `AzureCliCredential`) +- An Azure OpenAI chat deployment -### Environment Variables +### Environment configuration -Copy `.envtemplate` to `.env` and supply: +Copy `.envtemplate` to `.env` and fill in your Azure OpenAI details: ``` AZURE_OPENAI_ENDPOINT=https://.cognitiveservices.azure.com/ @@ -17,30 +18,79 @@ OPENAI_API_VERSION=2025-03-01-preview AZURE_OPENAI_CHAT_DEPLOYMENT_NAME= ``` -## Running the Sample +`main.py` automatically loads the `.env` file before spinning up the server. -Follow these steps from this folder: +## Thread persistence -1) Start the agent server (defaults to 0.0.0.0:8088): +The sample uses `JsonLocalFileAgentThreadRepository` for `AgentThread` persistence, creating a JSON file per conversation ID under the sample directory. An in-memory alternative, `InMemoryAgentThreadRepository`, lives in the `azure.ai.agentserver.agentframework.persistence` module. -```bash +To store thread messages elsewhere, inherit from `SerializedAgentThreadRepository` and override the following methods: + +- `read_from_storage(self, conversation_id: str) -> Optional[Any]` +- `write_to_storage(self, conversation_id: str, serialized_thread: Any)` + +These hooks let you plug in any backing store (blob storage, databases, etc.) without changing the rest of the sample. 
+ +## Run the hosted agent + +From this directory start the adapter host (defaults to `http://0.0.0.0:8088`): + +```powershell python main.py ``` -2) Send a non-streaming request (returns a single JSON response): +## Send a user request + +Send a `POST` request to `http://0.0.0.0:8088/responses`: -```bash -curl -sS \ - -H "Content-Type: application/json" \ - -X POST http://localhost:8088/responses \ - -d "{\"input\":\"Add a dentist appointment on March 15th\",\"stream\":false}" +```json +{ + "agent": {"name": "local_agent", "type": "agent_reference"}, + "stream": false, + "input": "Add a dentist appointment on March 15th", +} ``` -3) Send a streaming request (server-sent events). Use -N to disable curl buffering: +A response that requires a human decision looks like this (formatted for clarity): -```bash -curl -N \ - -H "Content-Type: application/json" \ - -X POST http://localhost:8088/responses \ - -d "{\"input\":\"Add a dentist appointment on March 15th\",\"stream\":true}" +```json +{ + "conversation": {"id": ""}, + "output": [ + {...}, + { + "type": "function_call", + "id": "func_xxx", + "name": "__hosted_agent_adapter_hitl__", + "call_id": "", + "arguments": "{\"event_name\":\"Dentist Appointment\",\"date\":\"2024-03-15\"}" + } + ] +} ``` + +Capture these values from the response; you will need them to provide feedback: + +- `conversation.id` +- The `call_id` associated with `__hosted_agent_adapter_hitl__` + +## Provide human feedback + +Send a `CreateResponse` request with a `function_call_output` message that contains your decision (`approve`, `reject`, or additional guidance). 
Replace the placeholders before running the command: + +```json +{ + "agent": {"name": "local_agent", "type": "agent_reference"}, + "stream": false, + "conversation": {"id": ""}, + "input": [ + { + "call_id": "", + "output": "approve", + "type": "function_call_output", + } + ] +} +``` + +When the reviewer response is accepted, the worker emits the approved assistant response and the HTTP call returns the final output. diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/main.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/main.py index 094a22826c20..56dc5fca8860 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/main.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/main.py @@ -3,13 +3,11 @@ import asyncio from typing import Annotated, Any, Collection from dotenv import load_dotenv -import json load_dotenv() -from agent_framework import ChatAgent, ChatMessage, ChatMessageStoreProtocol, FunctionResultContent, ai_function +from agent_framework import ChatAgent, ChatMessage, ChatMessageStoreProtocol, ai_function from agent_framework._threads import ChatMessageStoreState -from agent_framework._types import UserInputRequestContents from agent_framework.azure import AzureOpenAIChatClient from azure.ai.agentserver.agentframework import from_agent_framework @@ -23,7 +21,6 @@ the thread stores and retrieves them automatically. """ - class CustomChatMessageStore(ChatMessageStoreProtocol): """Implementation of custom chat message store. 
In real applications, this can be an implementation of relational database or vector store.""" diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/.envtemplate b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/.envtemplate similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/.envtemplate rename to sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/.envtemplate diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/README.md b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/README.md new file mode 100644 index 000000000000..aed6deee122a --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/README.md @@ -0,0 +1,142 @@ +# Human-in-the-Loop Agent Framework Sample + +This sample shows how to host a Microsoft Agent Framework workflow inside Azure AI Agent Server while escalating responses to a real human when the reviewer executor decides that manual approval is required. The sample is created by [agent-framework](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py). + +## Prerequisites + +- Python 3.10+ and `pip` +- Azure CLI logged in with `az login` (used by `AzureCliCredential`) +- An Azure OpenAI chat deployment + +### Environment configuration + +Copy `.envtemplate` to `.env` and fill in your Azure OpenAI details: + +``` +AZURE_OPENAI_ENDPOINT=https://.cognitiveservices.azure.com/ +OPENAI_API_VERSION=2025-03-01-preview +AZURE_OPENAI_CHAT_DEPLOYMENT_NAME= +``` + +`main.py` automatically loads the `.env` file before spinning up the server. 
+ +## Checkpoint persistence + +The adapter uses the `CheckpointRepository` interface to retrieve a `CheckpointStorage` per conversation. The storage keeps serialized workflow state so long-running conversations can resume after the host restarts. + +- Use `InMemoryCheckpointRepository()` for quick demos; checkpoints vanish once the Python process exits. +- `FileCheckpointRepository("")` is implemented to persist checkpoints in files. +- To back checkpoints with a different store (Redis, Blob, etc.), subclass `CheckpointRepository`, implement `get_or_create`, and pass your repository instance to `from_agent_framework(..., checkpoint_repository=)`. + +## Run the hosted workflow agent + +For Human-in-the-loop scenario, the `HumanReviewRequest` and `ReviewResponse` are provided by user. User should provide functions for these classes that allow adapter convert the data to request payloads. + +```py +@dataclass +class HumanReviewRequest: + """A request message type for escalation to a human reviewer.""" + + agent_request: ReviewRequest | None = None + + def convert_to_payload(self) -> str: # called by adapter + """Convert the HumanReviewRequest to a payload string.""" + request = self.agent_request + payload: dict[str, Any] = {"agent_request": None} + + if request: + payload["agent_request"] = { + "request_id": request.request_id, + "user_messages": [msg.to_dict() for msg in request.user_messages], + "agent_messages": [msg.to_dict() for msg in request.agent_messages], + } + + return json.dumps(payload) +``` + +```py +@dataclass +class ReviewResponse: + """Structured response from Reviewer back to Worker.""" + + request_id: str + feedback: str + approved: bool + + @staticmethod + def convert_from_payload(payload: str) -> "ReviewResponse": + """Convert a JSON payload string to a ReviewResponse instance.""" + data = json.loads(payload) + return ReviewResponse( + request_id=data["request_id"], + feedback=data["feedback"], + approved=data["approved"], + ) +``` + +From this 
directory start the adapter host (defaults to `http://0.0.0.0:8088`): + +```powershell +python main.py +``` + +The worker executor produces answers, the reviewer executor always escalates to a person, and the adapter exposes the whole workflow through the `/responses` endpoint. + + + +## Send a user request + +Send a `POST` request to `http://0.0.0.0:8088/responses` + +```json +{ + "agent": {"name": "local_agent", "type": "agent_reference"}, + "stream": false, + "input": "Write code for parallel reading 1 million Files on disk and write to a sorted output file.", +} +``` + +A response with human-review request looks like this (formatted for clarity): + +```json +{ + "conversation": {"id": ""}, + "output": [ + {...}, + { + "type": "function_call", + "id": "func_xxx", + "name": "__hosted_agent_adapter_hitl__", + "call_id": "", + "arguments": "{\"agent_request\":{\"request_id\":\"\",...}}" + } + ] +} +``` + +Capture three values from the response: + +- `conversation.id` +- The `call_id` of the `__hosted_agent_adapter_hitl__` function call +- The `request_id` inside the serialized `agent_request` + +## Provide human feedback + +Respond by sending a `CreateResponse` request with `function_call_output` message that carries your review decision. Replace the placeholders before running the command: + +```json +{ + "agent": {"name": "local_agent", "type": "agent_reference"}, + "stream": false, + "convseration": {"id": ""}, + "input": [ + { + "call_id": "", + "output": "{\"request_id\":\"\",\"approved\":true}", + "type": "function_call_output", + } + ] +} +``` + +Once the reviewer accepts the human feedback, the worker emits the approved assistant response and the HTTP call returns the final output. 
diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/main.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/main.py similarity index 91% rename from sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/main.py rename to sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/main.py index 93ac31ffc4f3..b5deef145920 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/main.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/main.py @@ -3,7 +3,6 @@ import asyncio import json from dataclasses import dataclass -from pathlib import Path from typing import Any from agent_framework.azure import AzureOpenAIChatClient @@ -12,8 +11,6 @@ from agent_framework import ( # noqa: E402 Executor, - InMemoryCheckpointStorage, - WorkflowAgent, WorkflowBuilder, WorkflowContext, handler, @@ -26,6 +23,7 @@ ) from azure.ai.agentserver.agentframework import from_agent_framework +from azure.ai.agentserver.agentframework.persistence import InMemoryCheckpointRepository load_dotenv() @@ -86,8 +84,7 @@ async def accept_human_review( print("Reviewer: Forwarding human review back to worker...") await ctx.send_message(response, target_id=self._worker_id) - -def build_agent(): +def build_agent(tools): # Build a workflow with bidirectional communication between Worker and Reviewer, # and escalation paths for human review. 
agent = ( @@ -114,8 +111,10 @@ def build_agent(): async def run_agent() -> None: """Run the workflow inside the agent server adapter.""" - agent = build_agent() - await from_agent_framework(agent).run_async() + await from_agent_framework( + build_agent, # pass WorkflowAgent factory to adapter, build a new instance per request + checkpoint_repository=InMemoryCheckpointRepository(), # for checkpoint storage + ).run_async() if __name__ == "__main__": asyncio.run(run_agent()) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/requirements.txt b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/requirements.txt similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/requirements.txt rename to sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/requirements.txt diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/workflow_as_agent_reflection_pattern.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/workflow_as_agent_reflection_pattern.py similarity index 89% rename from sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/workflow_as_agent_reflection_pattern.py rename to sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/workflow_as_agent_reflection_pattern.py index 168d90cdd93d..ef2a286ba174 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop/workflow_as_agent_reflection_pattern.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/workflow_as_agent_reflection_pattern.py @@ -123,3 +123,17 @@ async def handle_review_response(self, review: ReviewResponse, ctx: WorkflowCont # Track new request for further evaluation. 
self._pending_requests[new_request.request_id] = (new_request, messages) + + async def on_checkpoint_save(self) -> dict: + """ + Persist pending requests during checkpointing. + In memory implementation for demonstration purposes. + """ + return {"pending_requests": self._pending_requests} + + async def on_checkpoint_restore(self, data: dict) -> None: + """ + Load pending requests from checkpoint data. + In memory implementation for demonstration purposes. + """ + self._pending_requests = data.get("pending_requests", {}) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_agent_framework_input_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_agent_framework_input_converter.py index 3dab36131f8d..d52d0e481bd2 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_agent_framework_input_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_agent_framework_input_converter.py @@ -16,26 +16,30 @@ def converter() -> AgentFrameworkInputConverter: @pytest.mark.unit -def test_transform_none_returns_none(converter: AgentFrameworkInputConverter) -> None: - assert converter.transform_input(None) is None +@pytest.mark.asyncio +async def test_transform_none_returns_none(converter: AgentFrameworkInputConverter) -> None: + assert await converter.transform_input(None) is None @pytest.mark.unit -def test_transform_string_returns_same(converter: AgentFrameworkInputConverter) -> None: - assert converter.transform_input("hello") == "hello" +@pytest.mark.asyncio +async def test_transform_string_returns_same(converter: AgentFrameworkInputConverter) -> None: + assert await converter.transform_input("hello") == "hello" @pytest.mark.unit -def test_transform_implicit_user_message_with_string(converter: AgentFrameworkInputConverter) -> None: +@pytest.mark.asyncio +async def 
test_transform_implicit_user_message_with_string(converter: AgentFrameworkInputConverter) -> None: payload = [{"content": "How are you?"}] - result = converter.transform_input(payload) + result = await converter.transform_input(payload) assert result == "How are you?" @pytest.mark.unit -def test_transform_implicit_user_message_with_input_text_list(converter: AgentFrameworkInputConverter) -> None: +@pytest.mark.asyncio +async def test_transform_implicit_user_message_with_input_text_list(converter: AgentFrameworkInputConverter) -> None: payload = [ { "content": [ @@ -45,13 +49,14 @@ def test_transform_implicit_user_message_with_input_text_list(converter: AgentFr } ] - result = converter.transform_input(payload) + result = await converter.transform_input(payload) assert result == "Hello world" @pytest.mark.unit -def test_transform_explicit_message_returns_chat_message(converter: AgentFrameworkInputConverter) -> None: +@pytest.mark.asyncio +async def test_transform_explicit_message_returns_chat_message(converter: AgentFrameworkInputConverter) -> None: payload = [ { "type": "message", @@ -62,7 +67,7 @@ def test_transform_explicit_message_returns_chat_message(converter: AgentFramewo } ] - result = converter.transform_input(payload) + result = await converter.transform_input(payload) assert isinstance(result, ChatMessage) assert result.role == ChatRole.ASSISTANT @@ -70,7 +75,8 @@ def test_transform_explicit_message_returns_chat_message(converter: AgentFramewo @pytest.mark.unit -def test_transform_multiple_explicit_messages_returns_list(converter: AgentFrameworkInputConverter) -> None: +@pytest.mark.asyncio +async def test_transform_multiple_explicit_messages_returns_list(converter: AgentFrameworkInputConverter) -> None: payload = [ { "type": "message", @@ -86,7 +92,7 @@ def test_transform_multiple_explicit_messages_returns_list(converter: AgentFrame }, ] - result = converter.transform_input(payload) + result = await converter.transform_input(payload) assert 
isinstance(result, list) assert len(result) == 2 @@ -98,7 +104,8 @@ def test_transform_multiple_explicit_messages_returns_list(converter: AgentFrame @pytest.mark.unit -def test_transform_mixed_messages_coerces_to_strings(converter: AgentFrameworkInputConverter) -> None: +@pytest.mark.asyncio +async def test_transform_mixed_messages_coerces_to_strings(converter: AgentFrameworkInputConverter) -> None: payload = [ {"content": "First"}, { @@ -110,21 +117,23 @@ def test_transform_mixed_messages_coerces_to_strings(converter: AgentFrameworkIn }, ] - result = converter.transform_input(payload) + result = await converter.transform_input(payload) assert result == ["First", "Second"] @pytest.mark.unit -def test_transform_invalid_input_type_raises(converter: AgentFrameworkInputConverter) -> None: +@pytest.mark.asyncio +async def test_transform_invalid_input_type_raises(converter: AgentFrameworkInputConverter) -> None: with pytest.raises(Exception) as exc_info: - converter.transform_input({"content": "invalid"}) + await converter.transform_input({"content": "invalid"}) assert "Unsupported input type" in str(exc_info.value) @pytest.mark.unit -def test_transform_skips_non_text_entries(converter: AgentFrameworkInputConverter) -> None: +@pytest.mark.asyncio +async def test_transform_skips_non_text_entries(converter: AgentFrameworkInputConverter) -> None: payload = [ { "content": [ @@ -134,6 +143,6 @@ def test_transform_skips_non_text_entries(converter: AgentFrameworkInputConverte } ] - result = converter.transform_input(payload) + result = await converter.transform_input(payload) assert result is None From d279c58f13489af5cba30fe42131ecf34739e33f Mon Sep 17 00:00:00 2001 From: lusu-msft <68949729+lusu-msft@users.noreply.github.com> Date: Mon, 19 Jan 2026 13:27:25 -0800 Subject: [PATCH 49/94] fixed get state without checkpointer (#44758) --- .../ai/agentserver/langgraph/__init__.py | 4 +- .../models/response_api_default_converter.py | 21 ++++++---- .../samples/custom_state/README.md 
| 26 ++++++------ .../samples/custom_state/main.py | 42 +++++++++++-------- 4 files changed, 53 insertions(+), 40 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py index 569166bc3786..13bb7c2af189 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py @@ -17,11 +17,11 @@ def from_langgraph( agent, credentials: Optional["AsyncTokenCredential"] = None, - state_converter: Optional["models.LanggraphStateConverter"] = None, + converter: Optional["models.response_api_converter.ResponseAPIConverter"] = None, **kwargs: Any ) -> "LangGraphAdapter": - return LangGraphAdapter(agent, credentials=credentials, state_converter=state_converter, **kwargs) + return LangGraphAdapter(agent, credentials=credentials, converter=converter, **kwargs) __all__ = ["from_langgraph", "ToolClient"] diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py index e98f914ae4f7..9f2af30c4937 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py @@ -1,5 +1,5 @@ import time -from typing import Any, AsyncGenerator, AsyncIterator, Dict, TypedDict, Union +from typing import Any, AsyncGenerator, AsyncIterator, Dict, Optional, TypedDict, Union from langchain_core.runnables import RunnableConfig from langgraph.types import Command, Interrupt, StateSnapshot @@ -111,17 +111,20 @@ def _convert_request_input(self, context: 
AgentRunContext, prev_state: StateSnap :rtype: Union[Dict[str, Any], Command] """ hitl_helper = self._create_human_in_the_loop_helper(context) - command = hitl_helper.validate_and_convert_human_feedback( - prev_state, context.request.get("input") - ) - if command is not None: - return command + if hitl_helper: + command = hitl_helper.validate_and_convert_human_feedback( + prev_state, context.request.get("input") + ) + if command is not None: + return command converter = self._create_request_converter(context) return converter.convert() - async def _aget_state(self, context: AgentRunContext) -> StateSnapshot: + async def _aget_state(self, context: AgentRunContext) -> Optional[StateSnapshot]: config = RunnableConfig( configurable={"thread_id": context.conversation_id}, ) - state = await self._graph.aget_state(config=config) - return state + if self._graph.checkpointer: + state = await self._graph.aget_state(config=config) + return state + return None diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/README.md b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/README.md index 1455a366a0ad..1c78acf0105d 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/README.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/README.md @@ -1,10 +1,11 @@ -# Custom LangGraph State Converter (Mini RAG) Sample +# Custom LangGraph Converter (Mini RAG) Sample -This sample demonstrates how to host a LangGraph agent **with a custom internal state** using the `azure.ai.agentserver` SDK by supplying a custom `LanggraphStateConverter` (`RAGStateConverter`). It shows the minimal pattern required to adapt OpenAI Responses-style requests to a LangGraph state and back to an OpenAI-compatible response. +This sample demonstrates how to host a LangGraph agent **with a custom internal state** using the `azure.ai.agentserver` SDK by supplying a custom `ResponseAPIConverter` (`RAGStateConverter`). 
It shows the minimal pattern required to adapt OpenAI Responses-style requests to a LangGraph state and back to an OpenAI-compatible response. ## What It Shows -- Defining a custom state (`RAGState`) separate from the wire contract. -- Implementing `RAGStateConverter.request_to_state` and `state_to_response` to bridge request ↔ graph ↔ response. +- Defining a custom graph state (`RAGState`) separate from the wire contract. +- Implementing `ResponseAPIRequestConverter` to convert request payload to your graph input +- An overwrite of `ResponseAPIDefaultConverter.convert_response_non_stream` to bridge graph ↔ response. - A simple multi-step graph: intent analysis → optional retrieval → answer generation. - Lightweight retrieval (keyword scoring over an in‑memory knowledge base) with citation annotations added to the assistant message. - Graceful local fallback answer when Azure OpenAI credentials are absent. @@ -13,11 +14,12 @@ This sample demonstrates how to host a LangGraph agent **with a custom internal ## Flow Overview ``` CreateResponse request - -> RAGStateConverter.request_to_state - -> LangGraph executes nodes (analyze → retrieve? → answer) - -> Final state - -> RAGStateConverter.state_to_response - -> OpenAI-style response object + -> RAGStateConverter + -> RAGRequestConverter.convert + -> LangGraph executes nodes (analyze → retrieve? → answer) + -> Final state + -> RAGStateConverter.convert_response_non_stream + -> OpenAI-style response object ``` ## Running @@ -34,7 +36,7 @@ Optional environment variables for live model call: |------|--------| | Real retrieval | Replace `retrieve_docs` with embedding + vector / search backend. | | Richer answers | Introduce prompt templates or additional graph nodes. | -| Multi‑turn memory | Persist prior messages; include truncated history in `request_to_state`. | +| Multi‑turn memory | Persist prior messages; include truncated history in `RAGRequestConverter.convert`. 
| | Tool / function calls | Add nodes producing tool outputs and incorporate into final response. | | Better citations | Store offsets / URLs and expand annotation objects. | | Streaming support | (See below) | @@ -42,10 +44,10 @@ Optional environment variables for live model call: ### Adding Streaming 1. Allow `stream=True` in requests and propagate a flag into state. 2. Implement `get_stream_mode` (return appropriate mode, e.g. `events`). -3. Implement `state_to_response_stream` to yield `ResponseStreamEvent` objects (lifecycle + deltas) and finalize with a completed event. +3. Implement `convert_response_stream` to yield `ResponseStreamEvent` objects (lifecycle + deltas) and finalize with a completed event. 4. Optionally collect incremental model tokens during `generate_answer`. ## Key Takeaway -A custom `LanggraphStateConverter` is the seam where you map external request contracts to an internal graph-friendly state shape and then format the final (or streamed) result back to the OpenAI Responses schema. Start simple (non‑streaming), then layer retrieval sophistication, memory, tools, and streaming as needed. +A custom `ResponseAPIDefaultConverter` is the seam where you map external request contracts to an internal graph-friendly state shape and then format the final (or streamed) result back to the OpenAI Responses schema. Start simple (non‑streaming), then layer retrieval sophistication, memory, tools, and streaming as needed. Streaming is not supported in this sample out-of-the-box. 
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py index 27f5bf0d5ee2..a4d469449d09 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py @@ -13,9 +13,8 @@ from azure.ai.agentserver.core import AgentRunContext from azure.ai.agentserver.core.models import Response, ResponseStreamEvent from azure.ai.agentserver.langgraph import from_langgraph -from azure.ai.agentserver.langgraph.models import ( - LanggraphStateConverter, -) +from azure.ai.agentserver.langgraph.models.response_api_default_converter import ResponseAPIDefaultConverter +from azure.ai.agentserver.langgraph.models.response_api_request_converter import ResponseAPIRequestConverter load_dotenv() @@ -99,16 +98,14 @@ def retrieve_docs(question: str, k: int = 2) -> List[Dict[str, Any]]: # --------------------------------------------------------------------------- # Custom Converter # --------------------------------------------------------------------------- -class RAGStateConverter(LanggraphStateConverter): - """Converter implementing mini RAG logic (non‑streaming only).""" +class RAGRequestConverter(ResponseAPIRequestConverter): + """Converter implementing mini RAG logic.""" - def get_stream_mode(self, context: AgentRunContext) -> str: # noqa: D401 - if context.request.get("stream", False): # type: ignore[attr-defined] - raise NotImplementedError("Streaming not supported in this sample.") - return "values" + def __init__(self, context: AgentRunContext): + self.context = context - def request_to_state(self, context: AgentRunContext) -> Dict[str, Any]: # noqa: D401 - req = context.request + def convert(self) -> dict: + req = self.context.request user_input = req.get("input") if isinstance(user_input, list): for item in user_input: @@ -136,10 +133,20 @@ def request_to_state(self, 
context: AgentRunContext) -> Dict[str, Any]: # noqa: } print("initial state:", res) return res + + +class RAGStateConverter(ResponseAPIDefaultConverter): + """Converter implementing mini RAG logic (non‑streaming only).""" + def __init__(self, graph: StateGraph): + super().__init__(graph=graph, + create_request_converter=lambda context: RAGRequestConverter(context)) - def state_to_response( - self, state: Dict[str, Any], context: AgentRunContext - ) -> Response: # noqa: D401 + def get_stream_mode(self, context: AgentRunContext) -> str: # noqa: D401 + if context.request.get("stream", False): # type: ignore[attr-defined] + raise NotImplementedError("Streaming not supported in this sample.") + return "values" + + async def convert_response_non_stream(self, state: Any, context: AgentRunContext) -> Response: final_answer = state.get("final_answer") or "(no answer generated)" print(f"convert state to response, state: {state}") citations = state.get("retrieved", []) @@ -172,7 +179,7 @@ def state_to_response( } return Response(**base) - async def state_to_response_stream( # noqa: D401 + async def convert_response_stream( # noqa: D401 self, stream_state: AsyncIterator[Dict[str, Any] | Any], context: AgentRunContext, @@ -289,5 +296,6 @@ def _build_graph(): # --------------------------------------------------------------------------- if __name__ == "__main__": graph = _build_graph() - converter = RAGStateConverter() - from_langgraph(graph, converter).run() + + converter = RAGStateConverter(graph=graph) + from_langgraph(graph, converter=converter).run() From 6efa6a6458d883fe444f930b5174fbc3baa7fe01 Mon Sep 17 00:00:00 2001 From: Jun'an Chen Date: Mon, 19 Jan 2026 13:33:06 -0800 Subject: [PATCH 50/94] [agentserver] Refactor tool api (#44534) * foundry hosted mcp ops checkpoint * refine hosted mcp tools; complete refactoring of invoking connected tools * Refine FoundryToolClient * move FoundryToolClientConfiguration * high-level tool api * high-level api * user provider & 
starlette hook * impl default cached catalog * fix comments * concurrency-safe caching for cached catalog * concurrency-safe caching for cached catalog * default runtime * remove MetadataMapper * remove MetadataMapper * init global AgentServerContext * refine core.tools impl * langgraph tool support * optimize import * create top-level context for langgraph run * refine foundry tools support for agent framework (#44652) * refine foundry tools support for agent framework * remove unnecessary contextprovider * provide customized chatmiddleware * adjust to latest core implementation * minor refine * remove unnecessary filed * fix happy path of react agent + hosted mcp tool * optimize cache: cache value after cached task is done. Make FoundryTool not hashable * optimize cache: reduce loop * simplify cache code * fix key issue of cache * resolve conflict --------- Co-authored-by: melionel --- .../ai/agentserver/agentframework/__init__.py | 24 +- ...agent_framework.py => _agent_framework.py} | 189 ++--- .../agentframework/_foundry_tools.py | 150 ++++ .../agentserver/agentframework/tool_client.py | 183 ---- .../chat_client_with_foundry_tool/README.md | 81 ++ .../chat_client_with_foundry_tool.py | 34 + .../requirements.txt | 0 .../samples/tool_client_example/README.md | 113 --- .../agent_factory_example.py | 109 --- .../azure/ai/agentserver/core/__init__.py | 3 +- .../agentserver/core/application/__init__.py | 12 + .../agentserver/core/application/_builder.py | 5 + .../core/application/_configuration.py | 42 + .../agentserver/core/application/_options.py | 44 + .../core/application/_package_metadata.py | 50 ++ .../agentserver/core/client/tools/__init__.py | 13 - .../agentserver/core/client/tools/_client.py | 195 ----- .../core/client/tools/_configuration.py | 85 -- .../core/client/tools/_exceptions.py | 52 -- .../core/client/tools/_model_base.py | 174 ---- .../core/client/tools/_utils/_model_base.py | 796 ------------------ .../core/client/tools/aio/__init__.py | 13 - 
.../core/client/tools/aio/_client.py | 207 ----- .../core/client/tools/aio/_configuration.py | 86 -- .../tools/aio/operations/_operations.py | 187 ---- .../client/tools/operations/_operations.py | 551 ------------ .../azure/ai/agentserver/core/logger.py | 3 + .../ai/agentserver/core/server/_context.py | 32 + .../azure/ai/agentserver/core/server/base.py | 135 +-- .../core/server/common/agent_run_context.py | 5 +- .../ai/agentserver/core/tools/__init__.py | 17 + .../ai/agentserver/core/tools/_exceptions.py | 76 ++ .../core/{ => tools}/client/__init__.py | 2 +- .../agentserver/core/tools/client/_client.py | 174 ++++ .../core/tools/client/_configuration.py | 35 + .../agentserver/core/tools/client/_models.py | 552 ++++++++++++ .../core/tools/client/operations/_base.py | 73 ++ .../operations/_foundry_connected_tools.py | 180 ++++ .../operations/_foundry_hosted_mcp_tools.py | 168 ++++ .../core/tools/runtime/__init__.py | 5 + .../core/tools/runtime/_catalog.py | 146 ++++ .../agentserver/core/tools/runtime/_facade.py | 49 ++ .../core/tools/runtime/_invoker.py | 69 ++ .../core/tools/runtime/_resolver.py | 57 ++ .../core/tools/runtime/_runtime.py | 87 ++ .../core/tools/runtime/_starlette.py | 65 ++ .../agentserver/core/tools/runtime/_user.py | 52 ++ .../agentserver/core/tools/utils/__init__.py | 7 + .../core/tools/utils/_name_resolver.py | 37 + .../ai/agentserver/core/utils/__init__.py | 5 + .../ai/agentserver/core/utils/_credential.py | 89 ++ .../azure-ai-agentserver-core/pyproject.toml | 1 + .../custom_mock_agent_with_tools_test.py | 2 +- .../ai/agentserver/langgraph/__init__.py | 16 +- .../ai/agentserver/langgraph/_context.py | 20 + .../ai/agentserver/langgraph/langgraph.py | 215 +---- .../ai/agentserver/langgraph/tool_client.py | 226 ----- .../agentserver/langgraph/tools/__init__.py | 10 + .../agentserver/langgraph/tools/_builder.py | 61 ++ .../langgraph/tools/_chat_model.py | 112 +++ .../agentserver/langgraph/tools/_context.py | 16 + .../langgraph/tools/_middleware.py | 
110 +++ .../agentserver/langgraph/tools/_resolver.py | 148 ++++ .../agentserver/langgraph/tools/_tool_node.py | 91 ++ .../tool_client_example/react_agent_tool.py | 47 ++ .../use_tool_client_example.py | 6 +- 66 files changed, 3182 insertions(+), 3417 deletions(-) rename sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/{agent_framework.py => _agent_framework.py} (71%) create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/README.md create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/chat_client_with_foundry_tool.py rename sdk/agentserver/azure-ai-agentserver-agentframework/samples/{tool_client_example => chat_client_with_foundry_tool}/requirements.txt (100%) delete mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/README.md delete mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/agent_factory_example.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_builder.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_configuration.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_options.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py delete mode 100644 
sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_configuration.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_exceptions.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_model_base.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_context.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/_exceptions.py rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/{ => tools}/client/__init__.py (73%) create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_configuration.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_models.py create mode 100644 
sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_base.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_foundry_connected_tools.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_foundry_hosted_mcp_tools.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_facade.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_invoker.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_resolver.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_runtime.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_user.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/utils/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/utils/_name_resolver.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py create mode 100644 
sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_context.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/react_agent_tool.py diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py index 2b987cdcf3f5..20a41df7ef73 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py @@ -3,22 +3,30 @@ # --------------------------------------------------------- __path__ = __import__("pkgutil").extend_path(__path__, __name__) -from typing import TYPE_CHECKING, Optional, Any +from typing import TYPE_CHECKING, Any, Optional -from .agent_framework import AgentFrameworkCBAgent -from .tool_client import ToolClient -from ._version import VERSION +from azure.ai.agentserver.agentframework._version import VERSION +from azure.ai.agentserver.agentframework._agent_framework import AgentFrameworkCBAgent +from azure.ai.agentserver.agentframework._foundry_tools import FoundryToolsChatMiddleware +from 
azure.ai.agentserver.core.application import PackageMetadata, set_current_app if TYPE_CHECKING: # pragma: no cover from azure.core.credentials_async import AsyncTokenCredential -def from_agent_framework(agent, - credentials: Optional["AsyncTokenCredential"] = None, - **kwargs: Any) -> "AgentFrameworkCBAgent": +def from_agent_framework( + agent, + credentials: Optional["AsyncTokenCredential"] = None, + **kwargs: Any, +) -> "AgentFrameworkCBAgent": return AgentFrameworkCBAgent(agent, credentials=credentials, **kwargs) -__all__ = ["from_agent_framework", "ToolClient"] +__all__ = [ + "from_agent_framework", + "FoundryToolsChatMiddleware", +] __version__ = VERSION + +set_current_app(PackageMetadata.from_dist("azure-ai-agentserver-agentframework")) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py similarity index 71% rename from sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py rename to sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py index ed0b8b2ea64c..8d22cd9ff263 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py @@ -6,14 +6,13 @@ import os from typing import TYPE_CHECKING, Any, AsyncGenerator, Awaitable, Optional, Protocol, Union, List -import inspect from agent_framework import AgentProtocol, AIFunction, CheckpointStorage, InMemoryCheckpointStorage, WorkflowCheckpoint from agent_framework.azure import AzureAIClient # pylint: disable=no-name-in-module from agent_framework._workflows import get_checkpoint_summary from opentelemetry import trace -from 
azure.ai.agentserver.core.client.tools import OAuthConsentRequiredError +from azure.ai.agentserver.core.tools import OAuthConsentRequiredError from azure.ai.agentserver.core import AgentRunContext, FoundryCBAgent from azure.ai.agentserver.core.constants import Constants as AdapterConstants from azure.ai.agentserver.core.logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger @@ -22,6 +21,7 @@ Response as OpenAIResponse, ResponseStreamEvent, ) +from azure.ai.agentserver.core.models.projects import ResponseErrorEvent, ResponseFailedEvent from .models.agent_framework_input_converters import AgentFrameworkInputConverter from .models.agent_framework_output_non_streaming_converter import ( @@ -31,7 +31,6 @@ from .models.human_in_the_loop_helper import HumanInTheLoopHelper from .models.constants import Constants from .persistence import AgentThreadRepository, CheckpointRepository -from .tool_client import ToolClient if TYPE_CHECKING: from azure.core.credentials_async import AsyncTokenCredential @@ -42,12 +41,12 @@ class AgentFactory(Protocol): """Protocol for agent factory functions. - An agent factory is a callable that takes a ToolClient and returns + An agent factory is a callable that takes a list of tools and returns an AgentProtocol, either synchronously or asynchronously. """ def __call__(self, tools: List[AIFunction]) -> Union[AgentProtocol, Awaitable[AgentProtocol]]: - """Create an AgentProtocol using the provided ToolClient. + """Create an AgentProtocol using the provided tools. :param tools: The list of AIFunction tools available to the agent. :type tools: List[AIFunction] @@ -74,7 +73,7 @@ class AgentFrameworkCBAgent(FoundryCBAgent): - Supports both streaming and non-streaming responses based on the `stream` flag. 
""" - def __init__(self, agent: Union[AgentProtocol, AgentFactory], + def __init__(self, agent: AgentProtocol, credentials: "Optional[AsyncTokenCredential]" = None, *, thread_repository: AgentThreadRepository = None, @@ -92,27 +91,19 @@ def __init__(self, agent: Union[AgentProtocol, AgentFactory], :type thread_repository: Optional[AgentThreadRepository] """ super().__init__(credentials=credentials, **kwargs) # pylint: disable=unexpected-keyword-arg - self._agent_or_factory: Union[AgentProtocol, AgentFactory] = agent - self._resolved_agent: "Optional[AgentProtocol]" = None + self._agent: AgentProtocol = agent self._hitl_helper = HumanInTheLoopHelper() self._checkpoint_repository = checkpoint_repository self._thread_repository = thread_repository - # If agent is already instantiated, use it directly - if isinstance(agent, AgentProtocol): - self._resolved_agent = agent - logger.info(f"Initialized AgentFrameworkCBAgent with agent: {type(agent).__name__}") - else: - logger.info("Initialized AgentFrameworkCBAgent with agent factory") - @property - def agent(self) -> "Optional[AgentProtocol]": + def agent(self) -> "AgentProtocol": """Get the resolved agent. This property provides backward compatibility. :return: The resolved AgentProtocol if available, None otherwise. - :rtype: Optional[AgentProtocol] + :rtype: AgentProtocol """ - return self._resolved_agent + return self._agent def _resolve_stream_timeout(self, request_body: CreateResponse) -> float: """Resolve idle timeout for streaming updates. @@ -134,51 +125,6 @@ def _resolve_stream_timeout(self, request_body: CreateResponse) -> float: env_val = os.getenv(Constants.AGENTS_ADAPTER_STREAM_TIMEOUT_S) return float(env_val) if env_val is not None else float(Constants.DEFAULT_STREAM_TIMEOUT_S) - async def _resolve_agent(self, context: AgentRunContext): - """Resolve the agent if it's a factory function (for single-use/first-time resolution). - Creates a ToolClient and calls the factory function with it. 
- This is used for the initial resolution. - - :param context: The agent run context containing tools and user information. - :type context: AgentRunContext - """ - if callable(self._agent_or_factory): - logger.debug("Resolving agent from factory function") - - # Create ToolClient with credentials - tool_client = self.get_tool_client(tools=context.get_tools(), user_info=context.get_user_info()) # pylint: disable=no-member - tool_client_wrapper = ToolClient(tool_client) - tools = await tool_client_wrapper.list_tools() - - result = self._agent_or_factory(tools) - if inspect.iscoroutine(result): - self._resolved_agent = await result - else: - self._resolved_agent = result - - logger.debug("Agent resolved successfully") - else: - # Should not reach here, but just in case - self._resolved_agent = self._agent_or_factory - - async def _resolve_agent_for_request(self, context: AgentRunContext): - - logger.debug("Resolving fresh agent from factory function for request") - - # Create ToolClient with credentials - tool_client = self.get_tool_client(tools=context.get_tools(), user_info=context.get_user_info()) # pylint: disable=no-member - tool_client_wrapper = ToolClient(tool_client) - tools = await tool_client_wrapper.list_tools() - - result = self._agent_or_factory(tools) - if inspect.iscoroutine(result): - agent = await result - else: - agent = result - - logger.debug("Fresh agent resolved successfully for request") - return agent, tool_client_wrapper - def init_tracing(self): try: otel_exporter_endpoint = os.environ.get(AdapterConstants.OTEL_EXPORTER_ENDPOINT) @@ -238,7 +184,7 @@ def _setup_observability(self, exporters) -> bool: ) return True return False - + def _try_import_setup_observability(self): try: from agent_framework.observability import setup_observability @@ -246,7 +192,7 @@ def _try_import_setup_observability(self): except ImportError as e: logger.warning(f"Failed to import setup_observability: {e}") return None - + def 
_try_import_configure_otel_providers(self): try: from agent_framework.observability import configure_otel_providers @@ -254,12 +200,12 @@ def _try_import_configure_otel_providers(self): except ImportError as e: logger.warning(f"Failed to import configure_otel_providers: {e}") return None - + def _setup_tracing_with_azure_ai_client(self, project_endpoint: str): async def setup_async(): async with AzureAIClient( project_endpoint=project_endpoint, - async_credential=self.credentials, + async_credential=self.credentials, credential=self.credentials, # Af breaking change, keep both for compatibility ) as agent_client: try: @@ -283,18 +229,7 @@ async def agent_run( # pylint: disable=too-many-statements OpenAIResponse, AsyncGenerator[ResponseStreamEvent, Any], ]: - # Resolve agent - always resolve if it's a factory function to get fresh agent each time - # For factories, get a new agent instance per request to avoid concurrency issues - tool_client = None try: - if callable(self._agent_or_factory): - agent, tool_client = await self._resolve_agent_for_request(context) - elif self._resolved_agent is None: - await self._resolve_agent(context) - agent = self._resolved_agent - else: - agent = self._resolved_agent - logger.info(f"Starting agent_run with stream={context.stream}") request_input = context.request.get("input") @@ -306,8 +241,8 @@ async def agent_run( # pylint: disable=too-many-statements if agent_thread: logger.info(f"Loaded agent thread for conversation: {context.conversation_id}") else: - agent_thread = agent.get_new_thread() - + agent_thread = self.agent.get_new_thread() + if self._checkpoint_repository: checkpoint_storage = await self._checkpoint_repository.get_or_create(context.conversation_id) last_checkpoint = await self._get_latest_checkpoint(checkpoint_storage) @@ -317,7 +252,7 @@ async def agent_run( # pylint: disable=too-many-statements logger.warning("Last checkpoint is completed. 
Will not resume from it.") last_checkpoint = None # Do not resume from completed checkpoints if last_checkpoint: - await self._load_checkpoint(agent, last_checkpoint, checkpoint_storage) + await self._load_checkpoint(self.agent, last_checkpoint, checkpoint_storage) logger.info(f"Loaded checkpoint with ID: {last_checkpoint.checkpoint_id}") input_converter = AgentFrameworkInputConverter(hitl_helper=self._hitl_helper) @@ -335,43 +270,73 @@ async def agent_run( # pylint: disable=too-many-statements async def stream_updates(): try: update_count = 0 - updates = agent.run_stream( - message, - thread=agent_thread, - checkpoint_storage=checkpoint_storage, - ) - async for event in streaming_converter.convert(updates): - update_count += 1 - yield event - - if agent_thread and self._thread_repository: - await self._thread_repository.set(context.conversation_id, agent_thread, checkpoint_storage) - logger.info(f"Saved agent thread for conversation: {context.conversation_id}") - - logger.info("Streaming completed with %d updates", update_count) + try: + updates = self.agent.run_stream( + message, + thread=agent_thread, + checkpoint_storage=checkpoint_storage, + ) + async for event in streaming_converter.convert(updates): + update_count += 1 + yield event + + if agent_thread and self._thread_repository: + await self._thread_repository.set(context.conversation_id, agent_thread, checkpoint_storage) + logger.info(f"Saved agent thread for conversation: {context.conversation_id}") + + logger.info("Streaming completed with %d updates", update_count) + except OAuthConsentRequiredError as e: + logger.info("OAuth consent required during streaming updates") + if update_count == 0: + async for event in self.respond_with_oauth_consent_astream(context, e): + yield event + else: + # If we've already emitted events, we cannot safely restart a new + # OAuth-consent stream (it would reset sequence numbers). 
+ yield ResponseErrorEvent( + sequence_number=streaming_converter.next_sequence(), + code="server_error", + message=f"OAuth consent required: {e.consent_url}", + param="agent_run", + ) + yield ResponseFailedEvent( + sequence_number=streaming_converter.next_sequence(), + response=streaming_converter._build_response(status="failed"), # pylint: disable=protected-access + ) + except Exception as e: # pylint: disable=broad-exception-caught + logger.error("Unhandled exception during streaming updates: %s", e, exc_info=True) + + # Emit well-formed error events instead of terminating the stream. + yield ResponseErrorEvent( + sequence_number=streaming_converter.next_sequence(), + code="server_error", + message=str(e), + param="agent_run", + ) + yield ResponseFailedEvent( + sequence_number=streaming_converter.next_sequence(), + response=streaming_converter._build_response(status="failed"), # pylint: disable=protected-access + ) finally: - # Close tool_client if it was created for this request - if tool_client is not None: - try: - await tool_client.close() - logger.debug("Closed tool_client after streaming completed") - except Exception as ex: # pylint: disable=broad-exception-caught - logger.warning(f"Error closing tool_client in stream: {ex}") + # No request-scoped resources to clean up here today. + # Keep this block as a hook for future request-scoped cleanup. 
+ pass return stream_updates() # Non-streaming path logger.info("Running agent in non-streaming mode") - non_streaming_converter = AgentFrameworkOutputNonStreamingConverter(context, hitl_helper=self._hitl_helper) - result = await agent.run( - message, - thread=agent_thread, - checkpoint_storage=checkpoint_storage, - ) + non_streaming_converter = AgentFrameworkOutputNonStreamingConverter(context) + result = await self.agent.run( + message, + thread=agent_thread, + checkpoint_storage=checkpoint_storage) + logger.debug(f"Agent run completed, result type: {type(result)}") if agent_thread and self._thread_repository: await self._thread_repository.set(context.conversation_id, agent_thread) logger.info(f"Saved agent thread for conversation: {context.conversation_id}") + transformed_result = non_streaming_converter.transform_output_for_response(result) logger.info("Agent run and transformation completed successfully") return transformed_result @@ -386,13 +351,7 @@ async def oauth_consent_stream(error=e): return oauth_consent_stream() return await self.respond_with_oauth_consent(context, e) finally: - # Close tool_client if it was created for this request (non-streaming only, streaming handles in generator) - if not context.stream and tool_client is not None: - try: - await tool_client.close() - logger.debug("Closed tool_client after request processing") - except Exception as ex: # pylint: disable=broad-exception-caught - logger.warning(f"Error closing tool_client: {ex}") + pass async def _get_latest_checkpoint(self, checkpoint_storage: CheckpointStorage) -> Optional[Any]: diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py new file mode 100644 index 000000000000..875c1de24e8c --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py 
@@ -0,0 +1,150 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from __future__ import annotations + +import inspect +from typing import Any, Awaitable, Callable, Dict, List, Optional, Sequence + +from agent_framework import AIFunction, ChatContext, ChatOptions, ChatMiddleware +from pydantic import Field, create_model + +from azure.ai.agentserver.core import AgentServerContext +from azure.ai.agentserver.core.logger import get_logger +from azure.ai.agentserver.core.tools import FoundryToolLike, ResolvedFoundryTool + +logger = get_logger() + + +def _attach_signature_from_pydantic_model(func, input_model) -> None: + params = [] + annotations: Dict[str, Any] = {} + + for name, field in input_model.model_fields.items(): + ann = field.annotation or Any + annotations[name] = ann + + default = inspect._empty if field.is_required() else field.default + params.append( + inspect.Parameter( + name=name, + kind=inspect.Parameter.KEYWORD_ONLY, + default=default, + annotation=ann, + ) + ) + + func.__signature__ = inspect.Signature(parameters=params, return_annotation=Any) + func.__annotations__ = {**annotations, "return": Any} + +class FoundryToolClient: + + def __init__( + self, + tools: Sequence[FoundryToolLike], + ) -> None: + self._allowed_tools: List[FoundryToolLike] = list(tools) + + async def list_tools(self) -> List[AIFunction]: + server_context = AgentServerContext.get() + foundry_tool_catalog = server_context.tools.catalog + resolved_tools = await foundry_tool_catalog.list(self._allowed_tools) + return [self._to_aifunction(tool) for tool in resolved_tools] + + def _to_aifunction(self, foundry_tool: "ResolvedFoundryTool") -> AIFunction: + """Convert an FoundryTool to an Agent Framework AI Function + + :param foundry_tool: The FoundryTool to convert. 
+ :type foundry_tool: ~azure.ai.agentserver.core.client.tools.aio.FoundryTool + :return: An AI Function Tool. + :rtype: AIFunction + """ + # Get the input schema from the tool descriptor + input_schema = foundry_tool.input_schema or {} + + # Create a Pydantic model from the input schema + properties = input_schema.properties or {} + required_fields = set(input_schema.required or []) + + # Build field definitions for the Pydantic model + field_definitions: Dict[str, Any] = {} + for field_name, field_info in properties.items(): + field_type = self._json_schema_type_to_python(field_info.type or "string") + field_description = field_info.description or "" + is_required = field_name in required_fields + + if is_required: + field_definitions[field_name] = (field_type, Field(description=field_description)) + else: + field_definitions[field_name] = (Optional[field_type], + Field(default=None, description=field_description)) + + # Create the Pydantic model dynamically + input_model = create_model( + f"{foundry_tool.name}_input", + **field_definitions + ) + + # Create a wrapper function that calls the Azure tool + async def tool_func(**kwargs: Any) -> Any: + """Dynamically generated function to invoke the Azure AI tool. + + :return: The result from the tool invocation. + :rtype: Any + """ + server_context = AgentServerContext.get() + logger.debug("Invoking tool: %s with input: %s", foundry_tool.name, kwargs) + return await server_context.tools.invoke(foundry_tool, kwargs) + _attach_signature_from_pydantic_model(tool_func, input_model) + + # Create and return the AIFunction + return AIFunction( + name=foundry_tool.name, + description=foundry_tool.description or "No description available", + func=tool_func, + input_model=input_model + ) + + def _json_schema_type_to_python(self, json_type: str) -> type: + """Convert JSON schema type to Python type. + + :param json_type: The JSON schema type string. + :type json_type: str + :return: The corresponding Python type. 
+ :rtype: type + """ + type_map = { + "string": str, + "number": float, + "integer": int, + "boolean": bool, + "array": list, + "object": dict, + } + return type_map.get(json_type, str) + + +class FoundryToolsChatMiddleware(ChatMiddleware): + """Chat middleware to inject Foundry tools into ChatOptions on each call.""" + + def __init__( + self, + tools: Sequence[FoundryToolLike]) -> None: + self._foundry_tool_client = FoundryToolClient(tools=tools) + + async def process( + self, + context: ChatContext, + next: Callable[[ChatContext], Awaitable[None]], + ) -> None: + tools = await self._foundry_tool_client.list_tools() + base_chat_options = context.chat_options + if not base_chat_options: + logger.debug("No existing ChatOptions found, creating new one with Foundry tools.") + base_chat_options = ChatOptions(tools=tools) + context.chat_options = base_chat_options + else: + logger.debug("Adding Foundry tools to existing ChatOptions.") + base_tools = base_chat_options.tools or [] + context.chat_options.tools = base_tools + tools + await next(context) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py deleted file mode 100644 index 8b7142f0862a..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py +++ /dev/null @@ -1,183 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# --------------------------------------------------------- -# mypy: disable-error-code="assignment" -"""Tool client for integrating AzureAIToolClient with Agent Framework.""" - -from typing import TYPE_CHECKING, Any, Dict, List, Optional -from agent_framework import AIFunction -from pydantic import Field, create_model -from azure.ai.agentserver.core.logger import get_logger -if TYPE_CHECKING: - from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient, FoundryTool - -logger = get_logger() - -# pylint: disable=client-accepts-api-version-keyword,missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs -class ToolClient: - """Client that integrates AzureAIToolClient with Agent Framework. - - This class provides methods to list tools from AzureAIToolClient and invoke them - in a format compatible with Agent Framework agents. - - :param tool_client: The AzureAIToolClient instance to use for tool operations. - :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient - - .. admonition:: Example: - - .. code-block:: python - - from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient - from azure.ai.agentserver.agentframework import ToolClient - from azure.identity.aio import DefaultAzureCredential - - async with DefaultAzureCredential() as credential: - tool_client = AzureAIToolClient( - endpoint="https://", - credential=credential - ) - - client = ToolClient(tool_client) - - # List tools as Agent Framework tool definitions - tools = await client.list_tools() - - # Invoke a tool directly - result = await client.invoke_tool( - tool_name="my_tool", - tool_input={"param": "value"} - ) - - :meta private: - """ - - def __init__(self, tool_client: "AzureAIToolClient") -> None: - """Initialize the ToolClient. - - :param tool_client: The AzureAIToolClient instance to use for tool operations. 
- :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient - """ - self._tool_client = tool_client - self._aifunction_cache: List[AIFunction] = None - - async def list_tools(self) -> List[AIFunction]: - """List all available tools as Agent Framework tool definitions. - - Retrieves tools from AzureAIToolClient and returns them in a format - compatible with Agent Framework. - - :return: List of tool definitions. - :rtype: List[AIFunction] - :raises ~azure.core.exceptions.HttpResponseError: - Raised for HTTP communication failures. - - .. admonition:: Example: - - .. code-block:: python - - client = ToolClient(tool_client) - tools = await client.list_tools() - """ - # Get tools from AzureAIToolClient - if self._aifunction_cache is not None: - return self._aifunction_cache - - azure_tools = await self._tool_client.list_tools() - self._aifunction_cache = [] - - # Convert to Agent Framework tool definitions - for azure_tool in azure_tools: - ai_function_tool = self._convert_to_agent_framework_tool(azure_tool) - self._aifunction_cache.append(ai_function_tool) - - return self._aifunction_cache - - def _convert_to_agent_framework_tool(self, azure_tool: "FoundryTool") -> AIFunction: - """Convert an AzureAITool to an Agent Framework AI Function - - :param azure_tool: The AzureAITool to convert. - :type azure_tool: ~azure.ai.agentserver.core.client.tools.aio.FoundryTool - :return: An AI Function Tool. 
- :rtype: AIFunction - """ - # Get the input schema from the tool descriptor - input_schema = azure_tool.input_schema or {} - - # Create a Pydantic model from the input schema - properties = input_schema.get("properties") or {} - required_fields = set(input_schema.get("required") or []) - - # Build field definitions for the Pydantic model - field_definitions: Dict[str, Any] = {} - for field_name, field_info in properties.items(): - field_type = self._json_schema_type_to_python(field_info.get("type", "string")) - field_description = field_info.get("description", "") - is_required = field_name in required_fields - - if is_required: - field_definitions[field_name] = (field_type, Field(description=field_description)) - else: - field_definitions[field_name] = (Optional[field_type], - Field(default=None, description=field_description)) - - # Create the Pydantic model dynamically - input_model = create_model( - f"{azure_tool.name}_input", - **field_definitions - ) - - # Create a wrapper function that calls the Azure tool - async def tool_func(**kwargs: Any) -> Any: - """Dynamically generated function to invoke the Azure AI tool. - - :return: The result from the tool invocation. - :rtype: Any - """ - logger.debug("Invoking tool: %s with input: %s", azure_tool.name, kwargs) - return await azure_tool.ainvoke(kwargs) - - # Create and return the AIFunction - return AIFunction( - name=azure_tool.name, - description=azure_tool.description or "No description available", - func=tool_func, - input_model=input_model - ) - - def _json_schema_type_to_python(self, json_type: str) -> type: - """Convert JSON schema type to Python type. - - :param json_type: The JSON schema type string. - :type json_type: str - :return: The corresponding Python type. 
- :rtype: type - """ - type_map = { - "string": str, - "number": float, - "integer": int, - "boolean": bool, - "array": list, - "object": dict, - } - return type_map.get(json_type, str) - - async def close(self) -> None: - """Close the tool client and release resources.""" - await self._tool_client.close() - - async def __aenter__(self) -> "ToolClient": - """Async context manager entry. - - :return: The ToolClient instance. - :rtype: ToolClient - """ - return self - - async def __aexit__(self, *exc_details: Any) -> None: - """Async context manager exit. - - :param exc_details: Exception details if an exception occurred. - :type exc_details: Any - """ - await self.close() diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/README.md b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/README.md new file mode 100644 index 000000000000..956fc634eb11 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/README.md @@ -0,0 +1,81 @@ +# Chat Client With Foundry Tools + +This sample demonstrates how to attach `FoundryToolsChatMiddleware` to an Agent Framework chat client so that: + +- Foundry tools configured in your Azure AI Project are converted into Agent Framework `AIFunction` tools. +- The tools are injected automatically for each agent run. + +## What this sample does + +The script creates an Agent Framework agent using: + +- `AzureOpenAIChatClient` for model inference +- `FoundryToolsChatMiddleware` to resolve and inject Foundry tools +- `from_agent_framework(agent).run()` to start an AgentServer-compatible HTTP server + +## Prerequisites + +- Python 3.10+ +- An Azure AI Project endpoint +- A tool connection configured in that project (e.g. an MCP connection) +- Azure credentials available to `DefaultAzureCredential` + +## Setup + +1. Install dependencies: + +```bash +pip install -r requirements.txt +``` + +2. 
Update `.env` in this folder with your values. At minimum you need: + +```dotenv +AZURE_OPENAI_ENDPOINT=https://.openai.azure.com/ +AZURE_OPENAI_CHAT_DEPLOYMENT_NAME= +OPENAI_API_VERSION= + +AZURE_AI_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ +AZURE_AI_PROJECT_TOOL_CONNECTION_ID= +``` + +Notes: + +- This sample uses `DefaultAzureCredential()`. Make sure you are signed in (e.g. `az login`) or otherwise configured. + +## Run + +```bash +python chat_client_with_foundry_tool.py +``` + +This starts a local Uvicorn server (it will keep running and wait for requests). If it looks "stuck" at startup, it may just be waiting for requests. + +## Key code + +The core pattern used by this sample: + +```python +agent = AzureOpenAIChatClient( + credential=DefaultAzureCredential(), + middleware=FoundryToolsChatMiddleware( + tools=[{"type": "mcp", "project_connection_id": tool_connection_id}], + ), +).create_agent( + name="FoundryToolAgent", + instructions="You are a helpful assistant with access to various tools.", +) + +from_agent_framework(agent).run() +``` + +## Troubleshooting + +- **No tools found**: verify `AZURE_AI_PROJECT_TOOL_CONNECTION_ID` points at an existing tool connection in your project. +- **Auth failures**: confirm `DefaultAzureCredential` can acquire a token (try `az login`). +- **Import errors / weird agent_framework circular import**: ensure you are running the sample from this folder (not from inside the package module directory) so the external `agent_framework` dependency is imported correctly. 
+ +## Learn more + +- Azure AI Agent Service: https://learn.microsoft.com/azure/ai-services/agents/ +- Agent Framework: https://github.com/microsoft/agent-framework diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/chat_client_with_foundry_tool.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/chat_client_with_foundry_tool.py new file mode 100644 index 000000000000..cb9c3cd2c9c6 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/chat_client_with_foundry_tool.py @@ -0,0 +1,34 @@ +# Copyright (c) Microsoft. All rights reserved. +"""Example showing how to use an agent factory function with ToolClient. + +This sample demonstrates how to pass a factory function to from_agent_framework +that receives a ToolClient and returns an AgentProtocol. This pattern allows +the agent to be created dynamically with access to tools from Azure AI Tool +Client at runtime. 
+""" + +import os +from dotenv import load_dotenv +from agent_framework.azure import AzureOpenAIChatClient + +from azure.ai.agentserver.agentframework import from_agent_framework, FoundryToolsChatMiddleware +from azure.identity import DefaultAzureCredential + +load_dotenv() + +def main(): + tool_connection_id = os.getenv("AZURE_AI_PROJECT_TOOL_CONNECTION_ID") + + agent = AzureOpenAIChatClient( + credential=DefaultAzureCredential(), + middleware=FoundryToolsChatMiddleware( + tools=[{"type": "mcp", "project_connection_id": tool_connection_id}] + )).create_agent( + name="FoundryToolAgent", + instructions="You are a helpful assistant with access to various tools.", + ) + + from_agent_framework(agent).run() + +if __name__ == "__main__": + main() diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/requirements.txt b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/requirements.txt similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/requirements.txt rename to sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/requirements.txt diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/README.md b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/README.md deleted file mode 100644 index 019e388975ff..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/README.md +++ /dev/null @@ -1,113 +0,0 @@ -# Tool Client Example - -This example demonstrates how to use the `ToolClient` with Agent Framework to dynamically access tools from Azure AI Tool Client. - -## Overview - -The `ToolClient` provides a bridge between Azure AI Tool Client and Agent Framework, allowing agents to access tools configured in your Azure AI project. 
This example shows how to use a factory function pattern to create agents dynamically with access to tools at runtime. - -## Features - -- **Dynamic Tool Access**: Agents can list and invoke tools from Azure AI Tool Client -- **Factory Pattern**: Create fresh agent instances per request to avoid concurrency issues -- **Tool Integration**: Seamlessly integrate Azure AI tools with Agent Framework agents - -## Prerequisites - -- Python 3.10 or later -- Azure AI project with configured tools -- Azure credentials (DefaultAzureCredential) - -## Setup - -1. Install dependencies: -```bash -pip install -r requirements.txt -``` - -2. Configure environment variables in `.env`: -``` -AZURE_AI_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ -``` - -3. Ensure your Azure AI project has tools configured (e.g., MCP connections) - -## Running the Example - -```bash -python agent_factory_example.py -``` - -## How It Works - -1. **Factory Function**: The example creates a factory function that: - - Receives a `ToolClient` instance - - Lists available tools from Azure AI Tool Client - - Creates an Agent Framework agent with those tools - - Returns the agent instance - -2. **Dynamic Agent Creation**: The factory is called for each request, ensuring: - - Fresh agent instances per request - - Latest tool configurations - - No concurrency issues - -3. 
**Tool Access**: The agent can use tools like: - - MCP (Model Context Protocol) connections - - Function tools - - Other Azure AI configured tools - -## Key Code Patterns - -### Creating a Factory Function - -```python -async def agent_factory(tool_client: ToolClient): - # List tools from Azure AI - tools = await tool_client.list_tools() - - # Create agent with tools - agent = Agent( - name="MyAgent", - model="gpt-4o", - instructions="You are a helpful assistant.", - tools=tools - ) - return agent -``` - -### Using the Factory - -```python -from azure.ai.agentserver.agentframework import from_agent_framework - -adapter = from_agent_framework( - agent_factory, - credentials=credential, - tools=[{"type": "mcp", "project_connection_id": "my-mcp"}] -) -``` - -## Alternative: Direct Agent Usage - -You can also use a pre-created agent instead of a factory: - -```python -agent = Agent( - name="MyAgent", - model="gpt-4o", - instructions="You are a helpful assistant." -) - -adapter = from_agent_framework(agent, credentials=credential) -``` - -## Troubleshooting - -- **No tools found**: Ensure your Azure AI project has tools configured -- **Authentication errors**: Check your Azure credentials and project endpoint -- **Import errors**: Verify all dependencies are installed - -## Learn More - -- [Azure AI Agent Service Documentation](https://learn.microsoft.com/azure/ai-services/agents/) -- [Agent Framework Documentation](https://github.com/microsoft/agent-framework) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/agent_factory_example.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/agent_factory_example.py deleted file mode 100644 index bc4d6bf8806d..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/agent_factory_example.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
-"""Example showing how to use an agent factory function with ToolClient. - -This sample demonstrates how to pass a factory function to from_agent_framework -that receives a ToolClient and returns an AgentProtocol. This pattern allows -the agent to be created dynamically with access to tools from Azure AI Tool -Client at runtime. -""" - -import asyncio -import os -from typing import List -from dotenv import load_dotenv -from agent_framework import AIFunction -from agent_framework.azure import AzureOpenAIChatClient - -from azure.ai.agentserver.agentframework import from_agent_framework -from azure.identity.aio import DefaultAzureCredential - -load_dotenv() - - -def create_agent_factory(): - """Create a factory function that builds an agent with ToolClient. - - This function returns a factory that takes a ToolClient and returns - an AgentProtocol. The agent is created at runtime for every request, - allowing it to access the latest tool configuration dynamically. - """ - - async def agent_factory(tools: List[AIFunction]) -> AzureOpenAIChatClient: - """Factory function that creates an agent using the provided tools. - - :param tools: The list of AIFunction tools available to the agent. - :type tools: List[AIFunction] - :return: An Agent Framework ChatAgent instance. 
- :rtype: ChatAgent - """ - # List all available tools from the ToolClient - print("Fetching tools from Azure AI Tool Client via factory...") - print(f"Found {len(tools)} tools:") - for tool in tools: - print(f" - tool: {tool.name}, description: {tool.description}") - - if not tools: - print("\nNo tools found!") - print("Make sure your Azure AI project has tools configured.") - raise ValueError("No tools available to create agent") - - # Create the Agent Framework agent with the tools - print("\nCreating Agent Framework agent with tools from factory...") - agent = AzureOpenAIChatClient(credential=DefaultAzureCredential()).create_agent( - name="ToolClientAgent", - instructions="You are a helpful assistant with access to various tools.", - tools=tools, - ) - - print("Agent created successfully!") - return agent - - return agent_factory - - -async def quickstart(): - """Build and return an AgentFrameworkCBAgent using an agent factory function.""" - - # Get configuration from environment - project_endpoint = os.getenv("AZURE_AI_PROJECT_ENDPOINT") - - if not project_endpoint: - raise ValueError( - "AZURE_AI_PROJECT_ENDPOINT environment variable is required. 
" - "Set it to your Azure AI project endpoint, e.g., " - "https://.services.ai.azure.com/api/projects/" - ) - - # Create Azure credentials - credential = DefaultAzureCredential() - - # Create a factory function that will build the agent at runtime - # The factory will receive a ToolClient when the agent first runs - agent_factory = create_agent_factory() - - tool_connection_id = os.getenv("AZURE_AI_PROJECT_TOOL_CONNECTION_ID") - # Pass the factory function to from_agent_framework instead of a compiled agent - # The agent will be created on every agent run with access to ToolClient - print("Creating Agent Framework adapter with factory function...") - adapter = from_agent_framework( - agent_factory, - credentials=credential, - tools=[{"type": "mcp", "project_connection_id": tool_connection_id}] - ) - - print("Adapter created! Agent will be built on every request.") - return adapter - - -async def main(): # pragma: no cover - sample entrypoint - """Main function to run the agent.""" - adapter = await quickstart() - - if adapter: - print("\nStarting agent server...") - print("The agent factory will be called for every request that arrives.") - await adapter.run_async() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py index 895074d32ae3..88a13741bbac 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py @@ -7,8 +7,9 @@ from .logger import configure as config_logging from .server.base import FoundryCBAgent from .server.common.agent_run_context import AgentRunContext +from .server._context import AgentServerContext config_logging() -__all__ = ["FoundryCBAgent", "AgentRunContext"] +__all__ = ["FoundryCBAgent", "AgentRunContext", "AgentServerContext"] __version__ = VERSION 
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/__init__.py new file mode 100644 index 000000000000..ccf4062cce31 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/__init__.py @@ -0,0 +1,12 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) + +__all__ = [ + "PackageMetadata", + "set_current_app" +] + +from ._package_metadata import PackageMetadata, set_current_app \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_builder.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_builder.py new file mode 100644 index 000000000000..c09c253ab09f --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_builder.py @@ -0,0 +1,5 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +class AgentServerBuilder: + pass diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_configuration.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_configuration.py new file mode 100644 index 000000000000..fe05dae18a67 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_configuration.py @@ -0,0 +1,42 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +from dataclasses import dataclass, field + +from azure.core.credentials_async import AsyncTokenCredential + + +@dataclass(frozen=True) +class HttpServerConfiguration: + """Resolved configuration for the HTTP server. + + :ivar str host: The host address the server listens on. Defaults to '0.0.0.0'. + :ivar int port: The port number the server listens on. Defaults to 8088. + """ + + host: str = "0.0.0.0" + port: int = 8088 + + +class ToolsConfiguration: + """Resolved configuration for the Tools subsystem. + + :ivar int catalog_cache_ttl: The time-to-live (TTL) for the tool catalog cache in seconds. + Defaults to 600 seconds (10 minutes). + :ivar int catalog_cache_max_size: The maximum size of the tool catalog cache. + Defaults to 1024 entries. + """ + + catalog_cache_ttl: int = 600 + catalog_cache_max_size: int = 1024 + + +@dataclass(frozen=True, kw_only=True) +class AgentServerConfiguration: + """Resolved configuration for the Agent Server application.""" + + agent_name: str = "$default" + project_endpoint: str + credential: AsyncTokenCredential + http: HttpServerConfiguration = field(default_factory=HttpServerConfiguration) + tools: ToolsConfiguration = field(default_factory=ToolsConfiguration) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_options.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_options.py new file mode 100644 index 000000000000..cb4e8bde0bfd --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_options.py @@ -0,0 +1,44 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +from typing import Literal, NotRequired, TypedDict, Union + +from azure.core.credentials import TokenCredential +from azure.core.credentials_async import AsyncTokenCredential + + +class AgentServerOptions(TypedDict): + """Configuration options for the Agent Server. + + Attributes: + project_endpoint (str, optional): The endpoint URL for the project. Defaults to current project. + credential (Union[AsyncTokenCredential, TokenCredential], optional): The credential used for authentication. + Defaults to current project's managed identity. + """ + project_endpoint: NotRequired[str] + credential: NotRequired[Union[AsyncTokenCredential, TokenCredential]] + http: NotRequired["HttpServerOptions"] + tools: NotRequired["ToolsOptions"] + + +class HttpServerOptions(TypedDict): + """Configuration options for the HTTP server. + + Attributes: + host (str, optional): The host address the server listens on. + """ + host: NotRequired[Literal["127.0.0.1", "localhost", "0.0.0.0"]] + + +class ToolsOptions(TypedDict): + """Configuration options for the Tools subsystem. + + Attributes: + catalog_cache_ttl (int, optional): The time-to-live (TTL) for the tool catalog cache in seconds. + Defaults to 600 seconds (10 minutes). + catalog_cache_max_size (int, optional): The maximum size of the tool catalog cache. + Defaults to 1024 entries. + """ + catalog_cache_ttl: NotRequired[int] + catalog_cache_max_size: NotRequired[int] + diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py new file mode 100644 index 000000000000..36ff9313a6a2 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py @@ -0,0 +1,50 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# --------------------------------------------------------- +from __future__ import annotations + +import platform +from dataclasses import dataclass +from importlib.metadata import Distribution, PackageNotFoundError + + +@dataclass(frozen=True) +class PackageMetadata: + name: str + version: str + python_version: str + platform: str + + @staticmethod + def from_dist(dist_name: str): + try: + ver = Distribution.from_name(dist_name).version + except PackageNotFoundError: + ver = "" + + return PackageMetadata( + name=dist_name, + version=ver, + python_version=platform.python_version(), + platform=platform.platform(), + ) + + def as_user_agent(self, component: str | None = None) -> str: + suffix = f"{component} " if component else "" + return (f"{self.name}/{self.version} " + f"Python {self.python_version} " + f"{suffix}({self.platform})") + + +_default = PackageMetadata.from_dist("azure-ai-agentserver-core") +_app: PackageMetadata = _default + + +def set_current_app(app: PackageMetadata) -> None: + global _app + _app = app + + +def get_current_app() -> PackageMetadata: + global _app + return _app diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py deleted file mode 100644 index 3800740fb464..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# --------------------------------------------------------- - -from ._client import AzureAIToolClient, FoundryTool -from ._exceptions import OAuthConsentRequiredError, MCPToolApprovalRequiredError - -__all__ = [ - "AzureAIToolClient", - "FoundryTool", - "OAuthConsentRequiredError", - "MCPToolApprovalRequiredError", -] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py deleted file mode 100644 index ee56a4d44a94..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py +++ /dev/null @@ -1,195 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# --------------------------------------------------------- -# pylint: disable=protected-access -from typing import Any, List, Mapping, Union -from azure.core import PipelineClient -from azure.core.pipeline import policies -from azure.core.credentials import TokenCredential -from azure.core.tracing.decorator import distributed_trace - -from ._configuration import AzureAIToolClientConfiguration -from .operations._operations import MCPToolsOperations, RemoteToolsOperations -from ._utils._model_base import InvocationPayloadBuilder -from ._model_base import FoundryTool, ToolSource - -class AzureAIToolClient: - """Synchronous client for aggregating tools from Azure AI MCP and Tools APIs. - - This client provides access to tools from both MCP (Model Context Protocol) servers - and Azure AI Tools API endpoints, enabling unified tool discovery and invocation. - - :param str endpoint: - The fully qualified endpoint for the Azure AI Agents service. - Example: "https://.api.azureml.ms" - :param credential: - Credential for authenticating requests to the service. - Use credentials from azure-identity like DefaultAzureCredential. 
- :type credential: ~azure.core.credentials.TokenCredential - :keyword str agent_name: - Name of the agent to use for tool operations. Default is "$default". - :keyword List[Mapping[str, Any]] tools: - List of tool configurations defining which tools to include. - :keyword Mapping[str, Any] user: - User information for tool invocations (object_id, tenant_id). - :keyword str api_version: - API version to use when communicating with the service. - Default is the latest supported version. - :keyword transport: - Custom transport implementation. Default is RequestsTransport. - :paramtype transport: ~azure.core.pipeline.transport.HttpTransport - - """ - - def __init__( - self, - endpoint: str, - credential: "TokenCredential", - **kwargs: Any, - ) -> None: - """Initialize the synchronous Azure AI Tool Client. - - :param str endpoint: The service endpoint URL. - :param credential: Credentials for authenticating requests. - :type credential: ~azure.core.credentials.TokenCredential - :keyword kwargs: Additional keyword arguments for client configuration. 
- """ - self._config = AzureAIToolClientConfiguration( - endpoint, - credential, - **kwargs, - ) - - _policies = kwargs.pop("policies", None) - if _policies is None: - _policies = [ - policies.RequestIdPolicy(**kwargs), - self._config.headers_policy, - self._config.user_agent_policy, - self._config.proxy_policy, - policies.ContentDecodePolicy(**kwargs), - self._config.redirect_policy, - self._config.retry_policy, - self._config.authentication_policy, - self._config.custom_hook_policy, - self._config.logging_policy, - policies.DistributedTracingPolicy(**kwargs), - policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, - self._config.http_logging_policy, - ] - self._client: PipelineClient = PipelineClient(base_url=endpoint, policies=_policies, **kwargs) - - # Initialize specialized clients with client and config - self._mcp_tools = MCPToolsOperations(client=self._client, config=self._config) - self._remote_tools = RemoteToolsOperations(client=self._client, config=self._config) - - def list_tools(self) -> List[FoundryTool]: - """List all available tools from configured sources. - - Retrieves tools from both MCP servers and Azure AI Tools API endpoints, - returning them as FoundryTool instances ready for invocation. - :return: List of available tools from all configured sources. - :rtype: List[~AzureAITool] - :raises ~exceptions.OAuthConsentRequiredError: - Raised when the service requires user OAuth consent. - :raises ~exceptions.MCPToolApprovalRequiredError: - Raised when tool access requires human approval. - :raises ~azure.core.exceptions.HttpResponseError: - Raised for HTTP communication failures. 
- - """ - - existing_names: set[str] = set() - - tools: List[FoundryTool] = [] - - # Fetch MCP tools - if ( - self._config.tool_config._named_mcp_tools - and len(self._config.tool_config._named_mcp_tools) > 0 - ): - mcp_tools = self._mcp_tools.list_tools(existing_names) - tools.extend(mcp_tools) - - # Fetch Tools API tools - if ( - self._config.tool_config._remote_tools - and len(self._config.tool_config._remote_tools) > 0 - ): - tools_api_tools = self._remote_tools.resolve_tools(existing_names) - tools.extend(tools_api_tools) - - for tool in tools: - # Capture tool in a closure to avoid shadowing issues - def make_invoker(captured_tool): - return lambda *args, **kwargs: self.invoke_tool(captured_tool, *args, **kwargs) - tool.invoker = make_invoker(tool) - return tools - - @distributed_trace - def invoke_tool( - self, - tool: Union[str, FoundryTool], - *args: Any, - **kwargs: Any, - ) -> Any: - """Invoke a tool by instance, name, or descriptor. - - :param tool: Tool to invoke, specified as an AzureAITool instance, - tool name string, or FoundryTool. - :type tool: Union[str, ~FoundryTool] - :param args: Positional arguments to pass to the tool. - :type args: Any - :return: The result of invoking the tool. - :rtype: Any - """ - descriptor = self._resolve_tool_descriptor(tool) - payload = InvocationPayloadBuilder.build_payload(args, kwargs, configuration={}) - return self._invoke_tool(descriptor, payload, **kwargs) - - def _resolve_tool_descriptor( - self, tool: Union[str, FoundryTool] - ) -> FoundryTool: - """Resolve a tool reference to a descriptor. - - :param tool: Tool to resolve, either a FoundryTool instance or a string name/key. - :type tool: Union[str, FoundryTool] - :return: The resolved FoundryTool descriptor. 
- :rtype: FoundryTool - """ - if isinstance(tool, FoundryTool): - return tool - if isinstance(tool, str): - # Fetch all tools and find matching descriptor - descriptors = self.list_tools() - for descriptor in descriptors: - if tool in (descriptor.name, descriptor.key): - return descriptor - raise KeyError(f"Unknown tool: {tool}") - raise TypeError("Tool must be an AzureAITool, FoundryTool, or registered name/key") - - def _invoke_tool(self, descriptor: FoundryTool, arguments: Mapping[str, Any], **kwargs: Any) -> Any: - """Invoke a tool descriptor. - - :param descriptor: The tool descriptor to invoke. - :type descriptor: FoundryTool - :param arguments: Arguments to pass to the tool. - :type arguments: Mapping[str, Any] - :return: The result of the tool invocation. - :rtype: Any - """ - if descriptor.source is ToolSource.MCP_TOOLS: - return self._mcp_tools.invoke_tool(descriptor, arguments, **kwargs) - if descriptor.source is ToolSource.REMOTE_TOOLS: - return self._remote_tools.invoke_tool(descriptor, arguments, **kwargs) - raise ValueError(f"Unsupported tool source: {descriptor.source}") - - def close(self) -> None: - self._client.close() - - def __enter__(self) -> "AzureAIToolClient": - self._client.__enter__() - return self - - def __exit__(self, *exc_details: Any) -> None: - self._client.__exit__(*exc_details) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_configuration.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_configuration.py deleted file mode 100644 index 71cbdebec911..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_configuration.py +++ /dev/null @@ -1,85 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# --------------------------------------------------------- - -from typing import Any, List, Optional, TYPE_CHECKING - -from azure.core.pipeline import policies -from ._utils._model_base import ToolConfigurationParser, UserInfo, ToolDefinition - -if TYPE_CHECKING: - from azure.core.credentials import TokenCredential - -class AzureAIToolClientConfiguration: # pylint: disable=too-many-instance-attributes - """Configuration for Azure AI Tool Client. - - Manages authentication, endpoint configuration, and policy settings for the - Azure AI Tool Client. This class is used internally by the client and should - not typically be instantiated directly. - - :param str endpoint: - Fully qualified endpoint for the Azure AI Agents service. - :param credential: - Azure TokenCredential for authentication. - :type credential: ~azure.core.credentials.TokenCredential - :keyword str api_version: - API version to use. Default is the latest supported version. - :keyword List[str] credential_scopes: - OAuth2 scopes for token requests. Default is ["https://ai.azure.com/.default"]. - :keyword str agent_name: - Name of the agent. Default is "$default". - :keyword List[Mapping[str, Any]] tools: - List of tool configurations. - :keyword Mapping[str, Any] user: - User information for tool invocations. - """ - - def __init__( - self, - endpoint: str, - credential: "TokenCredential", - **kwargs: Any, - ) -> None: - """Initialize the configuration. - - :param str endpoint: The service endpoint URL. - :param credential: Credentials for authenticating requests. - :type credential: ~azure.core.credentials.TokenCredential - :keyword kwargs: Additional configuration options. 
- """ - api_version: str = kwargs.pop("api_version", "2025-05-15-preview") - - self.endpoint = endpoint - self.credential = credential - self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://ai.azure.com/.default"]) - - # Tool configuration - self.agent_name: str = kwargs.pop("agent_name", "$default") - self.tools: Optional[List[ToolDefinition]] = kwargs.pop("tools", None) - self.user: Optional[UserInfo] = kwargs.pop("user", None) - - # Initialize tool configuration parser - self.tool_config = ToolConfigurationParser(self.tools) - - self._configure(**kwargs) - - # Warn about unused kwargs - if kwargs: - import warnings - warnings.warn(f"Unused configuration parameters: {list(kwargs.keys())}", UserWarning) - - def _configure(self, **kwargs: Any) -> None: - self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) - self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) - self.authentication_policy = kwargs.get("authentication_policy") - if self.credential and not self.authentication_policy: - self.authentication_policy = policies.BearerTokenCredentialPolicy( - self.credential, *self.credential_scopes, **kwargs - ) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_exceptions.py 
b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_exceptions.py deleted file mode 100644 index 41515592d698..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_exceptions.py +++ /dev/null @@ -1,52 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# --------------------------------------------------------- - -from typing import Any, Mapping, Optional - - -class OAuthConsentRequiredError(RuntimeError): - """Raised when the service requires end-user OAuth consent. - - This exception is raised when a tool or service operation requires explicit - OAuth consent from the end user before the operation can proceed. - - :ivar str message: Human-readable guidance returned by the service. - :ivar str consent_url: Link that the end user must visit to provide consent. - :ivar dict payload: Full response payload from the service. - - :param str message: Human-readable guidance returned by the service. - :param str consent_url: Link that the end user must visit to provide the required consent. - :param dict payload: Full response payload supplied by the service. - """ - - def __init__(self, message: str, consent_url: Optional[str], payload: Mapping[str, Any]): - super().__init__(message) - self.message = message - self.consent_url = consent_url - self.payload = dict(payload) - - -class MCPToolApprovalRequiredError(RuntimeError): - """Raised when an MCP tool invocation needs human approval. - - This exception is raised when an MCP (Model Context Protocol) tool requires - explicit human approval before the invocation can proceed, typically for - security or compliance reasons. - - :ivar str message: Human-readable guidance returned by the service. - :ivar dict approval_arguments: - Arguments that must be approved or amended before continuing. - :ivar dict payload: Full response payload from the service. 
- - :param str message: Human-readable guidance returned by the service. - :param dict approval_arguments: - Arguments that must be approved or amended before continuing. - :param dict payload: Full response payload supplied by the service. - """ - - def __init__(self, message: str, approval_arguments: Mapping[str, Any], payload: Mapping[str, Any]): - super().__init__(message) - self.message = message - self.approval_arguments = dict(approval_arguments) - self.payload = dict(payload) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_model_base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_model_base.py deleted file mode 100644 index 7e20b20edeb0..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_model_base.py +++ /dev/null @@ -1,174 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# --------------------------------------------------------- - -from enum import Enum -import json - -from typing import Any, Awaitable, Callable, Mapping, Optional -from dataclasses import dataclass -import asyncio # pylint: disable=do-not-import-asyncio -import inspect -from azure.core import CaseInsensitiveEnumMeta - -class ToolSource(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Identifies the origin of a tool. - - Specifies whether a tool comes from an MCP (Model Context Protocol) server - or from the Azure AI Tools API (remote tools). - """ - - MCP_TOOLS = "mcp_tools" - REMOTE_TOOLS = "remote_tools" - -class ToolDefinition: - """Definition of a tool including its parameters. - - :ivar str type: JSON schema type (e.g., "mcp", "a2", other tools). - """ - - def __init__(self, type: str, **kwargs: Any) -> None: - """Initialize ToolDefinition with type and any additional properties. - - :param str type: JSON schema type (e.g., "mcp", "a2", other tools). 
- :param kwargs: Any additional properties to set on the tool definition. - """ - self.type = type - # Store all additional properties as attributes - for key, value in kwargs.items(): - setattr(self, key, value) - - def __repr__(self) -> str: - """Return a detailed string representation of the ToolDefinition. - - :return: JSON string representation of the ToolDefinition. - :rtype: str - """ - return json.dumps(self.__dict__, default=str) - - def __str__(self) -> str: - """Return a human-readable string representation. - - :return: JSON string representation of the ToolDefinition. - :rtype: str - """ - return json.dumps(self.__dict__, default=str) - -@dataclass -class FoundryTool: - """Lightweight description of a tool that can be invoked. - - Represents metadata and configuration for a single tool, including its - name, description, input schema, and source information. - - :ivar str key: Unique identifier for this tool. - :ivar str name: Display name of the tool. - :ivar str description: Human-readable description of what the tool does. - :ivar ~ToolSource source: - Origin of the tool (MCP_TOOLS or REMOTE_TOOLS). - :ivar dict metadata: Raw metadata from the API response. - :ivar dict input_schema: - JSON schema describing the tool's input parameters, or None. - :ivar ToolDefinition tool_definition: - Optional tool definition object, or None. - """ - - key: str - name: str - description: str - source: ToolSource - metadata: Mapping[str, Any] - input_schema: Optional[Mapping[str, Any]] = None - tool_definition: Optional[ToolDefinition] = None - invoker: Optional[Callable[..., Awaitable[Any]]] = None - - def invoke(self, *args: Any, **kwargs: Any) -> Any: - """Invoke the tool synchronously. - - :param args: Positional arguments to pass to the tool. - :type args: Any - :return: The result from the tool invocation. 
- :rtype: Any - """ - - if not self.invoker: - raise NotImplementedError("No invoker function defined for this tool.") - if inspect.iscoroutinefunction(self.invoker): - # If the invoker is async, check if we're already in an event loop - try: - asyncio.get_running_loop() - # We're in a running loop, can't use asyncio.run() - raise RuntimeError( - "Cannot call invoke() on an async tool from within an async context. " - "Use 'await tool.ainvoke(...)' or 'await tool(...)' instead." - ) - except RuntimeError as e: - if "no running event loop" in str(e).lower(): - # No running loop, safe to use asyncio.run() - return asyncio.run(self.invoker(*args, **kwargs)) - # Re-raise our custom error - raise - else: - return self.invoker(*args, **kwargs) - - async def ainvoke(self, *args: Any, **kwargs: Any) -> Any: - """Invoke the tool asynchronously. - - :param args: Positional arguments to pass to the tool. - :type args: Any - :return: The result from the tool invocation. - :rtype: Any - """ - - if not self.invoker: - raise NotImplementedError("No invoker function defined for this tool.") - if inspect.iscoroutinefunction(self.invoker): - return await self.invoker(*args, **kwargs) - - result = self.invoker(*args, **kwargs) - # If the result is awaitable (e.g., a coroutine), await it - if inspect.iscoroutine(result) or hasattr(result, '__await__'): - return await result - return result - - def __call__(self, *args: Any, **kwargs: Any) -> Any: - - # Check if the invoker is async - if self.invoker and inspect.iscoroutinefunction(self.invoker): - # Return coroutine for async context - return self.ainvoke(*args, **kwargs) - - # Use sync invoke - return self.invoke(*args, **kwargs) - - -class UserInfo: - """Represents user information. - - :ivar str objectId: User's object identifier. - :ivar str tenantId: Tenant identifier. - """ - - def __init__(self, objectId: str, tenantId: str, **kwargs: Any) -> None: - """Initialize UserInfo with user details. 
- - :param str objectId: User's object identifier. - :param str tenantId: Tenant identifier. - :param kwargs: Any additional properties to set on the user. - """ - self.objectId = objectId - self.tenantId = tenantId - # Store all additional properties as attributes - for key, value in kwargs.items(): - setattr(self, key, value) - - def to_dict(self) -> dict: - """Convert to dictionary for JSON serialization. - - :return: Dictionary containing objectId and tenantId. - :rtype: dict - """ - return { - "objectId": self.objectId, - "tenantId": self.tenantId - } diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py deleted file mode 100644 index e06ef576264e..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py +++ /dev/null @@ -1,796 +0,0 @@ - -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# --------------------------------------------------------- -# mypy: ignore-errors - -from dataclasses import dataclass, asdict, is_dataclass -from typing import Any, Dict, Iterable, List, Mapping, MutableMapping, Optional, Set, Tuple - -from .._model_base import ToolDefinition, FoundryTool, ToolSource, UserInfo - - - -class ToolDescriptorBuilder: - """Builds FoundryTool objects from raw tool data.""" - - @staticmethod - def build_descriptors( - raw_tools: Iterable[Mapping[str, Any]], - source: ToolSource, - existing_names: Set[str], - ) -> List[FoundryTool]: - """Build tool descriptors from raw tool data. 
- - Parameters - ---------- - raw_tools : Iterable[Mapping[str, Any]] - Raw tool data from API (can be dicts or dataclass objects) - source : ToolSource - Source of the tools - existing_names : Set[str] - Set of existing tool names to avoid conflicts - - Returns - ------- - List[FoundryTool] - List of built tool descriptors - """ - descriptors: List[FoundryTool] = [] - for raw in raw_tools: - # Convert dataclass objects to dictionaries - if is_dataclass(raw) and not isinstance(raw, type): - raw = asdict(raw) - - name, description = ToolMetadataExtractor.extract_name_description(raw) - if not name: - continue - - key = ToolMetadataExtractor.derive_tool_key(raw, source) - description = description or "" - resolved_name = NameResolver.ensure_unique_name(name, existing_names) - - descriptor = FoundryTool( - key=key, - name=resolved_name, - description=description, - source=source, - metadata=dict(raw), - input_schema=ToolMetadataExtractor.extract_input_schema(raw), - tool_definition= raw.get("tool_definition") - ) - descriptors.append(descriptor) - existing_names.add(resolved_name) - - return descriptors - - -class ToolMetadataExtractor: - """Extracts metadata from raw tool data.""" - - @staticmethod - def extract_name_description(raw: Mapping[str, Any]) -> Tuple[Optional[str], Optional[str]]: - """Extract name and description from raw tool data. 
- - Parameters - ---------- - raw : Mapping[str, Any] - Raw tool data - - Returns - ------- - Tuple[Optional[str], Optional[str]] - Tuple of (name, description) - """ - name = ( - raw.get("name") - or raw.get("id") - or raw.get("tool_name") - or raw.get("definition", {}).get("name") - or raw.get("tool", {}).get("name") - ) - description = ( - raw.get("description") - or raw.get("long_description") - or raw.get("definition", {}).get("description") - or raw.get("tool", {}).get("description") - ) - return name, description - - @staticmethod - def derive_tool_key(raw: Mapping[str, Any], source: ToolSource) -> str: - """Derive unique key for a tool. - - Parameters - ---------- - raw : Mapping[str, Any] - Raw tool data - source : ToolSource - Source of the tool - - Returns - ------- - str - Unique tool key - """ - for candidate in (raw.get("id"), raw.get("name"), raw.get("tool_name")): - if candidate: - return f"{source.value}:{candidate}" - return f"{source.value}:{id(raw)}" - - @staticmethod - def extract_input_schema(raw: Mapping[str, Any]) -> Optional[Mapping[str, Any]]: - """Extract input schema from raw tool data. - - Parameters - ---------- - raw : Mapping[str, Any] - Raw tool data - - Returns - ------- - Optional[Mapping[str, Any]] - Input schema if found - """ - for key in ("input_schema", "inputSchema", "schema", "parameters"): - if key in raw and isinstance(raw[key], Mapping): - return raw[key] - nested = raw.get("definition") or raw.get("tool") - if isinstance(nested, Mapping): - return ToolMetadataExtractor.extract_input_schema(nested) - return None - - @staticmethod - def extract_metadata_schema(raw: Mapping[str, Any]) -> Optional[Mapping[str, Any]]: - """Extract input schema from raw tool data. 
- - Parameters - ---------- - raw : Mapping[str, Any] - Raw tool data - - Returns - ------- - Optional[Mapping[str, Any]] - _metadata if found - """ - for key in ("_meta", "metadata", "meta"): - if key in raw and isinstance(raw[key], Mapping): - return raw[key] - return None - - -class NameResolver: - """Resolves tool names to ensure uniqueness.""" - - @staticmethod - def ensure_unique_name(proposed_name: str, existing_names: Set[str]) -> str: - """Ensure a tool name is unique. - - Parameters - ---------- - proposed_name : str - Proposed tool name - existing_names : Set[str] - Set of existing tool names - - Returns - ------- - str - Unique tool name - """ - if proposed_name not in existing_names: - return proposed_name - - suffix = 1 - while True: - candidate = f"{proposed_name}_{suffix}" - if candidate not in existing_names: - return candidate - suffix += 1 - - -class MetadataMapper: - """Maps tool metadata from _meta schema to tool configuration.""" - - # Default key mapping: meta_schema_key -> output_key - # Note: When used with key_overrides, the direction is reversed internally - # to support tool_def_key -> meta_schema_key mapping - DEFAULT_KEY_MAPPING = { - "imagegen_model_deployment_name": "model_deployment_name", - "model_deployment_name": "model", - "deployment_name": "model", - } - - @staticmethod - def extract_metadata_config( - tool_metadata: Mapping[str, Any], - tool_definition: Optional[Mapping[str, Any]] = None, - key_overrides: Optional[Mapping[str, str]] = None, - ) -> Dict[str, Any]: - """Extract metadata configuration from _meta schema and tool definition. - - This method extracts properties defined in the _meta schema and attempts - to find matching values in the tool definition. Key overrides allow mapping - from tool definition property names to _meta schema property names. 
- - Parameters - ---------- - tool_metadata : Mapping[str, Any] - The _meta schema containing property definitions - tool_definition : Optional[Mapping[str, Any]] - The tool definition containing actual values - key_overrides : Optional[Mapping[str, str]] - Mapping from tool definition keys to _meta schema keys. - Format: {"tool_def_key": "meta_schema_key"} - Example: {"model": "imagegen_model_deployment_name"} - - Returns - ------- - Dict[str, Any] - Dictionary with mapped metadata configuration - - Examples - -------- - >>> meta_schema = { - ... "properties": { - ... "quality": {"type": "string", "default": "auto"}, - ... "model_deployment_name": {"type": "string"} - ... } - ... } - >>> tool_def = {"quality": "high", "model": "gpt-4"} - >>> overrides = {"model": "model_deployment_name"} # tool_def -> meta - >>> MetadataMapper.extract_metadata_config(meta_schema, tool_def, overrides) - {'quality': 'high', 'model_deployment_name': 'gpt-4'} - """ - result: Dict[str, Any] = {} - - # Build reverse mapping: tool_definition_key -> meta_property_name - # Start with default mappings (also reversed) - reverse_default_mapping = {v: k for k, v in MetadataMapper.DEFAULT_KEY_MAPPING.items()} - - # Add user overrides (these are already tool_def -> meta format) - tool_to_meta_mapping = dict(reverse_default_mapping) - if key_overrides: - tool_to_meta_mapping.update(key_overrides) - - # Extract properties from _meta schema - properties = tool_metadata.get("properties", {}) - if not isinstance(properties, Mapping): - return result - - for meta_prop_name, prop_schema in properties.items(): - if not isinstance(prop_schema, Mapping): - continue - - is_required = meta_prop_name in tool_metadata.get("required", []) - - # Try to find value in tool definition - value = None - value_from_definition = False - - if tool_definition: - # First check if tool definition has this exact key - if meta_prop_name in tool_definition: - value = tool_definition[meta_prop_name] - value_from_definition = 
True - else: - # Check if any tool definition key maps to this meta property - for tool_key, mapped_meta_key in tool_to_meta_mapping.items(): - if mapped_meta_key == meta_prop_name and tool_key in tool_definition: - value = tool_definition[tool_key] - value_from_definition = True - break - - # If no value from definition, check for default (only use if required) - if value is None and is_required and "default" in prop_schema: - value = prop_schema["default"] - - # Only add if: - # 1. Value is from tool definition, OR - # 2. Value is required and has a default - if value is not None and (value_from_definition or is_required): - result[meta_prop_name] = value - - return result - - @staticmethod - def prepare_metadata_dict( - tool_metadata_raw: Mapping[str, Any], - tool_definition: Optional[Mapping[str, Any]] = None, - key_overrides: Optional[Mapping[str, str]] = None, - ) -> Dict[str, Any]: - """Prepare a _meta dictionary from tool metadata and definition. - - This is a convenience method that extracts the _meta schema from - raw tool metadata and maps it to configuration values. - - Parameters - ---------- - tool_metadata_raw : Mapping[str, Any] - Raw tool metadata containing _meta or similar fields - tool_definition : Optional[Mapping[str, Any]] - The tool definition containing actual values - key_overrides : Optional[Mapping[str, str]] - Mapping from tool definition keys to _meta schema keys. 
- Format: {"tool_def_key": "meta_schema_key"} - - Returns - ------- - Dict[str, Any] - Dictionary with mapped metadata configuration - """ - # Extract _meta schema using existing utility - meta_schema = ToolMetadataExtractor.extract_metadata_schema(tool_metadata_raw) - if not meta_schema: - return {} - - return MetadataMapper.extract_metadata_config( - meta_schema, - tool_definition, - key_overrides - ) - - -class InvocationPayloadBuilder: - """Builds invocation payloads for tool calls.""" - - @staticmethod - def build_payload( - args: Tuple[Any, ...], - kwargs: Dict[str, Any], - configuration: Dict[str, Any], - ) -> Dict[str, Any]: - """Build invocation payload from args and kwargs. - - Parameters - ---------- - args : Tuple[Any, ...] - Positional arguments - kwargs : Dict[str, Any] - Keyword arguments - configuration : Dict[str, Any] - Tool configuration defaults - - Returns - ------- - Dict[str, Any] - Complete invocation payload - """ - user_arguments = InvocationPayloadBuilder._normalize_input(args, kwargs) - merged = dict(configuration) - merged.update(user_arguments) - return merged - - @staticmethod - def _normalize_input( - args: Tuple[Any, ...], - kwargs: Dict[str, Any] - ) -> Dict[str, Any]: - """Normalize invocation input to a dictionary. - - Parameters - ---------- - args : Tuple[Any, ...] 
- Positional arguments - kwargs : Dict[str, Any] - Keyword arguments - - Returns - ------- - Dict[str, Any] - Normalized input dictionary - - Raises - ------ - ValueError - If mixing positional and keyword arguments or providing multiple positional args - """ - if args and kwargs: - raise ValueError("Mixing positional and keyword arguments is not supported") - - if args: - if len(args) > 1: - raise ValueError("Multiple positional arguments are not supported") - candidate = next(iter(args)) - if candidate is None: - return {} - if isinstance(candidate, Mapping): - return dict(candidate) - return {"input": candidate} - - if kwargs: - return dict(kwargs) - - return {} - - -@dataclass -class ToolProperty: - """Represents a single property/parameter in a tool's schema. - - :ivar str type: JSON schema type (e.g., "string", "object", "array"). - :ivar Optional[str] description: Human-readable description of the property. - :ivar Optional[Mapping[str, Any]] properties: Nested properties for object types. - :ivar Any default: Default value for the property. - :ivar List[str] required: List of required nested properties. - """ - - type: str - description: Optional[str] = None - properties: Optional[Mapping[str, Any]] = None - default: Any = None - required: Optional[List[str]] = None - -@dataclass -class ToolParameters: - """Represents the parameters schema for a tool. - - :ivar str type: JSON schema type, typically "object". - :ivar Mapping[str, ToolProperty] properties: Dictionary of parameter properties. - :ivar List[str] required: List of required parameter names. - """ - - type: str - properties: Mapping[str, ToolProperty] - required: Optional[List[str]] = None - -@dataclass -class ToolManifest: - """Represents a tool manifest with metadata and parameters. - - :ivar str name: Unique name of the tool. - :ivar str description: Detailed description of the tool's functionality. - :ivar ToolParameters parameters: Schema defining the tool's input parameters. 
- """ - - name: str - description: str - parameters: ToolParameters - -@dataclass -class RemoteServer: - """Represents remote server configuration for a tool. - - :ivar str projectConnectionId: Identifier for the project connection. - :ivar str protocol: Communication protocol (e.g., "mcp"). - """ - - projectConnectionId: str - protocol: str - - def to_dict(self) -> Dict[str, Any]: - """Convert to dictionary for JSON serialization.""" - return { - "projectConnectionId": self.projectConnectionId, - "protocol": self.protocol - } - -@dataclass -class EnrichedToolEntry(ToolManifest): - """Enriched tool representation with input schema. - - :ivar str name: Name of the tool. - :ivar str description: Description of the tool. - """ - remoteServer: RemoteServer - projectConnectionId: str - protocol: str - inputSchema: Optional[Mapping[str, Any]] = None - tool_definition: Optional[ToolDefinition] = None - -@dataclass -class ToolEntry: - """Represents a single tool entry in the API response. - - :ivar RemoteServer remoteServer: Configuration for the remote server. - :ivar List[ToolManifest] manifest: List of tool manifests provided by this entry. - """ - - remoteServer: RemoteServer - manifest: List[ToolManifest] - -@dataclass -class ToolsResponse: - """Root response model for the tools API. - - :ivar List[ToolEntry] tools: List of tool entries from the API. - """ - - tools: List[ToolEntry] - enriched_tools: List[EnrichedToolEntry] - - @classmethod - def from_dict(cls, data: Mapping[str, Any], tool_definitions: List[ToolDefinition]) -> "ToolsResponse": - """Create a ToolsResponse from a dictionary. - - :param Mapping[str, Any] data: Dictionary representation of the API response. - :return: Parsed ToolsResponse instance. 
- :rtype: ToolsResponse - """ - tool_defintions_map = {f"{td.type.lower()}_{td.project_connection_id.lower()}": td for td in tool_definitions} - - def tool_definition_lookup(remote_server: RemoteServer) -> Optional[ToolDefinition]: - return tool_defintions_map.get(f"{remote_server.protocol.lower()}_{remote_server.projectConnectionId.lower()}") - - - tools = [] - flattend_tools = [] - for tool_data in data.get("tools", []): - remote_server = RemoteServer( - projectConnectionId=tool_data["remoteServer"]["projectConnectionId"], - protocol=tool_data["remoteServer"]["protocol"] - ) - - manifests = [] - for manifest_data in tool_data.get("manifest", []): - params_data = manifest_data.get("parameters", {}) - properties = {} - - for prop_name, prop_data in params_data.get("properties", {}).items(): - properties[prop_name] = ToolProperty( - type=prop_data.get("type"), - description=prop_data.get("description"), - properties=prop_data.get("properties"), - default=prop_data.get("default"), - required=prop_data.get("required") - ) - - parameters = ToolParameters( - type=params_data.get("type", "object"), - properties=properties, - required=params_data.get("required") - ) - manifest = ToolManifest( - name=manifest_data["name"], - description=manifest_data["description"], - parameters=parameters - ) - manifests.append(manifest) - tool_definition = tool_definition_lookup(remote_server) - flattend_tools.append(EnrichedToolEntry( - projectConnectionId=remote_server.projectConnectionId, - protocol=remote_server.protocol, - name=manifest.name, - description=manifest.description, - parameters=parameters, - remoteServer=remote_server, - inputSchema=parameters, - tool_definition=tool_definition - )) - - tools.append(ToolEntry( - remoteServer=remote_server, - manifest=manifests - )) - - return cls(tools=tools, enriched_tools=flattend_tools) - -class ResolveToolsRequest: - """Represents a request containing remote servers and user information. 
- - :ivar List[RemoteServer] remoteservers: List of remote server configurations. - :ivar UserInfo user: User information. - """ - - def __init__(self, remoteservers: List[RemoteServer], user: UserInfo) -> None: - """Initialize RemoteServersRequest with servers and user info. - - :param List[RemoteServer] remoteservers: List of remote server configurations. - :param UserInfo user: User information. - """ - self.remoteservers = remoteservers - self.user: UserInfo = user - - def to_dict(self) -> Dict[str, Any]: - """Convert to dictionary for JSON serialization.""" - result = { - "remoteservers": [rs.to_dict() for rs in self.remoteservers] - } - if self.user: - # Handle both UserInfo objects and dictionaries - if isinstance(self.user, dict): - # Validate required fields for dict - if self.user.get("objectId") and self.user.get("tenantId"): - result["user"] = { - "objectId": self.user["objectId"], - "tenantId": self.user["tenantId"] - } - elif hasattr(self.user, "objectId") and hasattr(self.user, "tenantId"): - # UserInfo object - if self.user.objectId and self.user.tenantId: - result["user"] = { - "objectId": self.user.objectId, - "tenantId": self.user.tenantId - } - return result - - -class ToolConfigurationParser: - """Parses and processes tool configuration. - - This class handles parsing and categorizing tool configurations into - remote tools (MCP/A2A) and named MCP tools. - - :param List[Mapping[str, Any]] tools_config: - List of tool configurations to parse. Can be None. - """ - - def __init__(self, tools_definitions: Optional[List[Any]] = None): - """Initialize the parser. - - :param tools_definitions: List of tool configurations (can be dicts or ToolDefinition objects), or None. 
- :type tools_definitions: Optional[List[Any]] - """ - # Convert dictionaries to ToolDefinition objects if needed - self._tools_definitions = [] - for tool_def in (tools_definitions or []): - if isinstance(tool_def, dict): - # Convert dict to ToolDefinition - tool_type = tool_def.get("type") - if tool_type: - self._tools_definitions.append(ToolDefinition(type=tool_type, **{k: v for k, v in tool_def.items() if k != "type"})) - elif isinstance(tool_def, ToolDefinition): - self._tools_definitions.append(tool_def) - - self._remote_tools: List[ToolDefinition] = [] - self._named_mcp_tools: List[ToolDefinition] = [] - self._parse_tools_config() - - def _parse_tools_config(self) -> None: - """Parse tools configuration into categorized lists. - - Separates tool configurations into remote tools (MCP/A2A types) and - named MCP tools based on the 'type' field in each configuration. - """ - for tool_definition in self._tools_definitions: - tool_type = tool_definition.type.lower() - if tool_type in ["mcp", "a2a"]: - self._remote_tools.append(tool_definition) - else: - self._named_mcp_tools.append(tool_definition) - -def to_remote_server(tool_definition: ToolDefinition) -> RemoteServer: - """Convert ToolDefinition to RemoteServer. - - :param ToolDefinition tool_definition: - Tool definition to convert. - :return: Converted RemoteServer instance. - :rtype: RemoteServer - """ - return RemoteServer( - projectConnectionId=tool_definition.project_connection_id, - protocol=tool_definition.type.lower() - ) - - -@dataclass -class MCPToolSchema: - """Represents the input schema for an MCP tool. - - :ivar str type: JSON schema type, typically "object". - :ivar Mapping[str, Any] properties: Dictionary of parameter properties. - :ivar List[str] required: List of required parameter names. - """ - - type: str - properties: Mapping[str, Any] - required: Optional[List[str]] = None - - -@dataclass -class MCPToolMetadata: - """Represents the _meta field for an MCP tool. 
- - :ivar str type: JSON schema type, typically "object". - :ivar Mapping[str, Any] properties: Dictionary of metadata properties. - :ivar List[str] required: List of required metadata parameter names. - """ - - type: str - properties: Mapping[str, Any] - required: Optional[List[str]] = None - - -@dataclass -class MCPTool: - """Represents a single MCP tool from the tools/list response. - - :ivar str name: Unique name of the tool. - :ivar str title: Display title of the tool. - :ivar str description: Detailed description of the tool's functionality. - :ivar MCPToolSchema inputSchema: Schema defining the tool's input parameters. - :ivar Optional[MCPToolMetadata] _meta: Optional metadata schema for the tool. - """ - - name: str - title: str - description: str - inputSchema: MCPToolSchema - _meta: Optional[MCPToolMetadata] = None - -@dataclass -class EnrichedMCPTool(MCPTool): - """Represents an enriched MCP tool with additional metadata. - - :ivar ToolDefinition tool_definition: Associated tool definition. - """ - tool_definition: Optional[ToolDefinition] = None - -@dataclass -class MCPToolsListResult: - """Represents the result field of an MCP tools/list response. - - :ivar List[MCPTool] tools: List of available MCP tools. - """ - - tools: List[MCPTool] - - -@dataclass -class MCPToolsListResponse: - """Root response model for the MCP tools/list JSON-RPC response. - - :ivar str jsonrpc: JSON-RPC protocol version (e.g., "2.0"). - :ivar int id: Request identifier. - :ivar MCPToolsListResult result: Result containing the list of tools. - """ - - jsonrpc: str - id: int - result: MCPToolsListResult - - @classmethod - def from_dict(cls, data: Mapping[str, Any], tool_definitions: List[ToolDefinition]) -> "MCPToolsListResponse": - """Create an MCPToolsListResponse from a dictionary. - - :param Mapping[str, Any] data: Dictionary representation of the JSON-RPC response. - :return: Parsed MCPToolsListResponse instance. 
- :rtype: MCPToolsListResponse - """ - result_data = data.get("result", {}) - tools_list = [] - tool_definitions_map = {f"{td.type.lower()}": td for td in tool_definitions} - filter_tools = len(tool_definitions_map) > 0 - for tool_data in result_data.get("tools", []): - - if filter_tools and tool_data["name"].lower() not in tool_definitions_map: - continue - # Parse inputSchema - input_schema_data = tool_data.get("inputSchema", {}) - input_schema = MCPToolSchema( - type=input_schema_data.get("type", "object"), - properties=input_schema_data.get("properties", {}), - required=input_schema_data.get("required") - ) - - # Parse _meta if present - meta = None - meta_data = tool_data.get("_meta") - if meta_data: - meta = MCPToolMetadata( - type=meta_data.get("type", "object"), - properties=meta_data.get("properties", {}), - required=meta_data.get("required") - ) - - # Create MCPTool - mcp_tool = EnrichedMCPTool( - name=tool_data["name"], - title=tool_data.get("title", tool_data["name"]), - description=tool_data.get("description", ""), - inputSchema=input_schema, - _meta=meta, - tool_definition=tool_definitions_map.get(tool_data["name"].lower()) - ) - - tools_list.append(mcp_tool) - - # Create result - result = MCPToolsListResult(tools=tools_list) - - return cls( - jsonrpc=data.get("jsonrpc", "2.0"), - id=data.get("id", 0), - result=result - ) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py deleted file mode 100644 index 047a3b7919e7..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# --------------------------------------------------------- - -from ._client import AzureAIToolClient, FoundryTool -from .._exceptions import OAuthConsentRequiredError, MCPToolApprovalRequiredError - -__all__ = [ - "AzureAIToolClient", - "FoundryTool", - "OAuthConsentRequiredError", - "MCPToolApprovalRequiredError", -] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py deleted file mode 100644 index 986e8756e1b6..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py +++ /dev/null @@ -1,207 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# --------------------------------------------------------- -# pylint: disable=protected-access,do-not-import-asyncio -from typing import Any, List, Mapping, Union, TYPE_CHECKING -from asyncio import gather -from azure.core import AsyncPipelineClient -from azure.core.pipeline import policies -from azure.core.tracing.decorator_async import distributed_trace_async - -from ._configuration import AzureAIToolClientConfiguration -from .._utils._model_base import InvocationPayloadBuilder -from .._model_base import FoundryTool, ToolSource - -from .operations._operations import MCPToolsOperations, RemoteToolsOperations - -if TYPE_CHECKING: - from azure.core.credentials_async import AsyncTokenCredential - -class AzureAIToolClient: - """Asynchronous client for aggregating tools from Azure AI MCP and Tools APIs. - - This client provides access to tools from both MCP (Model Context Protocol) servers - and Azure AI Tools API endpoints, enabling unified tool discovery and invocation. - - :param str endpoint: - The fully qualified endpoint for the Azure AI Agents service. 
- Example: "https://.api.azureml.ms" - :param credential: - Credential for authenticating requests to the service. - Use credentials from azure-identity like DefaultAzureCredential. - :type credential: ~azure.core.credentials.TokenCredential - :keyword str agent_name: - Name of the agent to use for tool operations. Default is "$default". - :keyword List[Mapping[str, Any]] tools: - List of tool configurations defining which tools to include. - :keyword Mapping[str, Any] user: - User information for tool invocations (object_id, tenant_id). - :keyword str api_version: - API version to use when communicating with the service. - Default is the latest supported version. - :keyword transport: - Custom transport implementation. Default is RequestsTransport. - :paramtype transport: ~azure.core.pipeline.transport.HttpTransport - - """ - - def __init__( - self, - endpoint: str, - credential: "AsyncTokenCredential", - **kwargs: Any, - ) -> None: - """Initialize the asynchronous Azure AI Tool Client. - - :param str endpoint: The service endpoint URL. - :param credential: Credentials for authenticating requests. - :type credential: ~azure.core.credentials.TokenCredential - :keyword kwargs: Additional keyword arguments for client configuration. 
- """ - self._config = AzureAIToolClientConfiguration( - endpoint, - credential, - **kwargs, - ) - - _policies = kwargs.pop("policies", None) - if _policies is None: - _policies = [ - policies.RequestIdPolicy(**kwargs), - self._config.headers_policy, - self._config.user_agent_policy, - self._config.proxy_policy, - policies.ContentDecodePolicy(**kwargs), - self._config.redirect_policy, - self._config.retry_policy, - self._config.authentication_policy, - self._config.custom_hook_policy, - self._config.logging_policy, - policies.DistributedTracingPolicy(**kwargs), - policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, - self._config.http_logging_policy, - ] - self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=endpoint, policies=_policies, **kwargs) - - # Initialize specialized clients with client and config - self._mcp_tools = MCPToolsOperations(client=self._client, config=self._config) - self._remote_tools = RemoteToolsOperations(client=self._client, config=self._config) - - async def list_tools(self) -> List[FoundryTool]: - """List all available tools from configured sources. - - Retrieves tools from both MCP servers and Azure AI Tools API endpoints, - returning them as AzureAITool instances ready for invocation. - :return: List of available tools from all configured sources. - :rtype: List[~AzureAITool] - :raises ~Tool_Client.exceptions.OAuthConsentRequiredError: - Raised when the service requires user OAuth consent. - :raises ~Tool_Client.exceptions.MCPToolApprovalRequiredError: - Raised when tool access requires human approval. - :raises ~azure.core.exceptions.HttpResponseError: - Raised for HTTP communication failures. 
- - """ - - existing_names: set[str] = set() - - tools: List[FoundryTool] = [] - - # Fetch MCP tools and Tools API tools in parallel - # Build list of coroutines to gather based on configuration - tasks = [] - if ( - self._config.tool_config._named_mcp_tools - and len(self._config.tool_config._named_mcp_tools) > 0 - ): - tasks.append(self._mcp_tools.list_tools(existing_names)) - if ( - self._config.tool_config._remote_tools - and len(self._config.tool_config._remote_tools) > 0 - ): - tasks.append(self._remote_tools.resolve_tools(existing_names)) - - # Execute all tasks in parallel if any exist - if tasks: - results = await gather(*tasks) - for result in results: - tools.extend(result) - - for tool in tools: - # Capture tool in a closure to avoid shadowing issues - def make_invoker(captured_tool): - async def _invoker(*args, **kwargs): - return await self.invoke_tool(captured_tool, *args, **kwargs) - return _invoker - tool.invoker = make_invoker(tool) - - return tools - - @distributed_trace_async - async def invoke_tool( - self, - tool: Union[str, FoundryTool], - *args: Any, - **kwargs: Any, - ) -> Any: - """Invoke a tool by instance, name, or descriptor. - - :param tool: Tool to invoke, specified as an AzureAITool instance, - tool name string, or FoundryTool. - :type tool: Union[~AzureAITool, str, ~Tool_Client.models.FoundryTool] - :param args: Positional arguments to pass to the tool. - :type args: Any - :return: The result of invoking the tool. - :rtype: Any - """ - descriptor = await self._resolve_tool_descriptor(tool) - payload = InvocationPayloadBuilder.build_payload(args, kwargs, configuration={}) - return await self._invoke_tool(descriptor, payload, **kwargs) - - async def _resolve_tool_descriptor( - self, tool: Union[str, FoundryTool] - ) -> FoundryTool: - """Resolve a tool reference to a descriptor. - - :param tool: Tool to resolve, either a FoundryTool instance or a string name/key. 
- :type tool: Union[str, FoundryTool] - :return: The resolved FoundryTool descriptor. - :rtype: FoundryTool - """ - if isinstance(tool, FoundryTool): - return tool - if isinstance(tool, str): - # Fetch all tools and find matching descriptor - descriptors = await self.list_tools() - for descriptor in descriptors: - if tool in (descriptor.name, descriptor.key): - return descriptor - raise KeyError(f"Unknown tool: {tool}") - raise TypeError("Tool must be an AsyncAzureAITool, FoundryTool, or registered name/key") - - async def _invoke_tool(self, descriptor: FoundryTool, arguments: Mapping[str, Any], **kwargs: Any) -> Any: #pylint: disable=unused-argument - """Invoke a tool descriptor. - - :param descriptor: The tool descriptor to invoke. - :type descriptor: FoundryTool - :param arguments: Arguments to pass to the tool. - :type arguments: Mapping[str, Any] - :return: The result of the tool invocation. - :rtype: Any - """ - if descriptor.source is ToolSource.MCP_TOOLS: - return await self._mcp_tools.invoke_tool(descriptor, arguments) - if descriptor.source is ToolSource.REMOTE_TOOLS: - return await self._remote_tools.invoke_tool(descriptor, arguments) - raise ValueError(f"Unsupported tool source: {descriptor.source}") - - async def close(self) -> None: - """Close the underlying HTTP pipeline.""" - await self._client.close() - - async def __aenter__(self) -> "AzureAIToolClient": - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details: Any) -> None: - await self._client.__aexit__(*exc_details) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py deleted file mode 100644 index 4eb5503dee8d..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py +++ /dev/null @@ -1,86 +0,0 @@ -# 
--------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# --------------------------------------------------------- - -from typing import Any, Mapping, List, Optional, TYPE_CHECKING - -from azure.core.pipeline import policies - -from .._utils._model_base import ToolConfigurationParser - -if TYPE_CHECKING: - from azure.core.credentials_async import AsyncTokenCredential - -class AzureAIToolClientConfiguration: # pylint: disable=too-many-instance-attributes - """Configuration for Azure AI Tool Client. - - Manages authentication, endpoint configuration, and policy settings for the - Azure AI Tool Client. This class is used internally by the client and should - not typically be instantiated directly. - - :param str endpoint: - Fully qualified endpoint for the Azure AI Agents service. - :param credential: - Azure TokenCredential for authentication. - :type credential: ~azure.core.credentials.TokenCredential - :keyword str api_version: - API version to use. Default is the latest supported version. - :keyword List[str] credential_scopes: - OAuth2 scopes for token requests. Default is ["https://ai.azure.com/.default"]. - :keyword str agent_name: - Name of the agent. Default is "$default". - :keyword List[Mapping[str, Any]] tools: - List of tool configurations. - :keyword Mapping[str, Any] user: - User information for tool invocations. - """ - - def __init__( - self, - endpoint: str, - credential: "AsyncTokenCredential", - **kwargs: Any, - ) -> None: - """Initialize the configuration. - - :param str endpoint: The service endpoint URL. - :param credential: Credentials for authenticating requests. - :type credential: ~azure.core.credentials.TokenCredential - :keyword kwargs: Additional configuration options. 
- """ - api_version: str = kwargs.pop("api_version", "2025-05-15-preview") - - self.endpoint = endpoint - self.credential = credential - self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://ai.azure.com/.default"]) - - # Tool configuration - self.agent_name: str = kwargs.pop("agent_name", "$default") - self.tools: Optional[List[Mapping[str, Any]]] = kwargs.pop("tools", None) - self.user: Optional[Mapping[str, Any]] = kwargs.pop("user", None) - - # Initialize tool configuration parser - self.tool_config = ToolConfigurationParser(self.tools) - - self._configure(**kwargs) - - # Warn about unused kwargs - if kwargs: - import warnings - warnings.warn(f"Unused configuration parameters: {list(kwargs.keys())}", UserWarning) - - def _configure(self, **kwargs: Any) -> None: - self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) - self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) - self.authentication_policy = kwargs.get("authentication_policy") - if self.credential and not self.authentication_policy: - self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy( - self.credential, *self.credential_scopes, **kwargs - ) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py 
b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py deleted file mode 100644 index 7d1310518519..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py +++ /dev/null @@ -1,187 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# --------------------------------------------------------- -# mypy: ignore-errors - -import json -from typing import Any, Dict, List, Mapping, MutableMapping - -from azure.core import AsyncPipelineClient -from ..._exceptions import OAuthConsentRequiredError -from .._configuration import AzureAIToolClientConfiguration - -from ...operations._operations import ( - build_remotetools_invoke_tool_request, - build_remotetools_resolve_tools_request, - prepare_remotetools_invoke_tool_request_content, - prepare_remotetools_resolve_tools_request_content, - build_mcptools_list_tools_request, - prepare_mcptools_list_tools_request_content, - build_mcptools_invoke_tool_request, - prepare_mcptools_invoke_tool_request_content, - API_VERSION, - MCP_ENDPOINT_PATH, - TOOL_PROPERTY_OVERRIDES, - DEFAULT_ERROR_MAP, - MCP_HEADERS, - REMOTE_TOOLS_HEADERS, - prepare_request_headers, - prepare_error_map, - handle_response_error, - build_list_tools_request, - process_list_tools_response, - build_invoke_mcp_tool_request, - build_resolve_tools_request, - process_resolve_tools_response, - build_invoke_remote_tool_request, - process_invoke_remote_tool_response, -) -from ..._model_base import FoundryTool, ToolSource, UserInfo - -from ..._utils._model_base import ToolsResponse, ToolDescriptorBuilder, ToolConfigurationParser, ResolveToolsRequest -from ..._utils._model_base import to_remote_server, MCPToolsListResponse, MetadataMapper - -from azure.core.rest import HttpRequest, HttpResponse -from azure.core.pipeline import PipelineResponse - -from 
azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - map_error, -) - -class MCPToolsOperations: - - def __init__(self, *args, **kwargs) -> None: - """Initialize MCP client. - - Parameters - ---------- - client : AsyncPipelineClient - Azure AsyncPipelineClient for HTTP requests - config : AzureAIToolClientConfiguration - Configuration object - """ - input_args = list(args) - self._client : AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config : AzureAIToolClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") - - if self._client is None or self._config is None: - raise ValueError("Both 'client' and 'config' must be provided") - - self._endpoint_path = MCP_ENDPOINT_PATH - self._api_version = API_VERSION - - async def list_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool]: - """List MCP tools. - - :return: List of tool descriptors from MCP server. - :rtype: List[FoundryTool] - """ - _request, error_map, remaining_kwargs = build_list_tools_request(self._api_version, kwargs) - - path_format_arguments = {"endpoint": self._config.endpoint} - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - pipeline_response: PipelineResponse = await self._client._pipeline.run(_request, **remaining_kwargs) - response = pipeline_response.http_response - - handle_response_error(response, error_map) - return process_list_tools_response(response, self._config.tool_config._named_mcp_tools, existing_names) - - async def invoke_tool( - self, - tool: FoundryTool, - arguments: Mapping[str, Any], - **kwargs: Any - ) -> Any: - """Invoke an MCP tool. - - :param tool: Tool descriptor for the tool to invoke. - :type tool: FoundryTool - :param arguments: Input arguments for the tool. - :type arguments: Mapping[str, Any] - :return: Result of the tool invocation. 
- :rtype: Any - """ - _request, error_map = build_invoke_mcp_tool_request(self._api_version, tool, arguments) - - path_format_arguments = {"endpoint": self._config.endpoint} - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - pipeline_response: PipelineResponse = await self._client._pipeline.run(_request, **kwargs) - response = pipeline_response.http_response - - handle_response_error(response, error_map) - return response.json().get("result") - -class RemoteToolsOperations: - def __init__(self, *args, **kwargs) -> None: - """Initialize Tools API client. - - :param client: Azure PipelineClient for HTTP requests. - :type client: ~azure.core.PipelineClient - :param config: Configuration object. - :type config: ~Tool_Client.models.AzureAIToolClientConfiguration - :raises ValueError: If required parameters are not provided. - """ - input_args = list(args) - self._client : AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config : AzureAIToolClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") - - if self._client is None or self._config is None: - raise ValueError("Both 'client' and 'config' must be provided") - - - # Apply agent name substitution to endpoint paths - self.agent = self._config.agent_name.strip() if self._config.agent_name and self._config.agent_name.strip() else "$default" - self._api_version = API_VERSION - - async def resolve_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool]: - """Resolve remote tools from Azure AI Tools API. - - :return: List of tool descriptors from Tools API. 
- :rtype: List[FoundryTool] - """ - result = build_resolve_tools_request(self.agent, self._api_version, self._config.tool_config, self._config.user, kwargs) - if result[0] is None: - return [] - - _request, error_map, remaining_kwargs = result - - path_format_arguments = {"endpoint": self._config.endpoint} - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - pipeline_response: PipelineResponse = await self._client._pipeline.run(_request, **remaining_kwargs) - response = pipeline_response.http_response - - handle_response_error(response, error_map) - return process_resolve_tools_response(response, self._config.tool_config._remote_tools, existing_names) - - async def invoke_tool( - self, - tool: FoundryTool, - arguments: Mapping[str, Any], - ) -> Any: - """Invoke a remote tool. - - :param tool: Tool descriptor to invoke. - :type tool: FoundryTool - :param arguments: Input arguments for the tool. - :type arguments: Mapping[str, Any] - :return: Result of the tool invocation. 
- :rtype: Any - """ - _request, error_map = build_invoke_remote_tool_request(self.agent, self._api_version, tool, self._config.user, arguments) - - path_format_arguments = {"endpoint": self._config.endpoint} - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - pipeline_response: PipelineResponse = await self._client._pipeline.run(_request) - response = pipeline_response.http_response - - handle_response_error(response, error_map) - return process_invoke_remote_tool_response(response) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py deleted file mode 100644 index 0a84ef2e6409..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py +++ /dev/null @@ -1,551 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# --------------------------------------------------------- -# mypy: ignore-errors - -import json -from typing import Any, Dict, List, Mapping, MutableMapping, Tuple, Union -from azure.core import PipelineClient -from .._configuration import AzureAIToolClientConfiguration -from .._model_base import FoundryTool, ToolSource, UserInfo - -from .._utils._model_base import ToolsResponse, ToolDescriptorBuilder, ToolConfigurationParser, ResolveToolsRequest -from .._utils._model_base import to_remote_server, MCPToolsListResponse, MetadataMapper -from azure.core.pipeline import PipelineResponse -from azure.core.rest import HttpRequest, HttpResponse - -from .._exceptions import OAuthConsentRequiredError - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - map_error, -) - - -# Shared constants -API_VERSION = "2025-11-15-preview" -MCP_ENDPOINT_PATH = "/mcp_tools" - -# Tool-specific property key overrides -# Format: {"tool_name": {"tool_def_key": "meta_schema_key"}} -TOOL_PROPERTY_OVERRIDES: Dict[str, Dict[str, str]] = { - "image_generation": { - "model": "imagegen_model_deployment_name" - }, - # Add more tool-specific mappings as needed -} - -# Shared error map -DEFAULT_ERROR_MAP: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, -} - -# Shared header configurations -MCP_HEADERS = { - "Content-Type": "application/json", - "Accept": "application/json,text/event-stream", - "Connection": "keep-alive", - "Cache-Control": "no-cache", -} - -REMOTE_TOOLS_HEADERS = { - "Content-Type": "application/json", - "Accept": "application/json", -} - -# Helper functions for request/response processing -def prepare_request_headers(base_headers: Dict[str, str], custom_headers: Mapping[str, str] = None) -> Dict[str, str]: - """Prepare request headers by merging base and custom headers. 
- - :param base_headers: Base headers to use - :param custom_headers: Custom headers to merge - :return: Merged headers dictionary - """ - headers = base_headers.copy() - if custom_headers: - headers.update(custom_headers) - return headers - -def prepare_error_map(custom_error_map: Mapping[int, Any] = None) -> MutableMapping: - """Prepare error map by merging default and custom error mappings. - - :param custom_error_map: Custom error mappings to merge - :return: Merged error map - """ - error_map = DEFAULT_ERROR_MAP.copy() - if custom_error_map: - error_map.update(custom_error_map) - return error_map - -def format_and_execute_request( - client: PipelineClient, - request: HttpRequest, - endpoint: str, - **kwargs: Any -) -> HttpResponse: - """Format request URL and execute pipeline. - - :param client: Pipeline client - :param request: HTTP request to execute - :param endpoint: Endpoint URL for formatting - :return: HTTP response - """ - path_format_arguments = {"endpoint": endpoint} - request.url = client.format_url(request.url, **path_format_arguments) - pipeline_response: PipelineResponse = client._pipeline.run(request, **kwargs) - return pipeline_response.http_response - -def handle_response_error(response: HttpResponse, error_map: MutableMapping) -> None: - """Handle HTTP response errors. - - :param response: HTTP response to check - :param error_map: Error map for status code mapping - :raises HttpResponseError: If response status is not 200 - """ - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - -def process_list_tools_response( - response: HttpResponse, - named_mcp_tools: Any, - existing_names: set -) -> List[FoundryTool]: - """Process list_tools response and build descriptors. 
- - :param response: HTTP response with MCP tools - :param named_mcp_tools: Named MCP tools configuration - :param existing_names: Set of existing tool names - :return: List of tool descriptors - """ - mcp_response = MCPToolsListResponse.from_dict(response.json(), named_mcp_tools) - raw_tools = mcp_response.result.tools - return ToolDescriptorBuilder.build_descriptors( - raw_tools, - ToolSource.MCP_TOOLS, - existing_names, - ) - -def process_resolve_tools_response( - response: HttpResponse, - remote_tools: Any, - existing_names: set -) -> List[FoundryTool]: - """Process resolve_tools response and build descriptors. - - :param response: HTTP response with remote tools - :param remote_tools: Remote tools configuration - :param existing_names: Set of existing tool names - :return: List of tool descriptors - """ - payload = response.json() - response_type = payload.get("type") - result = payload.get("toolResult") - - if response_type == "OAuthConsentRequired": - consent_url = result.get("consentUrl") - message = result.get("message") - if not consent_url: - consent_url = message - raise OAuthConsentRequiredError(message, consent_url=consent_url, payload=payload) - - toolResponse = ToolsResponse.from_dict(payload, remote_tools) - return ToolDescriptorBuilder.build_descriptors( - toolResponse.enriched_tools, - ToolSource.REMOTE_TOOLS, - existing_names, - ) - -def build_list_tools_request( - api_version: str, - kwargs: Dict[str, Any] -) -> Tuple[HttpRequest, MutableMapping, Dict[str, str]]: - """Build request for listing MCP tools. 
- - :param api_version: API version - :param kwargs: Additional arguments (headers, params, error_map) - :return: Tuple of (request, error_map, params) - """ - error_map = prepare_error_map(kwargs.pop("error_map", None)) - _headers = prepare_request_headers(MCP_HEADERS, kwargs.pop("headers", None)) - _params = kwargs.pop("params", {}) or {} - - _content = prepare_mcptools_list_tools_request_content() - content = json.dumps(_content) - _request = build_mcptools_list_tools_request(api_version=api_version, headers=_headers, params=_params, content=content) - - return _request, error_map, kwargs - -def build_invoke_mcp_tool_request( - api_version: str, - tool: FoundryTool, - arguments: Mapping[str, Any], - **kwargs: Any -) -> Tuple[HttpRequest, MutableMapping]: - """Build request for invoking MCP tool. - - :param api_version: API version - :param tool: Tool descriptor - :param arguments: Tool arguments - :return: Tuple of (request, error_map) - """ - error_map = prepare_error_map() - _headers = prepare_request_headers(MCP_HEADERS) - _params = {} - - _content = prepare_mcptools_invoke_tool_request_content(tool, arguments, TOOL_PROPERTY_OVERRIDES) - - content = json.dumps(_content) - _request = build_mcptools_invoke_tool_request(api_version=api_version, headers=_headers, params=_params, content=content) - - return _request, error_map - -def build_resolve_tools_request( - agent_name: str, - api_version: str, - tool_config: ToolConfigurationParser, - user: UserInfo, - kwargs: Dict[str, Any] -) -> Union[Tuple[HttpRequest, MutableMapping, Dict[str, Any]], Tuple[None, None, None]]: - """Build request for resolving remote tools. 
- - :param agent_name: Agent name - :param api_version: API version - :param tool_config: Tool configuration - :param user: User info - :param kwargs: Additional arguments - :return: Tuple of (request, error_map, remaining_kwargs) or (None, None, None) - """ - error_map = prepare_error_map(kwargs.pop("error_map", None)) - _headers = prepare_request_headers(REMOTE_TOOLS_HEADERS, kwargs.pop("headers", None)) - _params = kwargs.pop("params", {}) or {} - - _content = prepare_remotetools_resolve_tools_request_content(tool_config, user) - if _content is None: - return None, None, None - - content = json.dumps(_content.to_dict()) - _request = build_remotetools_resolve_tools_request(agent_name, api_version=api_version, headers=_headers, params=_params, content=content) - - return _request, error_map, kwargs - -def build_invoke_remote_tool_request( - agent_name: str, - api_version: str, - tool: FoundryTool, - user: UserInfo, - arguments: Mapping[str, Any] -) -> Tuple[HttpRequest, MutableMapping]: - """Build request for invoking remote tool. - - :param agent_name: Agent name - :param api_version: API version - :param tool: Tool descriptor - :param user: User info - :param arguments: Tool arguments - :return: Tuple of (request, error_map) - """ - error_map = prepare_error_map() - _headers = prepare_request_headers(REMOTE_TOOLS_HEADERS) - _params = {} - - _content = prepare_remotetools_invoke_tool_request_content(tool, user, arguments) - content = json.dumps(_content) - _request = build_remotetools_invoke_tool_request(agent_name, api_version=api_version, headers=_headers, params=_params, content=content) - - return _request, error_map - -def process_invoke_remote_tool_response(response: HttpResponse) -> Any: - """Process remote tool invocation response. 
- - :param response: HTTP response - :return: Tool result - :raises OAuthConsentRequiredError: If OAuth consent is required - """ - payload = response.json() - response_type = payload.get("type") - result = payload.get("toolResult") - - if response_type == "OAuthConsentRequired": - raise OAuthConsentRequiredError(result.get("message"), consent_url=result.get("consentUrl"), payload=payload) - return result - -class MCPToolsOperations: - - def __init__(self, *args, **kwargs) -> None: - """Initialize MCP client. - - Parameters - ---------- - client : PipelineClient - Azure PipelineClient for HTTP requests - config : AzureAIToolClientConfiguration - Configuration object - """ - input_args = list(args) - self._client : PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config : AzureAIToolClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") - - if self._client is None or self._config is None: - raise ValueError("Both 'client' and 'config' must be provided") - - self._endpoint_path = MCP_ENDPOINT_PATH - self._api_version = API_VERSION - - def list_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool]: - """List MCP tools. - - :return: List of tool descriptors from MCP server. - :rtype: List[FoundryTool] - """ - _request, error_map, remaining_kwargs = build_list_tools_request(self._api_version, kwargs) - response = format_and_execute_request(self._client, _request, self._config.endpoint, **remaining_kwargs) - handle_response_error(response, error_map) - return process_list_tools_response(response, self._config.tool_config._named_mcp_tools, existing_names) - - def invoke_tool( - self, - tool: FoundryTool, - arguments: Mapping[str, Any], - ) -> Any: - """Invoke an MCP tool. - - :param tool: Tool descriptor for the tool to invoke. - :type tool: FoundryTool - :param arguments: Input arguments for the tool. - :type arguments: Mapping[str, Any] - :return: Result of the tool invocation. 
- :rtype: Any - """ - _request, error_map = build_invoke_mcp_tool_request(self._api_version, tool, arguments) - response = format_and_execute_request(self._client, _request, self._config.endpoint) - handle_response_error(response, error_map) - return response.json().get("result") - -def prepare_mcptools_list_tools_request_content() -> Any: - return { - "jsonrpc": "2.0", - "id": 1, - "method": "tools/list", - "params": {} - } - -def build_mcptools_list_tools_request( - api_version: str, - headers: Mapping[str, str] = None, - params: Mapping[str, str] = None, - **kwargs: Any - ) -> HttpRequest: - """Build the HTTP request for listing MCP tools. - - :param api_version: API version to use. - :type api_version: str - :param headers: Additional headers for the request. - :type headers: Mapping[str, str], optional - :param params: Query parameters for the request. - :type params: Mapping[str, str], optional - :return: Constructed HttpRequest object. - :rtype: ~azure.core.rest.HttpRequest - """ - _headers = headers or {} - _params = params or {} - _params["api-version"] = api_version - - _url = f"/mcp_tools" - return HttpRequest(method="POST", url=_url, headers=_headers, params=_params, **kwargs) - -def prepare_mcptools_invoke_tool_request_content(tool: FoundryTool, arguments: Mapping[str, Any], tool_overrides: Dict[str, Dict[str, str]]) -> Any: - - params = { - "name": tool.name, - "arguments": dict(arguments), - } - - if tool.tool_definition: - - key_overrides = tool_overrides.get(tool.name, {}) - meta_config = MetadataMapper.prepare_metadata_dict( - tool.metadata, - tool.tool_definition.__dict__ if hasattr(tool.tool_definition, '__dict__') else tool.tool_definition, - key_overrides - ) - if meta_config: - params["_meta"] = meta_config - - payload = { - "jsonrpc": "2.0", - "id": 2, - "method": "tools/call", - "params": params - } - return payload - -def build_mcptools_invoke_tool_request( - api_version: str, - headers: Mapping[str, str] = None, - params: Mapping[str, 
str] = None, - **kwargs: Any -) -> HttpRequest: - """Build the HTTP request for invoking an MCP tool. - - :param api_version: API version to use. - :type api_version: str - :param headers: Additional headers for the request. - :type headers: Mapping[str, str], optional - :param params: Query parameters for the request. - :type params: Mapping[str, str], optional - :return: Constructed HttpRequest object. - :rtype: ~azure.core.rest.HttpRequest - """ - _headers = headers or {} - _params = params or {} - _params["api-version"] = api_version - - _url = f"/mcp_tools" - return HttpRequest(method="POST", url=_url, headers=_headers, params=_params, **kwargs) - -class RemoteToolsOperations: - def __init__(self, *args, **kwargs) -> None: - """Initialize Tools API client. - - :param client: Azure PipelineClient for HTTP requests. - :type client: ~azure.core.PipelineClient - :param config: Configuration object. - :type config: ~Tool_Client.models.AzureAIToolClientConfiguration - :raises ValueError: If required parameters are not provided. - """ - input_args = list(args) - self._client : PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config : AzureAIToolClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") - - if self._client is None or self._config is None: - raise ValueError("Both 'client' and 'config' must be provided") - - - # Apply agent name substitution to endpoint paths - self.agent = self._config.agent_name.strip() if self._config.agent_name and self._config.agent_name.strip() else "$default" - self._api_version = API_VERSION - - def resolve_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool]: - """Resolve remote tools from Azure AI Tools API. - - :return: List of tool descriptors from Tools API. 
- :rtype: List[FoundryTool] - """ - result = build_resolve_tools_request(self.agent, self._api_version, self._config.tool_config, self._config.user, kwargs) - if result[0] is None: - return [] - - _request, error_map, remaining_kwargs = result - response = format_and_execute_request(self._client, _request, self._config.endpoint, **remaining_kwargs) - handle_response_error(response, error_map) - return process_resolve_tools_response(response, self._config.tool_config._remote_tools, existing_names) - - def invoke_tool( - self, - tool: FoundryTool, - arguments: Mapping[str, Any], - ) -> Any: - """Invoke a remote tool. - - :param tool: Tool descriptor to invoke. - :type tool: FoundryTool - :param arguments: Input arguments for the tool. - :type arguments: Mapping[str, Any] - :return: Result of the tool invocation. - :rtype: Any - """ - _request, error_map = build_invoke_remote_tool_request(self.agent, self._api_version, tool, self._config.user, arguments) - response = format_and_execute_request(self._client, _request, self._config.endpoint) - handle_response_error(response, error_map) - return process_invoke_remote_tool_response(response) - -def prepare_remotetools_invoke_tool_request_content(tool: FoundryTool, user: UserInfo, arguments: Mapping[str, Any]) -> Any: - payload = { - "toolName": tool.name, - "arguments": dict(arguments), - "remoteServer": to_remote_server(tool.tool_definition).to_dict(), - } - if user: - # Handle both UserInfo objects and dictionaries - if isinstance(user, dict): - if user.get("objectId") and user.get("tenantId"): - payload["user"] = { - "objectId": user["objectId"], - "tenantId": user["tenantId"], - } - elif hasattr(user, "objectId") and hasattr(user, "tenantId"): - if user.objectId and user.tenantId: - payload["user"] = { - "objectId": user.objectId, - "tenantId": user.tenantId, - } - return payload - -def build_remotetools_invoke_tool_request( - agent_name: str, - api_version: str, - headers: Mapping[str, str] = None, - params: 
Mapping[str, str] = None, - **kwargs: Any - ) -> HttpRequest: - """Build the HTTP request for invoking a remote tool. - - :param api_version: API version to use. - :type api_version: str - :param headers: Additional headers for the request. - :type headers: Mapping[str, str], optional - :param params: Query parameters for the request. - :type params: Mapping[str, str], optional - :return: Constructed HttpRequest object. - :rtype: ~azure.core.rest.HttpRequest - """ - _headers = headers or {} - _params = params or {} - _params["api-version"] = api_version - - _url = f"/agents/{agent_name}/tools/invoke" - return HttpRequest(method="POST", url=_url, headers=_headers, params=_params, **kwargs) - - -def prepare_remotetools_resolve_tools_request_content(tool_config: ToolConfigurationParser, user: UserInfo = None) -> ResolveToolsRequest: - resolve_tools_request: ResolveToolsRequest = None - if tool_config._remote_tools: - remote_servers = [] - for remote_tool in tool_config._remote_tools: - remote_servers.append(to_remote_server(remote_tool)) - resolve_tools_request = ResolveToolsRequest(remote_servers, user=user) - - return resolve_tools_request - -def build_remotetools_resolve_tools_request( - agent_name: str, - api_version: str, - headers: Mapping[str, str] = None, - params: Mapping[str, str] = None, - **kwargs: Any - ) -> HttpRequest: - """Build the HTTP request for resolving remote tools. - - :param api_version: API version to use. - :type api_version: str - :param headers: Additional headers for the request. - :type headers: Mapping[str, str], optional - :param params: Query parameters for the request. - :type params: Mapping[str, str], optional - :return: Constructed HttpRequest object. 
- :rtype: ~azure.core.rest.HttpRequest - """ - _headers = headers or {} - _params = params or {} - _params["api-version"] = api_version - - _url = f"/agents/{agent_name}/tools/resolve" - return HttpRequest(method="POST", url=_url, headers=_headers, params=_params, **kwargs) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py index 319e02da7e98..e15ccd86f9cc 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py @@ -43,6 +43,9 @@ def get_dimensions(): def get_project_endpoint(): + project_endpoint = os.environ.get(Constants.AZURE_AI_PROJECT_ENDPOINT) + if project_endpoint: + return project_endpoint project_resource_id = os.environ.get(Constants.AGENT_PROJECT_RESOURCE_ID) if project_resource_id: last_part = project_resource_id.split("/")[-1] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_context.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_context.py new file mode 100644 index 000000000000..f86d1ae0d4ac --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_context.py @@ -0,0 +1,32 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +from typing import AsyncContextManager, ClassVar, Optional + +from azure.ai.agentserver.core.tools import FoundryToolRuntime + + +class AgentServerContext(AsyncContextManager["AgentServerContext"]): + _INSTANCE: ClassVar[Optional["AgentServerContext"]] = None + + def __init__(self, tool_runtime: FoundryToolRuntime): + self._tool_runtime = tool_runtime + + self.__class__._INSTANCE = self + + @classmethod + def get(cls) -> "AgentServerContext": + if cls._INSTANCE is None: + raise ValueError("AgentServerContext has not been initialized.") + return cls._INSTANCE + + @property + def tools(self) -> FoundryToolRuntime: + return self._tool_runtime + + async def __aenter__(self) -> "AgentServerContext": + await self._tool_runtime.__aenter__() + return self + + async def __aexit__(self, exc_type, exc_value, traceback) -> None: + await self._tool_runtime.__aexit__(exc_type, exc_value, traceback) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index cf85b2fcea07..a5f69664cf66 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -12,6 +12,8 @@ from typing import Any, AsyncGenerator, Generator, Optional, Union import uvicorn +from azure.core.credentials import TokenCredential +from azure.core.credentials_async import AsyncTokenCredential from opentelemetry import context as otel_context, trace from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from starlette.applications import Starlette @@ -25,17 +27,19 @@ from azure.identity.aio import DefaultAzureCredential as AsyncDefaultTokenCredential +from ._context import AgentServerContext from ..models import projects as project_models from ..constants import Constants -from 
..logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger, request_context +from ..logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger, get_project_endpoint, request_context from ..models import ( Response as OpenAIResponse, ResponseStreamEvent, ) from .common.agent_run_context import AgentRunContext -from ..client.tools.aio._client import AzureAIToolClient -from ..client.tools._utils._model_base import ToolDefinition, UserInfo +from ..tools import DefaultFoundryToolRuntime, FoundryTool, FoundryToolClient, FoundryToolRuntime, UserInfo, \ + UserInfoContextMiddleware +from ..utils._credential import AsyncTokenCredentialAdapter logger = get_logger() DEBUG_ERRORS = os.environ.get(Constants.AGENT_DEBUG_ERRORS, "false").lower() == "true" @@ -47,18 +51,15 @@ def __init__(self, app: ASGIApp, agent: Optional['FoundryCBAgent'] = None): self.agent = agent async def dispatch(self, request: Request, call_next): - user_info: Optional[UserInfo] = None if request.url.path in ("/runs", "/responses"): try: - user_info = self.set_user_info_to_context_var(request) self.set_request_id_to_context_var(request) payload = await request.json() except Exception as e: logger.error(f"Invalid JSON payload: {e}") return JSONResponse({"error": f"Invalid JSON payload: {e}"}, status_code=400) try: - agent_tools = self.agent.tools if self.agent else [] - request.state.agent_run_context = AgentRunContext(payload, user_info=user_info, agent_tools=agent_tools) + request.state.agent_run_context = AgentRunContext(payload) self.set_run_context_to_context_var(request.state.agent_run_context) except Exception as e: logger.error(f"Context build failed: {e}.", exc_info=True) @@ -93,37 +94,16 @@ def set_run_context_to_context_var(self, run_context): ctx.update(res) request_context.set(ctx) - def set_user_info_to_context_var(self, request) -> Optional[UserInfo]: - user_info: Optional[UserInfo] = None - try: - object_id_header = request.headers.get("x-aml-oid", None) - tenant_id_header = 
request.headers.get("x-aml-tid", None) - if not object_id_header and not tenant_id_header: - return None - user_info = UserInfo( - objectId=object_id_header, - tenantId=tenant_id_header - ) - - except Exception as e: - logger.error(f"Failed to parse X-User-Info header: {e}", exc_info=True) - if user_info: - ctx = request_context.get() or {} - for key, value in user_info.to_dict().items(): - if key == "objectId": - continue # skip user objectId - ctx[f"azure.ai.agentserver.user.{key}"] = str(value) - request_context.set(ctx) - return user_info - class FoundryCBAgent: - _cached_tools_endpoint: Optional[str] = None - _cached_agent_name: Optional[str] = None - - def __init__(self, credentials: Optional["AsyncTokenCredential"] = None, **kwargs: Any) -> None: - self.credentials = credentials or AsyncDefaultTokenCredential() - self.tools = kwargs.get("tools", []) + def __init__(self, + credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] = None, + project_endpoint: Optional[str] = None) -> None: + self.credentials = AsyncTokenCredentialAdapter(credentials) if credentials else AsyncDefaultTokenCredential() + project_endpoint = get_project_endpoint() or project_endpoint + if not project_endpoint: + raise ValueError("Project endpoint is required.") + AgentServerContext(DefaultFoundryToolRuntime(project_endpoint, self.credentials)) async def runs_endpoint(request): # Set up tracing context and span @@ -202,6 +182,7 @@ async def readiness_endpoint(request): ] self.app = Starlette(routes=routes) + UserInfoContextMiddleware.install(self.app) self.app.add_middleware( CORSMiddleware, allow_origins=["*"], @@ -424,91 +405,17 @@ def setup_otlp_exporter(self, endpoint, provider): provider.add_span_processor(processor) logger.info(f"Tracing setup with OTLP exporter: {endpoint}") - @staticmethod - def _configure_endpoint() -> tuple[str, Optional[str]]: - """Configure and return the tools endpoint and agent name from environment variables. 
- - :return: A tuple of (tools_endpoint, agent_name). - :rtype: tuple[str, Optional[str]] - """ - if not FoundryCBAgent._cached_tools_endpoint: - project_endpoint_format: str = "https://{account_name}.services.ai.azure.com/api/projects/{project_name}" - workspace_endpoint = os.getenv(Constants.AZURE_AI_WORKSPACE_ENDPOINT) - tools_endpoint = os.getenv(Constants.AZURE_AI_TOOLS_ENDPOINT) - project_endpoint = os.getenv(Constants.AZURE_AI_PROJECT_ENDPOINT) - - if not tools_endpoint: - # project endpoint corrupted could have been an overridden environment variable - # try to reconstruct tools endpoint from workspace endpoint - # Robustly reconstruct project_endpoint from workspace_endpoint if needed. - - if workspace_endpoint: - # Expected format: - # "https://.api.azureml.ms/subscriptions//resourceGroups// - # providers/Microsoft.MachineLearningServices/workspaces/@@AML" - from urllib.parse import urlparse - parsed_url = urlparse(workspace_endpoint) - path_parts = [p for p in parsed_url.path.split('/') if p] - # Find the 'workspaces' part and extract account_name@project_name@AML - try: - workspaces_idx = path_parts.index("workspaces") - if workspaces_idx + 1 >= len(path_parts): - raise ValueError( - f"Workspace endpoint path does not contain workspace info " - f"after 'workspaces': {workspace_endpoint}" - ) - workspace_info = path_parts[workspaces_idx + 1] - workspace_parts = workspace_info.split('@') - if len(workspace_parts) < 2: - raise ValueError( - f"Workspace info '{workspace_info}' does not contain both account_name " - f"and project_name separated by '@'." 
- ) - account_name = workspace_parts[0] - project_name = workspace_parts[1] - # Documented expected format for PROJECT_ENDPOINT_FORMAT: - # "https://.api.azureml.ms/api/projects/{project_name}" - project_endpoint = project_endpoint_format.format( - account_name=account_name, project_name=project_name - ) - except (ValueError, IndexError) as e: - raise ValueError( - f"Failed to reconstruct project endpoint from workspace endpoint " - f"'{workspace_endpoint}': {e}" - ) from e - # should never reach here - logger.info("Reconstructed tools endpoint from project endpoint %s", project_endpoint) - tools_endpoint = project_endpoint - - tools_endpoint = project_endpoint - - if not tools_endpoint: - raise ValueError( - "Project endpoint needed for Azure AI tools endpoint is not found. " - ) - FoundryCBAgent._cached_tools_endpoint = tools_endpoint - - agent_name = os.getenv(Constants.AGENT_NAME) - if agent_name is None: - if os.getenv("CONTAINER_APP_NAME"): - raise ValueError( - "Agent name needed for Azure AI hosted agents is not found. 
" - ) - agent_name = "$default" - FoundryCBAgent._cached_agent_name = agent_name - - return FoundryCBAgent._cached_tools_endpoint, FoundryCBAgent._cached_agent_name - def get_tool_client( - self, tools: Optional[list[ToolDefinition]], user_info: Optional[UserInfo] - ) -> AzureAIToolClient: + self, tools: Optional[list[FoundryTool]], user_info: Optional[UserInfo] + ) -> FoundryToolClient: + # TODO: remove this method logger.debug("Creating AzureAIToolClient with tools: %s", tools) if not self.credentials: raise ValueError("Credentials are required to create Tool Client.") tools_endpoint, agent_name = self._configure_endpoint() - return AzureAIToolClient( + return FoundryToolClient( endpoint=tools_endpoint, credential=self.credentials, tools=tools, diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py index 5289df0b3524..53eb15af3550 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py @@ -7,7 +7,8 @@ from ...models.projects import AgentId, AgentReference, ResponseConversation1 from .id_generator.foundry_id_generator import FoundryIdGenerator from .id_generator.id_generator import IdGenerator -from ...client.tools._model_base import UserInfo +from ...tools import UserInfo + logger = get_logger() @@ -65,12 +66,14 @@ def get_conversation_object(self) -> ResponseConversation1: def get_tools(self) -> list: # request tools take precedence over agent tools + # TODO: remove this method request_tools = self.request.get("tools", []) if not request_tools: return self._agent_tools return request_tools def get_user_info(self) -> UserInfo: + # TODO: remove this method return self._user_info diff --git 
a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/__init__.py new file mode 100644 index 000000000000..f158cd370990 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/__init__.py @@ -0,0 +1,17 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) + +from .client._client import FoundryToolClient +from ._exceptions import * +from .client._models import FoundryConnectedTool, FoundryHostedMcpTool, FoundryTool, FoundryToolProtocol, \ + FoundryToolSource, ResolvedFoundryTool, SchemaDefinition, SchemaProperty, SchemaType, UserInfo +from .runtime._catalog import * +from .runtime._facade import * +from .runtime._invoker import * +from .runtime._resolver import * +from .runtime._runtime import * +from .runtime._starlette import * +from .runtime._user import * \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/_exceptions.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/_exceptions.py new file mode 100644 index 000000000000..b91c1f71c7a3 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/_exceptions.py @@ -0,0 +1,76 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from .client._models import FoundryTool, ResolvedFoundryTool + + +class ToolInvocationError(RuntimeError): + """Raised when a tool invocation fails. 
+ + :ivar ResolvedFoundryTool tool: The tool that failed during invocation. + + :param str message: Human-readable message describing the error. + :param ResolvedFoundryTool tool: The tool that failed during invocation. + + This exception is raised when an error occurs during the invocation of a tool, + providing details about the failure. + """ + + def __init__(self, message: str, tool: ResolvedFoundryTool): + super().__init__(message) + self.tool = tool + + +class OAuthConsentRequiredError(RuntimeError): + """Raised when the service requires end-user OAuth consent. + + This exception is raised when a tool or service operation requires explicit + OAuth consent from the end user before the operation can proceed. + + :ivar str message: Human-readable guidance returned by the service. + :ivar str consent_url: Link that the end user must visit to provide consent. + :ivar str project_connection_id: The project connection ID related to the consent request. + + :param str message: Human-readable guidance returned by the service. + :param str consent_url: Link that the end user must visit to provide the required consent. + :param str project_connection_id: The project connection ID related to the consent request. + """ + + def __init__(self, message: str, consent_url: str, project_connection_id: str): + super().__init__(message) + self.message = message + self.consent_url = consent_url + self.project_connection_id = project_connection_id + + +class UnableToResolveToolInvocationError(RuntimeError): + """Raised when a tool cannot be resolved. + + :ivar str message: Human-readable message describing the error. + :ivar FoundryTool tool: The tool that could not be resolved. + + :param str message: Human-readable message describing the error. + :param FoundryTool tool: The tool that could not be resolved. + + This exception is raised when a tool cannot be found or resolved + from the available tool sources. 
+ """ + + def __init__(self, message: str, tool: FoundryTool): + super().__init__(message) + self.tool = tool + + +class InvalidToolFacadeError(RuntimeError): + """Raised when a tool facade is invalid. + + This exception is raised when a tool facade does not conform + to the expected structure or contains invalid data. + """ + pass + diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/__init__.py similarity index 73% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/__init__.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/__init__.py index fdf8caba9ef5..28077537d94b 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/__init__.py @@ -2,4 +2,4 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -__path__ = __import__("pkgutil").extend_path(__path__, __name__) +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py new file mode 100644 index 000000000000..cbd0dbba6aa6 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py @@ -0,0 +1,174 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# ---------------------------------------------------------
+import asyncio
+import itertools
+from collections import defaultdict
+from typing import Any, AsyncContextManager, AsyncIterable, Awaitable, Callable, Collection, Coroutine, DefaultDict, Dict, \
+    Iterable, List, \
+    Mapping, Optional, \
+    Tuple
+
+from azure.core import AsyncPipelineClient
+from azure.core.credentials_async import AsyncTokenCredential
+from azure.core.tracing.decorator_async import distributed_trace_async
+
+from ._configuration import FoundryToolClientConfiguration
+from ._models import FoundryTool, FoundryToolDetails, FoundryToolSource, ResolvedFoundryTool, UserInfo
+from .operations._foundry_connected_tools import FoundryConnectedToolsOperations
+from .operations._foundry_hosted_mcp_tools import FoundryMcpToolsOperations
+from .._exceptions import ToolInvocationError
+
+
+class FoundryToolClient(AsyncContextManager["FoundryToolClient"]):
+    """Asynchronous client for aggregating tools from Azure AI MCP and Tools APIs.
+
+    This client provides access to tools from both MCP (Model Context Protocol) servers
+    and Azure AI Tools API endpoints, enabling unified tool discovery and invocation.
+
+    :param str endpoint:
+        The fully qualified endpoint for the Azure AI Agents service.
+        Example: "https://<account-name>.api.azureml.ms"
+    :param credential:
+        Credential for authenticating requests to the service.
+        Use credentials from azure-identity like DefaultAzureCredential.
+    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
+    """
+
+    def __init__(self, endpoint: str, credential: "AsyncTokenCredential"):
+        """Initialize the asynchronous Azure AI Tool Client.
+
+        :param endpoint: The service endpoint URL.
+        :type endpoint: str
+        :param credential: Credentials for authenticating requests.
+        :type credential: ~azure.core.credentials_async.AsyncTokenCredential
+        """
+        # noinspection PyTypeChecker
+        config = FoundryToolClientConfiguration(credential)
+        self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=endpoint, config=config)
+
+        self._hosted_mcp_tools = FoundryMcpToolsOperations(self._client)
+        self._connected_tools = FoundryConnectedToolsOperations(self._client)
+
+    @distributed_trace_async
+    async def list_tools(self,
+                         tools: Collection[FoundryTool],
+                         agent_name,
+                         user: Optional[UserInfo] = None) -> List[ResolvedFoundryTool]:
+        """List all available tools from configured sources.
+
+        Retrieves tools from both MCP servers and Azure AI Tools API endpoints,
+        returning them as ResolvedFoundryTool instances ready for invocation.
+        :param tools: Collection of FoundryTool instances to resolve.
+        :type tools: Collection[~FoundryTool]
+        :param user: Information about the user requesting the tools.
+        :type user: Optional[UserInfo]
+        :param agent_name: Name of the agent requesting the tools.
+        :type agent_name: str
+        :return: List of resolved Foundry tools.
+        :rtype: List[ResolvedFoundryTool]
+        :raises ~azure.ai.agentserver.core.tools._exceptions.OAuthConsentRequiredError:
+            Raised when the service requires user OAuth consent.
+        :raises ~azure.core.exceptions.HttpResponseError:
+            Raised for HTTP communication failures.
+        """
+        resolved_tools: List[ResolvedFoundryTool] = []
+        results = await self._list_tools_details_internal(tools, agent_name, user)
+        for definition, details in results:
+            resolved_tools.append(ResolvedFoundryTool(definition=definition, details=details))
+        return resolved_tools
+
+    @distributed_trace_async
+    async def list_tools_details(self,
+                                 tools: Collection[FoundryTool],
+                                 agent_name,
+                                 user: Optional[UserInfo] = None) -> Mapping[str, List[FoundryToolDetails]]:
+        """List all available tools from configured sources.
+
+        Retrieves tools from both MCP servers and Azure AI Tools API endpoints,
+        returning a mapping from tool definition IDs to their resolved details.
+        :param tools: Collection of FoundryTool instances to resolve.
+        :type tools: Collection[~FoundryTool]
+        :param user: Information about the user requesting the tools.
+        :type user: Optional[UserInfo]
+        :param agent_name: Name of the agent requesting the tools.
+        :type agent_name: str
+        :return: Mapping of tool IDs to lists of FoundryToolDetails.
+        :rtype: Mapping[str, List[FoundryToolDetails]]
+        :raises ~azure.ai.agentserver.core.tools._exceptions.OAuthConsentRequiredError:
+            Raised when the service requires user OAuth consent.
+        :raises ~azure.core.exceptions.HttpResponseError:
+            Raised for HTTP communication failures.
+        """
+        resolved_tools: Dict[str, List[FoundryToolDetails]] = defaultdict(list)
+        results = await self._list_tools_details_internal(tools, agent_name, user)
+        for definition, details in results:
+            resolved_tools[definition.id].append(details)
+        return resolved_tools
+
+    async def _list_tools_details_internal(
+            self,
+            tools: Collection[FoundryTool],
+            agent_name,
+            user: Optional[UserInfo] = None,
+    ) -> Iterable[Tuple[FoundryTool, FoundryToolDetails]]:
+        tools_by_source: DefaultDict[FoundryToolSource, List[FoundryTool]] = defaultdict(list)
+        for t in tools:
+            tools_by_source[t.source].append(t)
+
+        listing_tools = []
+        if FoundryToolSource.HOSTED_MCP in tools_by_source:
+            # noinspection PyTypeChecker
+            listing_tools.append(asyncio.create_task(
+                self._hosted_mcp_tools.list_tools(tools_by_source[FoundryToolSource.HOSTED_MCP])
+            ))
+        if FoundryToolSource.CONNECTED in tools_by_source:
+            # noinspection PyTypeChecker
+            listing_tools.append(asyncio.create_task(
+                self._connected_tools.list_tools(tools_by_source[FoundryToolSource.CONNECTED], user, agent_name)
+            ))
+        iters = await asyncio.gather(*listing_tools)
+        return itertools.chain.from_iterable(iters)
+
+    @distributed_trace_async
+    async def invoke_tool(self,
+                          tool: ResolvedFoundryTool,
+                          arguments: Dict[str, Any],
+                          agent_name: str,
+                          user: Optional[UserInfo] = None) -> Any:
+        """Invoke a resolved Foundry tool with the given arguments.
+
+        :param tool: The resolved tool to invoke, carrying both the tool
+            definition and its resolved details.
+        :type tool: ResolvedFoundryTool
+        :param arguments: Arguments to pass to the tool.
+        :type arguments: Dict[str, Any]
+        :param user: Information about the user invoking the tool.
+        :type user: Optional[UserInfo]
+        :param agent_name: Name of the agent invoking the tool.
+        :type agent_name: str
+        :return: The result of invoking the tool.
+        :rtype: Any
+        :raises ~azure.ai.agentserver.core.tools._exceptions.OAuthConsentRequiredError:
+            Raised when the service requires user OAuth consent.
+        :raises ~azure.core.exceptions.HttpResponseError:
+            Raised for HTTP communication failures.
+        :raises ~azure.ai.agentserver.core.tools._exceptions.ToolInvocationError:
+            Raised when the tool invocation fails or source is not supported.
+        """
+        if tool.source is FoundryToolSource.HOSTED_MCP:
+            return await self._hosted_mcp_tools.invoke_tool(tool, arguments)
+        if tool.source is FoundryToolSource.CONNECTED:
+            return await self._connected_tools.invoke_tool(tool, arguments, user, agent_name)
+        raise ToolInvocationError(f"Unsupported tool source: {tool.source}", tool=tool)
+
+    async def close(self) -> None:
+        """Close the underlying HTTP pipeline."""
+        await self._client.close()
+
+    async def __aenter__(self) -> "FoundryToolClient":
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *exc_details: Any) -> None:
+        await self._client.__aexit__(*exc_details)
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_configuration.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_configuration.py
new file mode 100644
index 000000000000..5c3f19a61d55
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_configuration.py
@@ -0,0 +1,35 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from azure.core.configuration import Configuration +from azure.core.credentials_async import AsyncTokenCredential +from azure.core.pipeline import policies + +from ...application._package_metadata import get_current_app + + +class FoundryToolClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes + """Configuration for Azure AI Tool Client. + + Manages authentication, endpoint configuration, and policy settings for the + Azure AI Tool Client. This class is used internally by the client and should + not typically be instantiated directly. + + :param credential: + Azure TokenCredential for authentication. + :type credential: ~azure.core.credentials.TokenCredential + """ + + def __init__(self, credential: "AsyncTokenCredential"): + super().__init__() + + self.retry_policy = policies.AsyncRetryPolicy() + self.logging_policy = policies.NetworkTraceLoggingPolicy() + self.request_id_policy = policies.RequestIdPolicy() + self.http_logging_policy = policies.HttpLoggingPolicy() + self.user_agent_policy = policies.UserAgentPolicy( + base_user_agent=get_current_app().as_user_agent("FoundryToolClient")) + self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy( + credential, "https://ai.azure.com/.default" + ) + self.redirect_policy = policies.AsyncRedirectPolicy() diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_models.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_models.py new file mode 100644 index 000000000000..8664e23c7c8b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_models.py @@ -0,0 +1,552 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# --------------------------------------------------------- +import asyncio +import inspect +from abc import ABC, abstractmethod +from dataclasses import dataclass, field +from enum import Enum +from typing import Annotated, Any, Awaitable, Callable, ClassVar, Dict, Iterable, List, Literal, Mapping, Optional, Set, Type, Union + +from azure.core import CaseInsensitiveEnumMeta +from pydantic import AliasChoices, AliasPath, BaseModel, Discriminator, Field, ModelWrapValidatorHandler, Tag, \ + TypeAdapter, model_validator + +from .._exceptions import OAuthConsentRequiredError + + +class FoundryToolSource(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Identifies the origin of a tool. + + Specifies whether a tool comes from an MCP (Model Context Protocol) server + or from the Azure AI Tools API (remote tools). + """ + + HOSTED_MCP = "hosted_mcp" + CONNECTED = "connected" + + +class FoundryToolProtocol(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Identifies the protocol used by a connected tool.""" + + MCP = "mcp" + A2A = "a2a" + + +@dataclass(frozen=True, kw_only=True, eq=False) +class FoundryTool(ABC): + """Definition of a foundry tool including its parameters.""" + source: FoundryToolSource = field(init=False) + + @property + @abstractmethod + def id(self) -> str: + """Unique identifier for the tool.""" + raise NotImplementedError + + def __str__(self): + return self.id + + +@dataclass(frozen=True, kw_only=True, eq=False) +class FoundryHostedMcpTool(FoundryTool): + """Foundry MCP tool definition. + + :ivar str name: Name of MCP tool. + :ivar Mapping[str, Any] configuration: Tools configuration. 
+ """ + source: Literal[FoundryToolSource.HOSTED_MCP] = field(init=False, default=FoundryToolSource.HOSTED_MCP) + name: str + configuration: Optional[Mapping[str, Any]] = None + + @property + def id(self) -> str: + """Unique identifier for the tool.""" + return f"{self.source}:{self.name}" + + +@dataclass(frozen=True, kw_only=True, eq=False) +class FoundryConnectedTool(FoundryTool): + """Foundry connected tool definition. + + :ivar str project_connection_id: connection name of foundry tool. + """ + source: Literal[FoundryToolSource.CONNECTED] = field(init=False, default=FoundryToolSource.CONNECTED) + protocol: str + project_connection_id: str + + @property + def id(self) -> str: + return f"{self.source}:{self.protocol}:{self.project_connection_id}" + + +@dataclass(frozen=True) +class FoundryToolDetails: + """Details about a Foundry tool. + + :ivar str name: Name of the tool. + :ivar str description: Description of the tool. + :ivar SchemaDefinition input_schema: Input schema for the tool parameters. + :ivar Optional[SchemaDefinition] metadata: Optional metadata schema for the tool. + """ + name: str + description: str + input_schema: "SchemaDefinition" + metadata: Optional["SchemaDefinition"] = None + + +@dataclass(frozen=True) +class ResolvedFoundryTool: + """Resolved Foundry tool with definition and details. + + :ivar ToolDefinition definition: + Optional tool definition object, or None. + :ivar FoundryToolDetails details: + Details about the tool, including name, description, and input schema. 
+ """ + + definition: FoundryTool + details: FoundryToolDetails + invoker: Optional[Callable[..., Awaitable[Any]]] = None # TODO: deprecated + + @property + def id(self) -> str: + return f"{self.definition.id}:{self.details.name}" + + @property + def source(self) -> FoundryToolSource: + """Origin of the tool.""" + return self.definition.source + + @property + def name(self) -> str: + """Name of the tool.""" + return self.details.name + + @property + def description(self) -> str: + """Description of the tool.""" + return self.details.description + + @property + def input_schema(self) -> "SchemaDefinition": + """Input schema of the tool.""" + return self.details.input_schema + + @property + def metadata(self) -> Optional["SchemaDefinition"]: + """Metadata schema of the tool, if any.""" + return self.details.metadata + + +@dataclass(frozen=True) +class UserInfo: + """Represents user information. + + :ivar str object_id: User's object identifier. + :ivar str tenant_id: Tenant identifier. + """ + + object_id: str + tenant_id: str + + +class SchemaType(str, Enum): + """ + Enumeration of possible schema types. + + :ivar py_type: The corresponding Python runtime type for this schema type + (e.g., ``SchemaType.STRING.py_type is str``). 
+ """ + + py_type: Type[Any] + """The corresponding Python runtime type for this schema type.""" + + STRING = ("string", str) + """Schema type for string values (maps to ``str``).""" + + NUMBER = ("number", float) + """Schema type for numeric values with decimals (maps to ``float``).""" + + INTEGER = ("integer", int) + """Schema type for integer values (maps to ``int``).""" + + BOOLEAN = ("boolean", bool) + """Schema type for boolean values (maps to ``bool``).""" + + ARRAY = ("array", list) + """Schema type for array values (maps to ``list``).""" + + OBJECT = ("object", dict) + """Schema type for object/dictionary values (maps to ``dict``).""" + + def __new__(cls, value: str, py_type: Type[Any]): + """ + Create an enum member whose value is the schema type string, while also + attaching the mapped Python type. + + :param value: The serialized schema type string (e.g. ``"string"``). + :param py_type: The mapped Python runtime type (e.g. ``str``). + """ + obj = str.__new__(cls, value) + obj._value_ = value + obj.py_type = py_type + return obj + + @classmethod + def from_python_type(cls, t: Type[Any]) -> "SchemaType": + """ + Get the matching :class:`SchemaType` for a given Python runtime type. + + :param t: A Python runtime type (e.g. ``str``, ``int``, ``float``). + :returns: The corresponding :class:`SchemaType`. + :raises ValueError: If ``t`` is not supported by this enumeration. + """ + for member in cls: + if member.py_type is t: + return member + raise ValueError(f"Unsupported python type: {t!r}") + + +class SchemaProperty(BaseModel): + """ + A JSON Schema-like description of a single property (field) or nested schema node. + + This model is intended to be recursively nestable via :attr:`items` (for arrays) + and :attr:`properties` (for objects). + + :ivar type: The schema node type (e.g., ``string``, ``object``, ``array``). + :ivar description: Optional human-readable description of the property. + :ivar items: The item schema for an ``array`` type. 
Typically set when + :attr:`type` is :data:`~SchemaType.ARRAY`. + :ivar properties: Nested properties for an ``object`` type. Typically set when + :attr:`type` is :data:`~SchemaType.OBJECT`. Keys are property names, values + are their respective schemas. + :ivar default: Optional default value for the property. + :ivar required: For an ``object`` schema node, the set of required property + names within :attr:`properties`. (This mirrors JSON Schema’s ``required`` + keyword; it is *not* “this property is required in a parent object”.) + """ + + type: SchemaType + description: Optional[str] = None + items: Optional["SchemaProperty"] = None + properties: Optional[Mapping[str, "SchemaProperty"]] = None + default: Any = None + required: Optional[Set[str]] = None + + def has_default(self) -> bool: + """ + Check if the property has a default value defined. + + :return: True if a default value is set, False otherwise. + :rtype: bool + """ + return "default" in self.model_fields_set + + +class SchemaDefinition(BaseModel): + """ + A top-level JSON Schema-like definition for an object. + + :ivar type: The schema type of the root. Typically :data:`~SchemaType.OBJECT`. + :ivar properties: Mapping of top-level property names to their schemas. + :ivar required: Set of required top-level property names within + :attr:`properties`. 
+ """ + + type: SchemaType = SchemaType.OBJECT + properties: Mapping[str, SchemaProperty] + required: Optional[Set[str]] = None + + def extract_from(self, + datasource: Mapping[str, Any], + property_alias: Optional[Dict[str, List[str]]] = None) -> Dict[str, Any]: + return self._extract(datasource, self.properties, self.required, property_alias) + + @classmethod + def _extract(cls, + datasource: Mapping[str, Any], + properties: Mapping[str, SchemaProperty], + required: Optional[Set[str]] = None, + property_alias: Optional[Dict[str, List[str]]] = None) -> Dict[str, Any]: + result: Dict[str, Any] = {} + + for property_name, schema in properties.items(): + # Determine the keys to look for in the datasource + keys_to_check = [property_name] + if property_alias and property_name in property_alias: + keys_to_check.extend(property_alias[property_name]) + + # Find the first matching key in the datasource + value_found = False + for key in keys_to_check: + if key in datasource: + value = datasource[key] + value_found = True + break + + if not value_found and schema.has_default(): + value = schema.default + value_found = True + + if not value_found: + # If the property is required but not found, raise an error + if required and property_name in required: + raise KeyError(f"Required property '{property_name}' not found in datasource.") + # If not found and not required, skip to next property + continue + + # Process the value based on its schema type + if schema.type == SchemaType.OBJECT and schema.properties: + if isinstance(value, Mapping): + nested_value = cls._extract( + value, + schema.properties, + schema.required, + property_alias + ) + result[property_name] = nested_value + elif schema.type == SchemaType.ARRAY and schema.items: + if isinstance(value, Iterable): + nested_list = [] + for item in value: + if schema.items.type == SchemaType.OBJECT and schema.items.properties: + if isinstance(item, dict): + nested_item = SchemaDefinition._extract( + item, + 
schema.items.properties, + schema.items.required, + property_alias + ) + nested_list.append(nested_item) + else: + nested_list.append(item) + result[property_name] = nested_list + else: + result[property_name] = value + + return result + + +class RawFoundryHostedMcpTool(BaseModel): + """Pydantic model for a single MCP tool. + + :ivar str name: Unique name identifier of the tool. + :ivar Optional[str] title: Display title of the tool, defaults to name if not provided. + :ivar str description: Human-readable description of the tool. + :ivar SchemaDefinition input_schema: JSON schema for tool input parameters. + :ivar Optional[SchemaDefinition] meta: Optional metadata for the tool. + """ + + name: str + title: Optional[str] = None + description: str = "" + input_schema: SchemaDefinition = Field( + default_factory=SchemaDefinition, + validation_alias="inputSchema" + ) + meta: Optional[SchemaDefinition] = Field(default=None, validation_alias="_meta") + + def model_post_init(self, __context: Any) -> None: + if self.title is None: + self.title = self.name + + +class RawFoundryHostedMcpTools(BaseModel): + """Pydantic model for the result containing list of tools. + + :ivar List[RawFoundryHostedMcpTool] tools: List of MCP tool definitions. + """ + + tools: List[RawFoundryHostedMcpTool] = Field(default_factory=list) + + +class ListFoundryHostedMcpToolsResponse(BaseModel): + """Pydantic model for the complete MCP tools/list JSON-RPC response. + + :ivar str jsonrpc: JSON-RPC version, defaults to "2.0". + :ivar int id: Request identifier, defaults to 0. + :ivar RawFoundryHostedMcpTools result: Result containing the list of tools. 
+ """ + + jsonrpc: str = "2.0" + id: int = 0 + result: RawFoundryHostedMcpTools = Field( + default_factory=RawFoundryHostedMcpTools + ) + + +class BaseConnectedToolsErrorResult(BaseModel, ABC): + """Base model for connected tools error responses.""" + + @abstractmethod + def as_exception(self) -> Exception: + """Convert the error result to an appropriate exception. + + :return: An exception representing the error. + :rtype: Exception + """ + raise NotImplementedError + + +class OAuthConsentRequiredErrorResult(BaseConnectedToolsErrorResult): + """Model for OAuth consent required error responses. + + :ivar Literal["OAuthConsentRequired"] type: Error type identifier. + :ivar Optional[str] consent_url: URL for user consent, if available. + :ivar Optional[str] message: Human-readable error message. + :ivar Optional[str] project_connection_id: Project connection ID related to the error. + """ + + type: Literal["OAuthConsentRequired"] + consent_url: str = Field( + validation_alias=AliasChoices( + AliasPath("toolResult", "consentUrl"), + AliasPath("toolResult", "message"), + ), + ) + message: str = Field( + validation_alias=AliasPath("toolResult", "message"), + ) + project_connection_id: str = Field( + validation_alias=AliasPath("toolResult", "projectConnectionId"), + ) + + def as_exception(self) -> Exception: + return OAuthConsentRequiredError(self.message, self.consent_url, self.project_connection_id) + + +class RawFoundryConnectedTool(BaseModel): + """Pydantic model for a single connected tool. + + :ivar str name: Name of the tool. + :ivar str description: Description of the tool. + :ivar Optional[SchemaDefinition] input_schema: Input schema for the tool parameters. + """ + name: str + description: str + input_schema: SchemaDefinition = Field( + default=SchemaDefinition, + validation_alias="parameters", + ) + + +class RawFoundryConnectedRemoteServer(BaseModel): + """Pydantic model for a connected remote server. + + :ivar str protocol: Protocol used by the remote server. 
+    :ivar str project_connection_id: Project connection ID of the remote server.
+    :ivar List[RawFoundryConnectedTool] tools: List of connected tools from this server.
+    """
+    protocol: str = Field(
+        validation_alias=AliasPath("remoteServer", "protocol"),
+    )
+    project_connection_id: str = Field(
+        validation_alias=AliasPath("remoteServer", "projectConnectionId"),
+    )
+    tools: List[RawFoundryConnectedTool] = Field(
+        default_factory=list,
+        validation_alias="manifest",
+    )
+
+
+class ListConnectedToolsResult(BaseModel):
+    """Pydantic model for the result of listing connected tools.
+
+    :ivar List[RawFoundryConnectedRemoteServer] servers: List of connected remote servers.
+    """
+    servers: List[RawFoundryConnectedRemoteServer] = Field(
+        default_factory=list,
+        validation_alias="tools",
+    )
+
+
+class ListFoundryConnectedToolsResponse(BaseModel):
+    """Pydantic model for the response of listing the connected tools.
+
+    :ivar Optional[ListConnectedToolsResult] result: Result containing connected tool servers.
+    :ivar Optional[BaseConnectedToolsErrorResult] error: Error result, if any.
+ """ + + result: Optional[ListConnectedToolsResult] = None + error: Optional[BaseConnectedToolsErrorResult] = None + + # noinspection DuplicatedCode + _TYPE_ADAPTER: ClassVar[TypeAdapter] = TypeAdapter( + Annotated[ + Union[ + Annotated[ + Annotated[ + Union[OAuthConsentRequiredErrorResult], + Field(discriminator="type") + ], + Tag("ErrorType") + ], + Annotated[ListConnectedToolsResult, Tag("ResultType")], + ], + Discriminator( + lambda payload: "ErrorType" if isinstance(payload, dict) and "type" in payload else "ResultType" + ), + ]) + + @model_validator(mode="wrap") + @classmethod + def _validator(cls, data: Any, handler: ModelWrapValidatorHandler) -> "ListFoundryConnectedToolsResponse": + parsed = cls._TYPE_ADAPTER.validate_python(data) + normalized = {} + if isinstance(parsed, ListConnectedToolsResult): + normalized["result"] = parsed + elif isinstance(parsed, BaseConnectedToolsErrorResult): + normalized["error"] = parsed + return handler(normalized) + + +class InvokeConnectedToolsResult(BaseModel): + """Pydantic model for the result of invoking a connected tool. + + :ivar Any value: The result value from the tool invocation. + """ + value: Any = Field(validation_alias="toolResult") + + +class InvokeFoundryConnectedToolsResponse(BaseModel): + """Pydantic model for the response of invoking a connected tool. + + :ivar Optional[InvokeConnectedToolsResult] result: Result of the tool invocation. + :ivar Optional[BaseConnectedToolsErrorResult] error: Error result, if any. 
+ """ + result: Optional[InvokeConnectedToolsResult] = None + error: Optional[BaseConnectedToolsErrorResult] = None + + # noinspection DuplicatedCode + _TYPE_ADAPTER: ClassVar[TypeAdapter] = TypeAdapter( + Annotated[ + Union[ + Annotated[ + Annotated[ + Union[OAuthConsentRequiredErrorResult], + Field(discriminator="type") + ], + Tag("ErrorType") + ], + Annotated[InvokeConnectedToolsResult, Tag("ResultType")], + ], + Discriminator( + lambda payload: "ErrorType" if isinstance(payload, dict) and + # handle other error types in the future + payload.get("type") == "OAuthConsentRequired" + else "ResultType" + ), + ]) + + @model_validator(mode="wrap") + @classmethod + def _validator(cls, data: Any, handler: ModelWrapValidatorHandler) -> "InvokeFoundryConnectedToolsResponse": + parsed = cls._TYPE_ADAPTER.validate_python(data) + normalized = {} + if isinstance(parsed, InvokeConnectedToolsResult): + normalized["result"] = parsed + elif isinstance(parsed, BaseConnectedToolsErrorResult): + normalized["error"] = parsed + return handler(normalized) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_base.py new file mode 100644 index 000000000000..5248ab7aa7fa --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_base.py @@ -0,0 +1,73 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +from __future__ import annotations + +from abc import ABC +import json +from typing import Any, ClassVar, MutableMapping, Type + +from azure.core import AsyncPipelineClient +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, \ + ResourceNotFoundError, ResourceNotModifiedError, map_error +from azure.core.rest import AsyncHttpResponse, HttpRequest + +ErrorMapping = MutableMapping[int, Type[HttpResponseError]] + + +class BaseOperations(ABC): + DEFAULT_ERROR_MAP: ClassVar[ErrorMapping] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + + def __init__(self, client: AsyncPipelineClient, error_map: ErrorMapping | None = None) -> None: + self._client = client + self._error_map = self._prepare_error_map(error_map) + + @classmethod + def _prepare_error_map(cls, custom_error_map: ErrorMapping | None = None) -> MutableMapping: + """Prepare error map by merging default and custom error mappings. + + :param custom_error_map: Custom error mappings to merge + :return: Merged error map + """ + error_map = cls.DEFAULT_ERROR_MAP + if custom_error_map: + error_map = dict(cls.DEFAULT_ERROR_MAP) + error_map.update(custom_error_map) + return error_map + + async def _send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> AsyncHttpResponse: + """Send an HTTP request. + + :param request: HTTP request + :param stream: Stream to be used for HTTP requests + :param kwargs: Keyword arguments + + :return: Response object + """ + response: AsyncHttpResponse = await self._client.send_request(request, stream=stream, **kwargs) + self._handle_response_error(response) + return response + + def _handle_response_error(self, response: AsyncHttpResponse) -> None: + """Handle HTTP response errors. 
+ + :param response: HTTP response to check + :raises HttpResponseError: If response status is not 200 + """ + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=self._error_map) + raise HttpResponseError(response=response) + + def _extract_response_json(self, response: AsyncHttpResponse) -> Any: + try: + payload_text = response.text() + payload_json = json.loads(payload_text) if payload_text else {} + except AttributeError as e: + payload_bytes = response.body() + payload_json = json.loads(payload_bytes.decode("utf-8")) if payload_bytes else {} + return payload_json \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_foundry_connected_tools.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_foundry_connected_tools.py new file mode 100644 index 000000000000..83138a17ad9a --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_foundry_connected_tools.py @@ -0,0 +1,180 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +from abc import ABC +from typing import Any, AsyncIterable, ClassVar, Dict, Iterable, List, Mapping, Optional, Tuple, cast + +from azure.core.pipeline.transport import HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async + +from ._base import BaseOperations +from .._models import FoundryConnectedTool, FoundryToolDetails, FoundryToolSource, InvokeFoundryConnectedToolsResponse, \ + ListFoundryConnectedToolsResponse, ResolvedFoundryTool, UserInfo +from ..._exceptions import ToolInvocationError + + +class BaseFoundryConnectedToolsOperations(BaseOperations, ABC): + """Base operations for Foundry connected tools.""" + + _API_VERSION: ClassVar[str] = "2025-11-15-preview" + + _HEADERS: ClassVar[Dict[str, str]] = { + "Content-Type": "application/json", + "Accept": "application/json", + } + + _QUERY_PARAMS: ClassVar[Dict[str, Any]] = { + "api-version": _API_VERSION + } + + @staticmethod + def _list_tools_path(agent_name: str) -> str: + return f"/agents/{agent_name}/tools/resolve" + + @staticmethod + def _invoke_tool_path(agent_name: str) -> str: + return f"/agents/{agent_name}/tools/invoke" + + def _build_list_tools_request( + self, + tools: List[FoundryConnectedTool], + user: Optional[UserInfo], + agent_name: str,) -> HttpRequest: + payload: Dict[str, Any] = { + "remoteServers": [ + { + "projectConnectionId": tool.project_connection_id, + "protocol": tool.protocol, + } for tool in tools + ], + } + if user: + payload["user"] = { + "objectId": user.object_id, + "tenantId": user.tenant_id, + } + return self._client.post( + self._list_tools_path(agent_name), + params=self._QUERY_PARAMS, + headers=self._HEADERS, + content=payload) + + @classmethod + def _convert_listed_tools( + cls, + resp: ListFoundryConnectedToolsResponse, + input_tools: List[FoundryConnectedTool]) -> Iterable[Tuple[FoundryConnectedTool, FoundryToolDetails]]: + if resp.error: + raise resp.error.as_exception() + if not 
resp.result: + return + + tool_map = {(tool.project_connection_id, tool.protocol): tool for tool in input_tools} + for server in resp.result.servers: + input_tool = tool_map.get((server.project_connection_id, server.protocol)) + if not input_tool: + continue + + for tool in server.tools: + details = FoundryToolDetails( + name=tool.name, + description=tool.description, + input_schema=tool.input_schema, + ) + yield input_tool, details + + def _build_invoke_tool_request( + self, + tool: ResolvedFoundryTool, + arguments: Dict[str, Any], + user: Optional[UserInfo], + agent_name: str) -> HttpRequest: + if tool.definition.source != FoundryToolSource.CONNECTED: + raise ToolInvocationError(f"Tool {tool.name} is not a Foundry connected tool.", tool=tool) + + tool_def = cast(FoundryConnectedTool, tool.definition) + payload: Dict[str, Any] = { + "toolName": tool.name, + "arguments": arguments, + "remoteServer": { + "projectConnectionId": tool_def.project_connection_id, + "protocol": tool_def.protocol, + }, + } + if user: + payload["user"] = { + "objectId": user.object_id, + "tenantId": user.tenant_id, + } + return self._client.post( + self._invoke_tool_path(agent_name), + params=self._QUERY_PARAMS, + headers=self._HEADERS, + content=payload) + + @classmethod + def _convert_invoke_result(cls, resp: InvokeFoundryConnectedToolsResponse) -> Any: + if resp.error: + raise resp.error.as_exception() + if not resp.result: + return None + return resp.result.value + + +class FoundryConnectedToolsOperations(BaseFoundryConnectedToolsOperations): + """Operations for managing Foundry connected tools.""" + + @distributed_trace_async + async def list_tools(self, + tools: List[FoundryConnectedTool], + user: Optional[UserInfo], + agent_name: str) -> Iterable[Tuple[FoundryConnectedTool, FoundryToolDetails]]: + """List connected tools. + + :param tools: List of connected tool definitions. + :type tools: List[FoundryConnectedTool] + :param user: User information for the request. 
Value can be None if running in local.
+        :type user: Optional[UserInfo]
+        :param agent_name: Name of the agent.
+        :type agent_name: str
+        :return: An iterable of tuples containing the tool definition and its details.
+        :rtype: Iterable[Tuple[FoundryConnectedTool, FoundryToolDetails]]
+        """
+        if not tools:
+            return []
+
+        request = self._build_list_tools_request(tools, user, agent_name)
+        response = await self._send_request(request)
+        async with response:
+            json_response = self._extract_response_json(response)
+            tools_response = ListFoundryConnectedToolsResponse.model_validate(json_response)
+            return self._convert_listed_tools(tools_response, tools)
+
+
+    @distributed_trace_async
+    async def invoke_tool(
+            self,
+            tool: ResolvedFoundryTool,
+            arguments: Dict[str, Any],
+            user: Optional[UserInfo],
+            agent_name: str) -> Any:
+        """Invoke a connected tool.
+
+        :param tool: Tool descriptor to invoke.
+        :type tool: ResolvedFoundryTool
+        :param arguments: Input arguments for the tool.
+        :type arguments: Dict[str, Any]
+        :param user: User information for the request. Value can be None if running in local.
+        :type user: Optional[UserInfo]
+        :param agent_name: Name of the agent.
+        :type agent_name: str
+        :return: Result of the tool invocation.
+ :rtype: Any + """ + request = self._build_invoke_tool_request(tool, arguments, user, agent_name) + response = await self._send_request(request) + async with response: + json_response = self._extract_response_json(response) + invoke_response = InvokeFoundryConnectedToolsResponse.model_validate(json_response) + return self._convert_invoke_result(invoke_response) + \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_foundry_hosted_mcp_tools.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_foundry_hosted_mcp_tools.py new file mode 100644 index 000000000000..0c01164a6809 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_foundry_hosted_mcp_tools.py @@ -0,0 +1,168 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from abc import ABC +from typing import Any, AsyncIterable, ClassVar, Dict, Iterable, List, Mapping, TYPE_CHECKING, Tuple, cast + +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async + +from ._base import BaseOperations +from .._models import FoundryHostedMcpTool, FoundryToolDetails, FoundryToolSource, ListFoundryHostedMcpToolsResponse, \ + ResolvedFoundryTool +from ..._exceptions import ToolInvocationError + + +class BaseFoundryHostedMcpToolsOperations(BaseOperations, ABC): + """Base operations for Foundry-hosted MCP tools.""" + + _PATH: ClassVar[str] = "/mcp_tools" + + _API_VERSION: ClassVar[str] = "2025-11-15-preview" + + _HEADERS: ClassVar[Dict[str, str]] = { + "Content-Type": "application/json", + "Accept": "application/json,text/event-stream", + "Connection": "keep-alive", + "Cache-Control": "no-cache", + } + + _QUERY_PARAMS: ClassVar[Dict[str, Any]] = { + "api-version": 
_API_VERSION
+    }
+
+    _LIST_TOOLS_REQUEST_BODY: ClassVar[Dict[str, Any]] = {
+        "jsonrpc": "2.0",
+        "id": 1,
+        "method": "tools/list",
+        "params": {}
+    }
+
+    _INVOKE_TOOL_REQUEST_BODY_TEMPLATE: ClassVar[Dict[str, Any]] = {
+        "jsonrpc": "2.0",
+        "id": 2,
+        "method": "tools/call",
+    }
+
+    # Tool-specific property key overrides
+    # Format: {"tool_name": {"tool_def_key": ["alias_key", ...]}}
+    _TOOL_PROPERTY_ALIAS: ClassVar[Dict[str, Dict[str, List[str]]]] = {
+        "_default": {
+            "imagegen_model_deployment_name": ["model_deployment_name"],
+            "model_deployment_name": ["model"],
+            "deployment_name": ["model"],
+        },
+        "image_generation": {
+            "imagegen_model_deployment_name": ["model"]
+        },
+        # Add more tool-specific mappings as needed
+    }
+
+    def _build_list_tools_request(self) -> HttpRequest:
+        """Build request for listing MCP tools.
+
+        :return: Request for listing MCP tools.
+        """
+        return self._client.post(self._PATH,
+                                 params=self._QUERY_PARAMS,
+                                 headers=self._HEADERS,
+                                 content=self._LIST_TOOLS_REQUEST_BODY)
+
+    @staticmethod
+    def _convert_listed_tools(
+            response: ListFoundryHostedMcpToolsResponse,
+            allowed_tools: List[FoundryHostedMcpTool]) -> Iterable[Tuple[FoundryHostedMcpTool, FoundryToolDetails]]:
+
+        allowlist = {tool.name: tool for tool in allowed_tools}
+        for tool in response.result.tools:
+            definition = allowlist.get(tool.name)
+            if not definition:
+                continue
+            details = FoundryToolDetails(
+                name=tool.name,
+                description=tool.description,
+                metadata=tool.meta,
+                input_schema=tool.input_schema)
+            yield definition, details
+
+    def _build_invoke_tool_request(self, tool: ResolvedFoundryTool, arguments: Dict[str, Any]) -> HttpRequest:
+        if tool.definition.source != FoundryToolSource.HOSTED_MCP:
+            raise ToolInvocationError(f"Tool {tool.name} is not a Foundry-hosted MCP tool.", tool=tool)
+        definition = cast(FoundryHostedMcpTool, tool.definition) if TYPE_CHECKING else tool.definition  # cast is typing-only; at runtime this is just tool.definition
+
+        payload = dict(self._INVOKE_TOOL_REQUEST_BODY_TEMPLATE)
+        payload["params"] = 
{
+            "name": tool.name,
+            "arguments": arguments
+        }
+        if tool.metadata and definition.configuration:
+            payload["_meta"] = tool.metadata.extract_from(definition.configuration,
+                                                          self._resolve_property_alias(tool.name))
+
+        return self._client.post(self._PATH,
+                                 params=self._QUERY_PARAMS,
+                                 headers=self._HEADERS,
+                                 content=payload)
+
+    @classmethod
+    def _resolve_property_alias(cls, tool_name: str) -> Dict[str, List[str]]:
+        """Get property key overrides for a specific tool.
+
+        :param tool_name: Name of the tool.
+        :type tool_name: str
+        :return: Property key overrides.
+        :rtype: Dict[str, List[str]]
+        """
+        overrides = dict(cls._TOOL_PROPERTY_ALIAS.get("_default", {}))
+        tool_specific = cls._TOOL_PROPERTY_ALIAS.get(tool_name, {})
+        overrides.update(tool_specific)
+        return overrides
+
+
+class FoundryMcpToolsOperations(BaseFoundryHostedMcpToolsOperations):
+    """Operations for Foundry-hosted MCP tools."""
+
+    @distributed_trace_async
+    async def list_tools(
+            self,
+            allowed_tools: List[FoundryHostedMcpTool]
+    ) -> Iterable[Tuple[FoundryHostedMcpTool, FoundryToolDetails]]:
+        """List MCP tools.
+
+        :param allowed_tools: List of allowed MCP tools to filter.
+        :type allowed_tools: List[FoundryHostedMcpTool]
+        :return: An iterable of tuples containing tool definitions and their details.
+        :rtype: Iterable[Tuple[FoundryHostedMcpTool, FoundryToolDetails]]
+        """
+        if not allowed_tools:
+            return []
+
+        request = self._build_list_tools_request()
+        response = await self._send_request(request)
+        async with response:
+            json_response = self._extract_response_json(response)
+            tools_response = ListFoundryHostedMcpToolsResponse.model_validate(json_response)
+
+        return self._convert_listed_tools(tools_response, allowed_tools)
+
+    @distributed_trace_async
+    async def invoke_tool(
+            self,
+            tool: ResolvedFoundryTool,
+            arguments: Dict[str, Any],
+    ) -> Any:
+        """Invoke an MCP tool.
+
+        :param tool: Tool descriptor for the tool to invoke.
+ :type tool: ResolvedFoundryTool + :param arguments: Input arguments for the tool. + :type arguments: Dict[str, Any] + :return: Result of the tool invocation. + :rtype: Any + """ + request = self._build_invoke_tool_request(tool, arguments) + response = await self._send_request(request) + async with response: + json_response = self._extract_response_json(response) + invoke_response = json_response + return invoke_response diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/__init__.py new file mode 100644 index 000000000000..28077537d94b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/__init__.py @@ -0,0 +1,5 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py new file mode 100644 index 000000000000..17eb8c2eec48 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py @@ -0,0 +1,146 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +import asyncio +import threading +from abc import ABC, abstractmethod +from collections import defaultdict +from concurrent.futures import Future +from typing import Any, Awaitable, Collection, Dict, List, Mapping, MutableMapping, Optional, Tuple, Union + +from cachetools import TTLCache + +from ._facade import FoundryToolLike, ensure_foundry_tool +from ._user import UserProvider +from ..client._client import FoundryToolClient +from ..client._models import FoundryTool, FoundryToolDetails, FoundryToolSource, ResolvedFoundryTool, UserInfo + + +class FoundryToolCatalog(ABC): + """Base class for Foundry tool catalogs.""" + def __init__(self, user_provider: UserProvider): + self._user_provider = user_provider + + async def get(self, tool: FoundryToolLike) -> Optional[ResolvedFoundryTool]: + """Gets a Foundry tool by its definition. + + :param tool: The Foundry tool to resolve. + :type tool: FoundryToolLike + :return: The resolved Foundry tool. + :rtype: Optional[ResolvedFoundryTool] + """ + tools = await self.list([tool]) + return tools[0] if tools else None + + @abstractmethod + async def list(self, tools: List[FoundryToolLike]) -> List[ResolvedFoundryTool]: + """Lists all available Foundry tools. + + :param tools: The list of Foundry tools to resolve. + :type tools: List[FoundryToolLike] + :return: A list of resolved Foundry tools. 
+ :rtype: List[ResolvedFoundryTool] + """ + raise NotImplementedError + + +_CachedValueType = Union[Awaitable[List[FoundryToolDetails]], List[FoundryToolDetails]] + + +class CachedFoundryToolCatalog(FoundryToolCatalog, ABC): + """Cached implementation of FoundryToolCatalog with concurrency-safe caching.""" + + def __init__(self, user_provider: UserProvider): + super().__init__(user_provider) + self._cache: MutableMapping[Any, _CachedValueType] = self._create_cache() + + def _create_cache(self) -> MutableMapping[Any, _CachedValueType]: + return TTLCache(maxsize=1024, ttl=600) + + def _get_key(self, user: Optional[UserInfo], tool: FoundryTool) -> Any: + if tool.source is FoundryToolSource.HOSTED_MCP: + return tool.id + return user, tool.id + + async def list(self, tools: List[FoundryToolLike]) -> List[ResolvedFoundryTool]: + user = await self._user_provider.get_user() + foundry_tools = {} + tools_to_fetch = {} + fetching_tasks = [] + for t in tools: + tool = ensure_foundry_tool(t) + key = self._get_key(user, tool) + foundry_tools[key] = tool + if key not in self._cache: + tools_to_fetch[key] = tool + elif (task := self._cache[key]) and isinstance(task, Awaitable): + fetching_tasks.append(task) + + # for tools that are not being listed, create a batch task, convert to per-tool resolving tasks, and cache them + if tools_to_fetch: + # Awaitable[Mapping[str, List[FoundryToolDetails]]] + fetched_tools = asyncio.create_task(self._fetch_tools(tools_to_fetch.values(), user)) + + for k, t in tools_to_fetch.items(): + # safe to write cache since it's the only runner in this event loop + task = asyncio.create_task(self._per_tool_fetching_task(k, t, fetched_tools)) + self._cache[k] = task + fetching_tasks.append(task) + + try: + # now we have every tool associated with a task + if fetching_tasks: + await asyncio.gather(*fetching_tasks) + except: + # exception can only be caused by fetching tasks, remove them from cache + for k in tools_to_fetch.keys(): + if k in self._cache: + 
del self._cache[k] + raise + + resolved_tools = [] + for key, tool in foundry_tools.items(): + # this acts like a lock - every task of the same tool waits for the same underlying fetch + task_or_value = self._cache[key] + details_list = (await task_or_value) if isinstance(task_or_value, Awaitable) else task_or_value + for details in details_list: + resolved_tools.append( + ResolvedFoundryTool( + definition=tool, + details=details + ) + ) + + return resolved_tools + + async def _per_tool_fetching_task( + self, + cache_key: Any, + tool: FoundryTool, + fetching: Awaitable[Mapping[str, List[FoundryToolDetails]]] + ) -> List[FoundryToolDetails]: + details = await fetching + details_list = details.get(tool.id, []) + # replace the task in cache with the actual value to optimize memory usage + self._cache[cache_key] = details_list + return details_list + + @abstractmethod + async def _fetch_tools(self, + tools: Collection[FoundryTool], + user: Optional[UserInfo]) -> Mapping[str, List[FoundryToolDetails]]: + raise NotImplementedError + + +class DefaultFoundryToolCatalog(CachedFoundryToolCatalog): + """Default implementation of FoundryToolCatalog.""" + + def __init__(self, client: FoundryToolClient, user_provider: UserProvider, agent_name: str): + super().__init__(user_provider) + self._client = client + self._agent_name = agent_name + + async def _fetch_tools(self, + tools: Collection[FoundryTool], + user: Optional[UserInfo]) -> Mapping[str, List[FoundryToolDetails]]: + return await self._client.list_tools_details(tools, self._agent_name, user) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_facade.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_facade.py new file mode 100644 index 000000000000..ebaca87cf1a7 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_facade.py @@ -0,0 +1,49 @@ +# 
--------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from typing import Any, Dict, Union + +from .. import FoundryConnectedTool, FoundryHostedMcpTool +from .._exceptions import InvalidToolFacadeError +from ..client._models import FoundryTool, FoundryToolProtocol + +# FoundryToolFacade: a “tool descriptor” bag. +# +# Reserved keys: +# Required: +# - "type": str Discriminator, e.g. "mcp" | "a2a" | "code_interpreter" | ... +# Optional: +# - "project_connection_id": str Project connection id of Foundry connected tools, required with "type" is "mcp" or a2a. +# +# Custom keys: +# - Allowed, but MUST NOT shadow reserved keys. +FoundryToolFacade = Dict[str, Any] + +FoundryToolLike = Union[FoundryToolFacade, FoundryTool] + + +def ensure_foundry_tool(tool: FoundryToolLike) -> FoundryTool: + """Ensure the input is a FoundryTool instance. + + :param tool: The tool descriptor, either as a FoundryToolFacade or FoundryTool. + :type tool: FoundryToolLike + :return: The corresponding FoundryTool instance. 
+ :rtype: FoundryTool + """ + if isinstance(tool, FoundryTool): + return tool + + tool = tool.copy() + tool_type = tool.pop("type", None) + if not isinstance(tool_type, str) or not tool_type: + raise InvalidToolFacadeError("FoundryToolFacade must have a valid 'type' field of type str.") + + try: + protocol = FoundryToolProtocol(tool_type) + project_connection_id = tool.pop("project_connection_id", None) + if not isinstance(project_connection_id, str) or not project_connection_id: + raise InvalidToolFacadeError(f"project_connection_id is required for tool protocol {protocol}.") + + return FoundryConnectedTool(protocol=protocol, project_connection_id=project_connection_id) + except: + return FoundryHostedMcpTool(name=tool_type, configuration=tool) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_invoker.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_invoker.py new file mode 100644 index 000000000000..d24c79dd4d12 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_invoker.py @@ -0,0 +1,69 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from abc import ABC, abstractmethod +from typing import Any, Dict + +from ._user import UserProvider +from ..client._client import FoundryToolClient +from ..client._models import ResolvedFoundryTool + + +class FoundryToolInvoker(ABC): + """Abstract base class for Foundry tool invokers.""" + + @property + @abstractmethod + def resolved_tool(self) -> ResolvedFoundryTool: + """Get the resolved tool definition. + + :return: The tool definition. + :rtype: ResolvedFoundryTool + """ + raise NotImplementedError + + @abstractmethod + async def invoke(self, arguments: Dict[str, Any]) -> Any: + """Invoke the tool with the given arguments. 
+ + :param arguments: The arguments to pass to the tool. + :type arguments: Dict[str, Any] + :return: The result of the tool invocation + :rtype: Any + """ + raise NotImplementedError + + +class DefaultFoundryToolInvoker(FoundryToolInvoker): + """Default implementation of FoundryToolInvoker.""" + + def __init__(self, + resolved_tool: ResolvedFoundryTool, + client: FoundryToolClient, + user_provider: UserProvider, + agent_name: str): + self._resolved_tool = resolved_tool + self._client = client + self._user_provider = user_provider + self._agent_name = agent_name + + @property + def resolved_tool(self) -> ResolvedFoundryTool: + """Get the resolved tool definition. + + :return: The tool definition. + :rtype: ResolvedFoundryTool + """ + return self._resolved_tool + + async def invoke(self, arguments: Dict[str, Any]) -> Any: + """Invoke the tool with the given arguments. + + :param arguments: The arguments to pass to the tool + :type arguments: Dict[str, Any] + :return: The result of the tool invocation + :rtype: Any + """ + user = await self._user_provider.get_user() + result = await self._client.invoke_tool(self._resolved_tool, arguments, self._agent_name, user) + return result diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_resolver.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_resolver.py new file mode 100644 index 000000000000..2764558b06bb --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_resolver.py @@ -0,0 +1,57 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +from abc import ABC, abstractmethod +from typing import Awaitable, Union, overload + +from ._catalog import FoundryToolCatalog +from ._facade import FoundryToolLike, ensure_foundry_tool +from ._invoker import DefaultFoundryToolInvoker, FoundryToolInvoker +from ._user import UserProvider +from .. import FoundryToolClient +from .._exceptions import UnableToResolveToolInvocationError +from ..client._models import ResolvedFoundryTool + + +class FoundryToolInvocationResolver(ABC): + """Resolver for Foundry tool invocations.""" + + @abstractmethod + async def resolve(self, tool: Union[FoundryToolLike, ResolvedFoundryTool]) -> FoundryToolInvoker: + """Resolves a Foundry tool invocation. + + :param tool: The Foundry tool to resolve. + :type tool: Union[FoundryToolLike, ResolvedFoundryTool] + :return: The resolved Foundry tool invoker. + :rtype: FoundryToolInvoker + """ + raise NotImplementedError + + +class DefaultFoundryToolInvocationResolver(FoundryToolInvocationResolver): + """Default implementation of FoundryToolInvocationResolver.""" + + def __init__(self, + catalog: FoundryToolCatalog, + client: FoundryToolClient, + user_provider: UserProvider, + agent_name: str): + self._catalog = catalog + self._client = client + self._user_provider = user_provider + self._agent_name = agent_name + + async def resolve(self, tool: Union[FoundryToolLike, ResolvedFoundryTool]) -> FoundryToolInvoker: + """Resolves a Foundry tool invocation. + + :param tool: The Foundry tool to resolve. + :type tool: Union[FoundryToolLike, ResolvedFoundryTool] + :return: The resolved Foundry tool invoker. 
+ :rtype: FoundryToolInvoker + """ + resolved_tool = (tool + if isinstance(tool, ResolvedFoundryTool) + else await self._catalog.get(ensure_foundry_tool(tool))) + if not resolved_tool: + raise UnableToResolveToolInvocationError(f"Unable to resolve tool {tool} from catalog", tool) + return DefaultFoundryToolInvoker(resolved_tool, self._client, self._user_provider, self._agent_name) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_runtime.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_runtime.py new file mode 100644 index 000000000000..8ff723a6f7dc --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_runtime.py @@ -0,0 +1,87 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +import os +from typing import Any, AsyncContextManager, Dict, Optional, Union + +from azure.core.credentials_async import AsyncTokenCredential + +from ._catalog import DefaultFoundryToolCatalog, FoundryToolCatalog +from ._facade import FoundryToolLike +from ._resolver import DefaultFoundryToolInvocationResolver, FoundryToolInvocationResolver +from ._user import ContextVarUserProvider, UserProvider +from ..client._models import ResolvedFoundryTool +from ..client._client import FoundryToolClient +from ...constants import Constants + + +class FoundryToolRuntime(AsyncContextManager["FoundryToolRuntime"]): + """Base class for Foundry tool runtimes.""" + + @property + def catalog(self) -> FoundryToolCatalog: + """The tool catalog. + + :return: The tool catalog. + :rtype: FoundryToolCatalog + """ + raise NotImplementedError + + @property + def invocation(self) -> FoundryToolInvocationResolver: + """The tool invocation resolver. + + :return: The tool invocation resolver. 
+ :rtype: FoundryToolInvocationResolver + """ + raise NotImplementedError + + async def invoke(self, tool: Union[FoundryToolLike, ResolvedFoundryTool], arguments: Dict[str, Any]) -> Any: + """Invoke a tool with the given arguments. + + :param tool: The tool to invoke. + :type tool: Union[FoundryToolLike, ResolvedFoundryTool] + :param arguments: The arguments to pass to the tool. + :type arguments: Dict[str, Any] + :return: The result of the tool invocation. + :rtype: Any + """ + invoker = await self.invocation.resolve(tool) + return await invoker.invoke(arguments) + + +class DefaultFoundryToolRuntime(FoundryToolRuntime): + """Default implementation of FoundryToolRuntime.""" + + def __init__(self, + project_endpoint: str, + credential: "AsyncTokenCredential", + user_provider: Optional[UserProvider] = None): + # Do we need introduce DI here? + self._user_provider = user_provider or ContextVarUserProvider() + self._agent_name = os.getenv(Constants.AGENT_NAME, "$default") + self._client = FoundryToolClient(endpoint=project_endpoint, credential=credential) + self._catalog = DefaultFoundryToolCatalog(client=self._client, + user_provider=self._user_provider, + agent_name=self._agent_name) + self._invocation = DefaultFoundryToolInvocationResolver(catalog=self._catalog, + client=self._client, + user_provider=self._user_provider, + agent_name=self._agent_name) + + @property + def catalog(self) -> FoundryToolCatalog: + """The tool catalog.""" + return self._catalog + + @property + def invocation(self) -> FoundryToolInvocationResolver: + """The tool invocation resolver.""" + return self._invocation + + async def __aenter__(self) -> "DefaultFoundryToolRuntime": + await self._client.__aenter__() + return self + + async def __aexit__(self, exc_type, exc_value, traceback): + await self._client.__aexit__(exc_type, exc_value, traceback) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py 
b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py new file mode 100644 index 000000000000..17b25095a953 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py @@ -0,0 +1,65 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from contextvars import ContextVar +from typing import Awaitable, Callable, Optional + +from starlette.applications import Starlette +from starlette.middleware.base import BaseHTTPMiddleware +from starlette.requests import Request +from starlette.types import ASGIApp + +from ._user import ContextVarUserProvider, resolve_user_from_headers +from ..client._models import UserInfo + +_UserContextType = ContextVar[Optional[UserInfo]] +_ResolverType = Callable[[Request], Awaitable[Optional[UserInfo]]] + +class UserInfoContextMiddleware(BaseHTTPMiddleware): + """Middleware to set user information in a context variable for each request.""" + + def __init__(self, app: ASGIApp, user_info_var: _UserContextType, user_resolver: _ResolverType): + super().__init__(app) + self._user_info_var = user_info_var + self._user_resolver = user_resolver + + @classmethod + def install(cls, + app: Starlette, + user_context: Optional[_UserContextType] = None, + user_resolver: Optional[_ResolverType] = None): + """Install the middleware into a Starlette application. + + :param app: The Starlette application to install the middleware into. + :type app: Starlette + :param user_context: Optional context variable to use for storing user info. + If not provided, a default context variable will be used. + :type user_context: Optional[ContextVar[Optional[UserInfo]]] + :param user_resolver: Optional function to resolve user info from the request. + If not provided, a default resolver will be used. 
+ :type user_resolver: Optional[Callable[[Request], Awaitable[Optional[UserInfo]]]] + """ + app.add_middleware(UserInfoContextMiddleware, + user_info_var=user_context or ContextVarUserProvider.default_user_info_context, + user_resolver=user_resolver or cls._default_user_resolver) + + @staticmethod + async def _default_user_resolver(request: Request) -> Optional[UserInfo]: + return resolve_user_from_headers(request.headers) + + async def dispatch(self, request: Request, call_next): + """Process the incoming request, setting the user info in the context variable. + + :param request: The incoming Starlette request. + :type request: Request + :param call_next: The next middleware or endpoint to call. + :type call_next: Callable[[Request], Awaitable[Response]] + :return: The response from the next middleware or endpoint. + :rtype: Response + """ + user = await self._user_resolver(request) + token = self._user_info_var.set(user) + try: + return await call_next(request) + finally: + self._user_info_var.reset(token) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_user.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_user.py new file mode 100644 index 000000000000..14d8aad2690a --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_user.py @@ -0,0 +1,52 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +from contextvars import ContextVar +from abc import ABC, abstractmethod +from typing import ClassVar, Mapping, Optional + +from ..client._models import UserInfo + + +class UserProvider(ABC): + """Base class for user providers.""" + + @abstractmethod + async def get_user(self) -> Optional[UserInfo]: + """Get the user information.""" + raise NotImplementedError + + +class ContextVarUserProvider(UserProvider): + """User provider that retrieves user information from a ContextVar.""" + default_user_info_context: ClassVar[ContextVar[UserInfo]] = ContextVar("user_info_context") + + def __init__(self, context: Optional[ContextVar[UserInfo]] = None): + self.context = context or self.default_user_info_context + + async def get_user(self) -> Optional[UserInfo]: + """Get the user information from the context variable.""" + return self.context.get(None) + + +def resolve_user_from_headers(headers: Mapping[str, str], + object_id_header: str = "x-aml-oid", + tenant_id_header: str = "x-aml-tid") -> Optional[UserInfo]: + """Resolve user information from HTTP headers. + + :param headers: The HTTP headers. + :type headers: Mapping[str, str] + :param object_id_header: The header name for the object ID. + :type object_id_header: str + :param tenant_id_header: The header name for the tenant ID. + :type tenant_id_header: str + :return: The user information or None if not found. 
+ :rtype: Optional[UserInfo] + """ + object_id = headers.get(object_id_header, "") + tenant_id = headers.get(tenant_id_header, "") + + if not object_id or not tenant_id: + return None + + return UserInfo(object_id=object_id, tenant_id=tenant_id) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/utils/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/utils/__init__.py new file mode 100644 index 000000000000..41fc7e00dd6d --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/utils/__init__.py @@ -0,0 +1,7 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) + +from ._name_resolver import * diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/utils/_name_resolver.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/utils/_name_resolver.py new file mode 100644 index 000000000000..ab9c87fd113c --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/utils/_name_resolver.py @@ -0,0 +1,37 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from ..client._models import ResolvedFoundryTool + + +class ToolNameResolver: + """Utility class for resolving tool names to be registered to model.""" + + def __init__(self): + self._count_by_name = dict() + self._stable_names = dict() + + def resolve(self, tool: ResolvedFoundryTool) -> str: + """Resolve a stable name for the given tool. + If the tool name has not been used before, use it as is. + If it has been used, append an underscore and a count to make it unique. 
+ + :param tool: The tool to resolve the name for. + :type tool: ResolvedFoundryTool + :return: The resolved stable name for the tool. + :rtype: str + """ + final_name = self._stable_names.get(tool.id) + if final_name is not None: + return final_name + + dup_count = self._count_by_name.setdefault(tool.details.name, 0) + + if dup_count == 0: + final_name = tool.details.name + else: + final_name = f"{tool.details.name}_{dup_count}" + + self._stable_names[tool.id] = final_name + self._count_by_name[tool.details.name] = dup_count + 1 + return self._stable_names[tool.id] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/__init__.py new file mode 100644 index 000000000000..28077537d94b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/__init__.py @@ -0,0 +1,5 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py new file mode 100644 index 000000000000..24de2e1345a4 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py @@ -0,0 +1,89 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +from __future__ import annotations + +import asyncio +import inspect +from types import TracebackType +from typing import Any, Optional, Sequence, Type, Union + +from azure.core.credentials import AccessToken, TokenCredential +from azure.core.credentials_async import AsyncTokenCredential + + +async def _to_thread(func, *args, **kwargs): + """Compatibility wrapper for asyncio.to_thread (Python 3.8+).""" + if hasattr(asyncio, "to_thread"): + return await asyncio.to_thread(func, *args, **kwargs) # py>=3.9 + loop = asyncio.get_running_loop() + return await loop.run_in_executor(None, lambda: func(*args, **kwargs)) + + +class AsyncTokenCredentialAdapter(AsyncTokenCredential): + """ + AsyncTokenCredential adapter for either: + - azure.core.credentials.TokenCredential (sync) + - azure.core.credentials_async.AsyncTokenCredential (async) + """ + + def __init__(self, credential: TokenCredential |AsyncTokenCredential) -> None: + if not hasattr(credential, "get_token"): + raise TypeError("credential must have a get_token method") + self._credential = credential + self._is_async = isinstance(credential, AsyncTokenCredential) or inspect.iscoroutinefunction( + getattr(credential, "get_token", None) + ) + + async def get_token( + self, + *scopes: str, + claims: str | None = None, + tenant_id: str | None = None, + enable_cae: bool = False, + **kwargs: Any, + ) -> AccessToken: + if self._is_async: + return await self._credential.get_token(*scopes, + claims=claims, + tenant_id=tenant_id, + enable_cae=enable_cae, + **kwargs) + return await _to_thread(self._credential.get_token, + *scopes, + claims=claims, + tenant_id=tenant_id, + enable_cae=enable_cae, + **kwargs) + + async def close(self) -> None: + """ + Best-effort resource cleanup: + - if underlying has async close(): await it + - else if underlying has sync close(): run it in a thread + """ + close_fn = getattr(self._credential, "close", None) + if close_fn is None: + 
return + + if inspect.iscoroutinefunction(close_fn): + await close_fn() + else: + await _to_thread(close_fn) + + async def __aenter__(self) -> "AsyncTokenCredentialAdapter": + enter = getattr(self._credential, "__aenter__", None) + if enter is not None and inspect.iscoroutinefunction(enter): + await enter() + return self + + async def __aexit__( + self, + exc_type: Type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: TracebackType | None = None, + ) -> None: + aexit = getattr(self._credential, "__aexit__", None) + if aexit is not None and inspect.iscoroutinefunction(aexit): + return await aexit(exc_type, exc_value, traceback) + await self.close() \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index 9f3d01c09c88..e53b8f5474b7 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -30,6 +30,7 @@ dependencies = [ "starlette>=0.45.0", "uvicorn>=0.31.0", "aiohttp>=3.13.0", # used by azure-identity aio + "cachetools" ] [build-system] diff --git a/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_with_tools_test.py b/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_with_tools_test.py index 52648465e151..a754c4d72772 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_with_tools_test.py +++ b/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_with_tools_test.py @@ -72,7 +72,7 @@ async def agent_run(context: AgentRunContext): "I am mock agent with no intelligence in stream mode.", context ) - tool = await my_agent.get_tool_client().list_tools() + tool = await my_agent.get_tool_client().list_tools_details() tool_list = [t.name for t in tool] # Build assistant output content output_content = [ diff 
--git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py index 13bb7c2af189..fd190bc4d5cf 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py @@ -3,26 +3,28 @@ # --------------------------------------------------------- __path__ = __import__("pkgutil").extend_path(__path__, __name__) -from typing import TYPE_CHECKING, Optional, Any +from typing import Optional, TYPE_CHECKING +from azure.ai.agentserver.core.application import PackageMetadata, set_current_app +from ._context import LanggraphRunContext from ._version import VERSION -from .tool_client import ToolClient from .langgraph import LangGraphAdapter if TYPE_CHECKING: # pragma: no cover - from . import models + from .models.response_api_converter import ResponseAPIConverter from azure.core.credentials_async import AsyncTokenCredential def from_langgraph( agent, credentials: Optional["AsyncTokenCredential"] = None, - converter: Optional["models.response_api_converter.ResponseAPIConverter"] = None, - **kwargs: Any + converter: Optional["ResponseAPIConverter"] = None ) -> "LangGraphAdapter": - return LangGraphAdapter(agent, credentials=credentials, converter=converter, **kwargs) + return LangGraphAdapter(agent, credentials=credentials, converter=converter) -__all__ = ["from_langgraph", "ToolClient"] +__all__ = ["from_langgraph", "LanggraphRunContext"] __version__ = VERSION + +set_current_app(PackageMetadata.from_dist("azure-ai-agentserver-langgraph")) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py new file mode 100644 index 000000000000..846133a7912c --- /dev/null 
+++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py @@ -0,0 +1,20 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from dataclasses import dataclass +from typing import TYPE_CHECKING + +from langgraph.runtime import get_runtime + +from .tools._context import FoundryToolContext + + +@dataclass +class LanggraphRunContext: + + tools: FoundryToolContext + + @classmethod + def get_current(cls) -> "LanggraphRunContext": + lg_runtime = get_runtime(cls) + return lg_runtime.context diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index 23a144c62cc0..b68d8d408eb5 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -5,22 +5,21 @@ # mypy: disable-error-code="assignment,arg-type" import os import re -from typing import TYPE_CHECKING, Any, Awaitable, Protocol, Union, Optional, List +from typing import Optional, TYPE_CHECKING, Union -from langchain_core.runnables import RunnableConfig -from langchain_core.tools import StructuredTool from langgraph.graph.state import CompiledStateGraph -from azure.ai.agentserver.core.client.tools import OAuthConsentRequiredError from azure.ai.agentserver.core.constants import Constants from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.server.base import FoundryCBAgent from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext - -from .models.response_api_converter import ResponseAPIConverter, GraphInputArguments +from azure.ai.agentserver.core.tools import OAuthConsentRequiredError +from ._context import 
LanggraphRunContext +from .models.response_api_converter import GraphInputArguments, ResponseAPIConverter from .models.response_api_default_converter import ResponseAPIDefaultConverter from .models.utils import is_state_schema_valid -from .tool_client import ToolClient +from .tools._context import FoundryToolContext +from .tools._resolver import FoundryLangChainToolResolver if TYPE_CHECKING: from azure.core.credentials_async import AsyncTokenCredential @@ -28,24 +27,6 @@ logger = get_logger() -class GraphFactory(Protocol): - """Protocol for graph factory functions. - - A graph factory is a callable that takes a ToolClient and returns - a CompiledStateGraph, either synchronously or asynchronously. - """ - - def __call__(self, tools: List[StructuredTool]) -> Union[CompiledStateGraph, Awaitable[CompiledStateGraph]]: - """Create a CompiledStateGraph using the provided ToolClient. - - :param tools: The list of StructuredTool instances. - :type tools: List[StructuredTool] - :return: A compiled LangGraph state graph, or an awaitable that resolves to one. - :rtype: Union[CompiledStateGraph, Awaitable[CompiledStateGraph]] - """ - ... - - class LangGraphAdapter(FoundryCBAgent): """ Adapter for LangGraph Agent. @@ -53,10 +34,9 @@ class LangGraphAdapter(FoundryCBAgent): def __init__( self, - graph: Union[CompiledStateGraph, GraphFactory], + graph: CompiledStateGraph, credentials: "Optional[AsyncTokenCredential]" = None, converter: "Optional[ResponseAPIConverter]" = None, - **kwargs: Any ) -> None: """ Initialize the LangGraphAdapter with a CompiledStateGraph or a function that returns one. @@ -69,149 +49,44 @@ def __init__( :param converter: custom response converter. 
:type converter: Optional[ResponseAPIConverter] """ - super().__init__(credentials=credentials, **kwargs) # pylint: disable=unexpected-keyword-arg - self._graph_or_factory: Union[CompiledStateGraph, GraphFactory] = graph - self._resolved_graph: "Optional[CompiledStateGraph]" = None + super().__init__(credentials=credentials) # pylint: disable=unexpected-keyword-arg + self._graph = graph + self._tool_resolver = FoundryLangChainToolResolver() self.azure_ai_tracer = None - # If graph is already compiled, validate and set up state converter - if isinstance(graph, CompiledStateGraph): - self._resolved_graph = graph - if not converter: - if is_state_schema_valid(self._resolved_graph.builder.state_schema): - self.converter = ResponseAPIDefaultConverter(graph=self._resolved_graph) - else: - raise ValueError("converter is required for non-MessagesState graph.") + if not converter: + if is_state_schema_valid(self._graph.builder.state_schema): + self.converter = ResponseAPIDefaultConverter(graph=self._graph) else: - self.converter = converter + raise ValueError("converter is required for non-MessagesState graph.") else: - # Defer validation until graph is resolved self.converter = converter - @property - def graph(self) -> "Optional[CompiledStateGraph]": - """ - Get the resolved graph. This property provides backward compatibility. - - :return: The resolved CompiledStateGraph if available, None otherwise. 
- :rtype: Optional[CompiledStateGraph] - """ - return self._resolved_graph - async def agent_run(self, context: AgentRunContext): # Resolve graph - always resolve if it's a factory function to get fresh graph each time # For factories, get a new graph instance per request to avoid concurrency issues - tool_client = None try: - if callable(self._graph_or_factory): - graph, tool_client = await self._resolve_graph_for_request(context) - elif self._resolved_graph is None: - await self._resolve_graph(context) - graph = self._resolved_graph - else: - graph = self._resolved_graph - input_arguments = await self.converter.convert_request(context) self.ensure_runnable_config(context, input_arguments) + + lg_run_context = await self.setup_lg_run_context() if not context.stream: - try: - response = await self.agent_run_non_stream(input_arguments, context, graph) - return response - finally: - # Close tool_client for non-streaming requests - if tool_client is not None: - try: - await tool_client.close() - logger.debug("Closed tool_client after non-streaming request") - except Exception as e: - logger.warning(f"Error closing tool_client: {e}") + response = await self.agent_run_non_stream(input_arguments, context, lg_run_context) + return response # For streaming, pass tool_client to be closed after streaming completes - return self.agent_run_astream(input_arguments, context, graph, tool_client) + return self.agent_run_astream(input_arguments, context, lg_run_context) except OAuthConsentRequiredError as e: - # Clean up tool_client if OAuth error occurs before streaming starts - if tool_client is not None: - await tool_client.close() - if not context.stream: response = await self.respond_with_oauth_consent(context, e) return response return self.respond_with_oauth_consent_astream(context, e) except Exception: - # Clean up tool_client if error occurs before streaming starts - if tool_client is not None: - await tool_client.close() raise - async def _resolve_graph(self, context: 
AgentRunContext): - """Resolve the graph if it's a factory function (for single-use/first-time resolution). - Creates a ToolClient and calls the factory function with it. - This is used for the initial resolution to set up converter. - - :param context: The context for the agent run. - :type context: AgentRunContext - """ - if callable(self._graph_or_factory): - logger.debug("Resolving graph from factory function") - - # Create ToolClient with credentials - tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) # pylint: disable=no-member - tool_client_wrapper = ToolClient(tool_client) - tools = await tool_client_wrapper.list_tools() - # Call the factory function with ToolClient - # Support both sync and async factories - import inspect - result = self._graph_or_factory(tools) - if inspect.iscoroutine(result): - self._resolved_graph = await result - else: - self._resolved_graph = result - - # Validate and set up state converter if not already set from initialization - if not self.converter and self._resolved_graph is not None: - if is_state_schema_valid(self._resolved_graph.builder.state_schema): - self.converter = ResponseAPIDefaultConverter(graph=self._resolved_graph) - else: - raise ValueError("converter is required for non-MessagesState graph.") - logger.debug("Graph resolved successfully") - else: - # Should not reach here, but just in case - self._resolved_graph = self._graph_or_factory - - async def _resolve_graph_for_request(self, context: AgentRunContext): - """ - Resolve a fresh graph instance for a single request to avoid concurrency issues. - Creates a ToolClient and calls the factory function with it. - This method returns a new graph instance and the tool_client for cleanup. - - :param context: The context for the agent run. - :type context: AgentRunContext - :return: A tuple of (compiled graph instance, tool_client wrapper). 
- :rtype: tuple[CompiledStateGraph, ToolClient] - """ - logger.debug("Resolving fresh graph from factory function for request") - - # Create ToolClient with credentials - tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) # pylint: disable=no-member - tool_client_wrapper = ToolClient(tool_client) - tools = await tool_client_wrapper.list_tools() - # Call the factory function with ToolClient - # Support both sync and async factories - import inspect - result = self._graph_or_factory(tools) # type: ignore[operator] - if inspect.iscoroutine(result): - graph = await result - else: - graph = result - - # Ensure state converter is set up (use existing one or create new) - if not self.converter: - if is_state_schema_valid(graph.builder.state_schema): - self.converter = ResponseAPIDefaultConverter(graph=graph) - else: - raise ValueError("converter is required for non-MessagesState graph.") - logger.debug("Fresh graph resolved successfully for request") - return graph, tool_client_wrapper + async def setup_lg_run_context(self): + resolved = await self._tool_resolver.resolve_from_registry() + return LanggraphRunContext(FoundryToolContext(resolved)) def init_tracing_internal(self, exporter_endpoint=None, app_insights_conn_str=None): # set env vars for langsmith @@ -241,67 +116,55 @@ def get_trace_attributes(self): attrs["service.namespace"] = "azure.ai.agentserver.langgraph" return attrs - async def agent_run_non_stream(self, input_arguments: GraphInputArguments, context: AgentRunContext, graph: CompiledStateGraph): + async def agent_run_non_stream(self, input_arguments: GraphInputArguments, context: AgentRunContext, + lg_run_context: LanggraphRunContext): """ Run the agent with non-streaming response. - :param input_data: The input data to run the agent with. - :type input_data: dict + :param input_arguments: The input data to run the agent with. 
+ :type input_arguments: GraphInputArguments :param context: The context for the agent run. :type context: AgentRunContext - :param graph: The compiled graph instance to use for this request. - :type graph: CompiledStateGraph + :param lg_run_context: The tool context for the agent run. + :type lg_run_context: FoundryToolContext :return: The response of the agent run. :rtype: dict """ try: - result = await graph.ainvoke(**input_arguments) - output = await self.converter.convert_response_non_stream(result, context) + result = await self._graph.ainvoke(**input_arguments, context=lg_run_context) + output = self.converter.convert_response_non_stream(result, context) return output except Exception as e: logger.error(f"Error during agent run: {e}", exc_info=True) raise e - async def agent_run_astream( - self, - input_arguments: GraphInputArguments, - context: AgentRunContext, - graph: CompiledStateGraph, - tool_client: "Optional[ToolClient]" = None - ): + async def agent_run_astream(self, + input_arguments: GraphInputArguments, + context: AgentRunContext, + lg_run_context: LanggraphRunContext): """ Run the agent with streaming response. - :param input_data: The input data to run the agent with. - :type input_data: dict + :param input_arguments: The input data to run the agent with. + :type input_arguments: GraphInputArguments :param context: The context for the agent run. :type context: AgentRunContext - :param graph: The compiled graph instance to use for this request. - :type graph: CompiledStateGraph - :param tool_client: Optional ToolClient to close after streaming completes. - :type tool_client: Optional[ToolClient] + :param lg_run_context: The tool context for the agent run. + :type lg_run_context: FoundryToolContext :return: An async generator yielding the response stream events. 
:rtype: AsyncGenerator[dict] """ try: logger.info(f"Starting streaming agent run {context.response_id}") - stream = graph.astream(**input_arguments) + stream = self._graph.astream(**input_arguments, context=lg_run_context) async for output_event in self.converter.convert_response_stream(stream, context): yield output_event except Exception as e: logger.error(f"Error during streaming agent run: {e}", exc_info=True) raise e - finally: - # Close tool_client if provided - if tool_client is not None: - try: - await tool_client.close() - logger.debug("Closed tool_client after streaming completed") - except Exception as e: - logger.warning(f"Error closing tool_client in stream: {e}") def ensure_runnable_config(self, context: AgentRunContext, input_arguments: GraphInputArguments): """ diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py deleted file mode 100644 index 78baf96bee80..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py +++ /dev/null @@ -1,226 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# --------------------------------------------------------- -"""Tool client for integrating AzureAIToolClient with LangGraph.""" - -from typing import TYPE_CHECKING, Any, Dict, List, Optional - -from langchain_core.tools import StructuredTool -from pydantic import BaseModel, Field, create_model - -if TYPE_CHECKING: - from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient, FoundryTool - -# pylint: disable=client-accepts-api-version-keyword,missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs -class ToolClient: - """Client that integrates AzureAIToolClient with LangGraph. 
- - This class provides methods to list tools from AzureAIToolClient and convert them - to LangChain BaseTool format, as well as invoke tools in a format compatible with - LangGraph's create_react_agent and StateGraph. - - :param tool_client: The AzureAIToolClient instance to use for tool operations. - :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient - - .. admonition:: Example: - - .. code-block:: python - - from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient - from azure.ai.agentserver.langgraph import ToolClient - from azure.identity.aio import DefaultAzureCredential - - async with DefaultAzureCredential() as credential: - tool_client = AzureAIToolClient( - endpoint="https://", - credential=credential - ) - - client = ToolClient(tool_client) - - # List tools as LangChain BaseTool instances - tools = await client.list_tools() - - # Use with create_react_agent - from langgraph.prebuilt import create_react_agent - from langchain_openai import AzureChatOpenAI - - model = AzureChatOpenAI(model="gpt-4o") - agent = create_react_agent(model, tools) - - # Invoke a tool directly - result = await client.invoke_tool( - tool_name="my_tool", - tool_input={"param": "value"} - ) - - :meta private: - """ - - def __init__(self, tool_client: "AzureAIToolClient") -> None: - """Initialize the ToolClient. - - :param tool_client: The AzureAIToolClient instance to use for tool operations. - :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient - """ - self._tool_client = tool_client - self._langchain_tools_cache: Optional[List[StructuredTool]] = None - - async def list_tools(self) -> List[StructuredTool]: - """List all available tools as LangChain BaseTool instances. - - Retrieves tools from AzureAIToolClient and converts them to LangChain - StructuredTool instances that can be used with LangGraph's create_react_agent - or StateGraph. - - :return: List of LangChain StructuredTool instances. 
- :rtype: List[~langchain_core.tools.StructuredTool] - :raises ~azure.core.exceptions.HttpResponseError: - Raised for HTTP communication failures. - - .. admonition:: Example: - - .. code-block:: python - - client = ToolClient(tool_client) - tools = await client.list_tools() - - # Use with create_react_agent - agent = create_react_agent(model, tools) - """ - # Get tools from AzureAIToolClient - if self._langchain_tools_cache is not None: - return self._langchain_tools_cache - - azure_tools = await self._tool_client.list_tools() - self._langchain_tools_cache = [] - # Convert to LangChain StructuredTool instances - for azure_tool in azure_tools: - langchain_tool = self._convert_to_langchain_tool(azure_tool) - self._langchain_tools_cache.append(langchain_tool) - - return self._langchain_tools_cache - - def _convert_to_langchain_tool(self, azure_tool: "FoundryTool") -> StructuredTool: - """Convert an AzureAITool to a LangChain StructuredTool. - - :param azure_tool: The AzureAITool to convert. - :type azure_tool: ~azure.ai.agentserver.core.client.tools.aio.AzureAITool - :return: A LangChain StructuredTool instance. - :rtype: ~langchain_core.tools.StructuredTool - """ - # Get the input schema from the tool descriptor - input_schema = azure_tool.input_schema or {} - - # Create a Pydantic model for the tool's input schema - args_schema = self._create_pydantic_model( - tool_name=azure_tool.name, - schema=dict(input_schema) - ) - - # Create an async function that invokes the tool - async def tool_func(**kwargs: Any) -> str: - """Invoke the Azure AI tool. - - :return: The result from the tool invocation as a string. - :rtype: str - :raises OAuthConsentRequiredError: If OAuth consent is required for the tool invocation. 
- """ - # Let OAuthConsentRequiredError propagate up to be handled by the agent - result = await azure_tool(**kwargs) - # Convert result to string for LangChain compatibility - if isinstance(result, dict): - import json - return json.dumps(result) - return str(result) - - # Create a StructuredTool with the async function - structured_tool = StructuredTool( - name=azure_tool.name, - description=azure_tool.description or "No description available", - coroutine=tool_func, - args_schema=args_schema, - ) - - return structured_tool - - def _create_pydantic_model( - self, - tool_name: str, - schema: Dict[str, Any] - ) -> type[BaseModel]: - """Create a Pydantic model from a JSON schema. - - :param tool_name: Name of the tool (used for model name). - :type tool_name: str - :param schema: JSON schema for the tool's input parameters. - :type schema: Dict[str, Any] - :return: A Pydantic model class. - :rtype: type[BaseModel] - """ - # Get properties from schema - properties = schema.get("properties") or {} - required_fields = schema.get("required") or [] - - # Build field definitions for Pydantic model - field_definitions = {} - for prop_name, prop_schema in properties.items(): - prop_type = self._json_type_to_python_type(prop_schema.get("type", "string")) - prop_description = prop_schema.get("description", "") - - # Determine if field is required - is_required = prop_name in required_fields - - if is_required: - field_definitions[prop_name] = ( - prop_type, - Field(..., description=prop_description) - ) - else: - field_definitions[prop_name] = ( - prop_type, - Field(default=None, description=prop_description) - ) - - # Create the model dynamically - model_name = f"{tool_name.replace('-', '_').replace(' ', '_').title()}-Input" - return create_model(model_name, **field_definitions) # type: ignore[call-overload] - - def _json_type_to_python_type(self, json_type: str) -> type: - """Convert JSON schema type to Python type. - - :param json_type: JSON schema type string. 
- :type json_type: str - :return: Corresponding Python type. - :rtype: type - """ - type_mapping = { - "string": str, - "integer": int, - "number": float, - "boolean": bool, - "array": list, - "object": dict, - } - return type_mapping.get(json_type, str) - - async def close(self) -> None: - await self._tool_client.close() - - async def __aenter__(self) -> "ToolClient": - """Async context manager entry. - - :return: The ToolClient instance. - :rtype: ToolClient - """ - return self - - async def __aexit__(self, *exc_details: Any) -> None: - """Async context manager exit. - - :param exc_details: Exception details if an exception occurred. - :type exc_details: Any - :return: None - :rtype: None - """ - # The tool_client lifecycle is managed externally diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/__init__.py new file mode 100644 index 000000000000..daf51382381d --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) + +from ._builder import * +from ._chat_model import FoundryToolLateBindingChatModel +from ._middleware import FoundryToolBindingMiddleware +from ._tool_node import FoundryToolNodeWrappers, FoundryToolCallWrapper \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py new file mode 100644 index 000000000000..afba02e26a0a --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py @@ -0,0 +1,61 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from typing import List, Optional, Union, overload + +from langchain_core.language_models import BaseChatModel + +from azure.ai.agentserver.core.tools import FoundryToolLike +from ._chat_model import FoundryToolLateBindingChatModel +from ._middleware import FoundryToolBindingMiddleware +from ._resolver import get_registry + + +@overload +def use_foundry_tools(tools: List[FoundryToolLike], /) -> FoundryToolBindingMiddleware: + """Use foundry tools as middleware. + + :param tools: A list of foundry tools to bind. + :type tools: List[FoundryToolLike] + :return: A FoundryToolBindingMiddleware that binds the given tools. + :rtype: FoundryToolBindingMiddleware + """ + ... + + +@overload +def use_foundry_tools(model: BaseChatModel, tools: List[FoundryToolLike], /) -> FoundryToolLateBindingChatModel: + """Use foundry tools with a chat model. + + :param model: The chat model to bind the tools to. + :type model: BaseChatModel + :param tools: A list of foundry tools to bind. 
+ :type tools: List[FoundryToolLike] + :return: A FoundryToolLateBindingChatModel that binds the given tools to the model. + :rtype: FoundryToolLateBindingChatModel + """ + ... + + +def use_foundry_tools( + model_or_tools: Union[BaseChatModel, List[FoundryToolLike]], + tools: Optional[List[FoundryToolLike]] = None, + /, +) -> Union[FoundryToolBindingMiddleware, FoundryToolLateBindingChatModel]: + """Use foundry tools with a chat model or as middleware. + + :param model_or_tools: The chat model to bind the tools to, or a list of foundry tools to bind as middleware. + :type model_or_tools: Union[BaseChatModel, List[FoundryToolLike]] + :param tools: A list of foundry tools to bind (required if model_or_tools is a chat model). + :type tools: Optional[List[FoundryToolLike]] + :return: A FoundryToolLateBindingChatModel or FoundryToolBindingMiddleware that binds the given tools. + :rtype: Union[FoundryToolBindingMiddleware, FoundryToolLateBindingChatModel] + """ + if isinstance(model_or_tools, BaseChatModel): + if tools is None: + raise ValueError("Tools must be provided when a model is given.") + get_registry().extend(tools) + return FoundryToolLateBindingChatModel(model_or_tools, foundry_tools=tools) + else: + get_registry().extend(model_or_tools) + return FoundryToolBindingMiddleware(model_or_tools) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py new file mode 100644 index 000000000000..2ab97a4b0269 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py @@ -0,0 +1,112 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +from __future__ import annotations + +from typing import Any, Callable, Dict, List, Optional, Sequence + +from langchain_core.callbacks import CallbackManagerForLLMRun +from langchain_core.language_models import BaseChatModel, LanguageModelInput +from langchain_core.messages import AIMessage, BaseMessage +from langchain_core.outputs import ChatResult +from langchain_core.runnables import Runnable, RunnableConfig +from langchain_core.tools import BaseTool +from langgraph.prebuilt import ToolNode + +from azure.ai.agentserver.core.tools import FoundryToolLike +from ._tool_node import FoundryToolCallWrapper, FoundryToolNodeWrappers + + +class FoundryToolLateBindingChatModel(BaseChatModel): + """A ChatModel that supports late binding of Foundry tools during invocation. + + This ChatModel allows you to specify Foundry tools that will be resolved and bound + at the time of invocation, rather than at the time of model creation. + + :param delegate: The underlying chat model to delegate calls to. + :type delegate: BaseChatModel + :param foundry_tools: A list of Foundry tools to be resolved and bound during invocation. + :type foundry_tools: List[FoundryToolLike] + """ + + def __init__(self, delegate: BaseChatModel, foundry_tools: List[FoundryToolLike]): + super().__init__() + self._delegate = delegate + self._foundry_tools_to_bind = foundry_tools + self._bound_tools: List[Dict[str, Any] | type | Callable | BaseTool] = [] + self._bound_kwargs: dict[str, Any] = {} + + @property + def tool_node(self) -> ToolNode: + """Get a ToolNode that uses this chat model's Foundry tool call wrappers. + + :return: A ToolNode with Foundry tool call wrappers. + :rtype: ToolNode + """ + return ToolNode([], **self.tool_node_wrapper) + + @property + def tool_node_wrapper(self) -> FoundryToolNodeWrappers: + """Get the Foundry tool call wrappers for this chat model. 
+
+        Example::
+            >>> from langgraph.prebuilt import ToolNode
+            >>> foundry_tool_bound_chat_model = FoundryToolLateBindingChatModel(...)
+            >>> ToolNode([...], **foundry_tool_bound_chat_model.tool_node_wrapper)
+
+        :return: The Foundry tool call wrappers.
+        :rtype: FoundryToolNodeWrappers
+        """
+        return FoundryToolCallWrapper(self._foundry_tools_to_bind).as_wrappers()
+
+    def bind_tools(self,
+                   tools: Sequence[
+                       Dict[str, Any] | type | Callable | BaseTool  # noqa: UP006
+                   ],
+                   *,
+                   tool_choice: str | None = None,
+                   **kwargs: Any) -> Runnable[LanguageModelInput, AIMessage]:
+        """Record tools to be bound later during invocation."""
+
+        self._bound_tools.extend(tools)
+        if tool_choice is not None:
+            self._bound_kwargs["tool_choice"] = tool_choice
+        self._bound_kwargs.update(kwargs)
+
+        return self
+
+    def _bound_delegate_for_call(self) -> Runnable[LanguageModelInput, AIMessage]:
+        from .._context import LanggraphRunContext
+
+        foundry_tools = LanggraphRunContext.get_current().tools.resolved_tools.get(self._foundry_tools_to_bind)
+        all_tools = self._bound_tools.copy()
+        all_tools.extend(foundry_tools)
+
+        if not all_tools:
+            return self._delegate
+
+        bound_kwargs = self._bound_kwargs or {}
+        return self._delegate.bind_tools(all_tools, **bound_kwargs)
+
+    def invoke(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any) -> Any:
+        return self._bound_delegate_for_call().invoke(input, config=config, **kwargs)
+
+    async def ainvoke(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any) -> Any:
+        return await self._bound_delegate_for_call().ainvoke(input, config=config, **kwargs)
+
+    def stream(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any):
+        yield from self._bound_delegate_for_call().stream(input, config=config, **kwargs)
+
+    async def astream(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any):
+        async for x in self._bound_delegate_for_call().astream(input, config=config, **kwargs):
+            yield x
+
+    @property
+    def _llm_type(self) -> str:
+        return f"foundry_tool_binding_model({getattr(self._delegate, '_llm_type', type(self._delegate).__name__)})"
+
+    def _generate(self, messages: list[BaseMessage], stop: list[str] | None = None,
+                  run_manager: CallbackManagerForLLMRun | None = None, **kwargs: Any) -> ChatResult:
+        # should never be called as invoke/ainvoke/stream/astream are redirected to delegate
+        raise NotImplementedError()
+
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_context.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_context.py
new file mode 100644
index 000000000000..53789efec1a4
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_context.py
@@ -0,0 +1,16 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+from dataclasses import dataclass, field
+
+from ._resolver import ResolvedTools
+
+
+@dataclass
+class FoundryToolContext:
+    """Context for tool resolution.
+
+    :param resolved_tools: The resolved tools of all registered foundry tools.
+    :type resolved_tools: ResolvedTools
+    """
+    resolved_tools: ResolvedTools = field(default_factory=lambda: ResolvedTools([]))
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py
new file mode 100644
index 000000000000..d3b95e95e9c6
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py
@@ -0,0 +1,110 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# --------------------------------------------------------- +from __future__ import annotations + +from typing import Awaitable, Callable, List + +from langchain_core.tools import BaseTool, Tool +from langgraph.typing import ContextT, StateT_co + +from azure.ai.agentserver.core.tools import FoundryToolLike +from langchain.agents.middleware import AgentMiddleware, ModelRequest, ModelResponse +from langchain.agents.middleware.types import ModelCallResult +from langchain_core.messages import ToolMessage +from langgraph.prebuilt.tool_node import ToolCallRequest +from langgraph.types import Command + +from ._chat_model import FoundryToolLateBindingChatModel +from ._tool_node import FoundryToolCallWrapper + + +class FoundryToolBindingMiddleware(AgentMiddleware[StateT_co, ContextT]): + """Middleware that binds foundry tools to tool calls in the agent. + + :param foundry_tools: A list of foundry tools to bind. + :type foundry_tools: List[FoundryToolLike] + """ + + def __init__(self, foundry_tools: List[FoundryToolLike]): + super().__init__() + + # to ensure `create_agent()` will create a tool node when there are foundry tools to bind + # this tool will never be bound to model and called + self.tools = [self._dummy_tool()] if foundry_tools else [] + + self._foundry_tools_to_bind = foundry_tools + self._tool_call_wrapper = FoundryToolCallWrapper(self._foundry_tools_to_bind) + + @staticmethod + def _dummy_tool() -> BaseTool: + return Tool(name="__dummy_tool_by_foundry_middleware__", + func=lambda x: None, + description="__dummy_tool_by_foundry_middleware__") + + def wrap_model_call(self, request: ModelRequest, + handler: Callable[[ModelRequest], ModelResponse]) -> ModelCallResult: + """Wrap the model call to use a FoundryToolBindingChatModel. + + :param request: The model request. + :type request: ModelRequest + :param handler: The model call handler. + :type handler: Callable[[ModelRequest], ModelResponse] + :return: The model call result. 
+ :rtype: ModelCallResult + """ + return handler(self._wrap_model(request)) + + async def awrap_model_call(self, request: ModelRequest, + handler: Callable[[ModelRequest], Awaitable[ModelResponse]]) -> ModelCallResult: + """Asynchronously wrap the model call to use a FoundryToolBindingChatModel. + + :param request: The model request. + :type request: ModelRequest + :param handler: The model call handler. + :type handler: Callable[[ModelRequest], Awaitable[ModelResponse]] + :return: The model call result. + :rtype: ModelCallResult + """ + return await handler(self._wrap_model(request)) + + def _wrap_model(self, request: ModelRequest) -> ModelRequest: + """Wrap the model in the request with a FoundryToolBindingChatModel. + + :param request: The model request. + :type request: ModelRequest + :return: The modified model request. + :rtype: ModelRequest + """ + if not self._foundry_tools_to_bind: + return request + wrapper = FoundryToolLateBindingChatModel(request.model, self._foundry_tools_to_bind) + return request.override(model=wrapper) + + def wrap_tool_call(self, request: ToolCallRequest, + handler: Callable[[ToolCallRequest], ToolMessage | Command]) -> ToolMessage | Command: + """Wrap the tool call to use FoundryToolCallWrapper. + + :param request: The tool call request. + :type request: ToolCallRequest + :param handler: The tool call handler. + :type handler: Callable[[ToolCallRequest], ToolMessage | Command] + :return: The tool call result. + :rtype: ToolMessage | Command + """ + return self._tool_call_wrapper.call_tool(request, handler) + + async def awrap_tool_call( + self, + request: ToolCallRequest, + handler: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command]]) -> ToolMessage | Command: + """Asynchronously wrap the tool call to use FoundryToolCallWrapper. + + :param request: The tool call request. + :type request: ToolCallRequest + :param handler: The tool call handler. 
+ :type handler: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command]] + :return: The tool call result. + :rtype: ToolMessage | Command + """ + return await self._tool_call_wrapper.call_tool_async(request, handler) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py new file mode 100644 index 000000000000..6915148a45af --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py @@ -0,0 +1,148 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from collections import defaultdict +from typing import Any, Dict, Iterable, List, Optional, Tuple, Union, overload + +from langchain_core.tools import BaseTool, StructuredTool +from pydantic import BaseModel, Field, create_model + +from azure.ai.agentserver.core import AgentServerContext +from azure.ai.agentserver.core.tools import FoundryTool, FoundryToolLike, ResolvedFoundryTool, SchemaDefinition, ensure_foundry_tool +from azure.ai.agentserver.core.tools.utils import ToolNameResolver + + +class ResolvedTools(Iterable[BaseTool]): + """A resolved view of foundry tools into LangChain tools. + + :param tools: An iterable of tuples of resolved foundry tools and their corresponding LangChain tools. + :type tools: Iterable[Tuple[ResolvedFoundryTool, BaseTool]] + """ + def __init__(self, tools: Iterable[Tuple[ResolvedFoundryTool, BaseTool]]): + self._by_source_id: Dict[str, List[BaseTool]] = defaultdict(list) + for rt, t in tools: + self._by_source_id[rt.definition.id].append(t) + + @overload + def get(self, tool: FoundryToolLike, /) -> Iterable[BaseTool]: + """Get the LangChain tools for the given foundry tool. + + :param tool: The foundry tool to get the LangChain tools for. 
+ :type tool: FoundryToolLike + :return: The list of LangChain tools for the given foundry tool. + :rtype: Iterable[BaseTool] + """ + ... + + @overload + def get(self, tools: Iterable[FoundryToolLike], /) -> Iterable[BaseTool]: + """Get the LangChain tools for the given foundry tools. + + :param tools: The foundry tools to get the LangChain tools for. + :type tools: Iterable[FoundryToolLike] + :return: The list of LangChain tools for the given foundry tools. + :rtype: Iterable[BaseTool] + """ + ... + + @overload + def get(self) -> Iterable[BaseTool]: + """Get all LangChain tools. + + :return: The list of all LangChain tools. + :rtype: Iterable[BaseTool] + """ + ... + + def get(self, tool: Union[FoundryToolLike, Iterable[FoundryToolLike], None] = None) -> Iterable[BaseTool]: + """Get the LangChain tools for the given foundry tool(s), or all tools if none is given. + + :param tool: The foundry tool or tools to get the LangChain tools for, or None to get all tools. + :type tool: Union[FoundryToolLike, Iterable[FoundryToolLike], None] + :return: The list of LangChain tools for the given foundry tool(s), or all tools if none is given. + :rtype: Iterable[BaseTool] + """ + if tool is None: + yield from self + return + + tool_list = [tool] if not isinstance(tool, Iterable) else tool + for t in tool_list: + ft = ensure_foundry_tool(t) + yield from self._by_source_id.get(ft.id, []) + + def __iter__(self): + for tool_list in self._by_source_id.values(): + yield from tool_list + + +class FoundryLangChainToolResolver: + """Resolves foundry tools into LangChain tools. + + :param name_resolver: The tool name resolver. + :type name_resolver: Optional[ToolNameResolver] + """ + def __init__(self, name_resolver: Optional[ToolNameResolver] = None): + self._name_resolver = name_resolver or ToolNameResolver() + + async def resolve_from_registry(self) -> ResolvedTools: + """Resolve the foundry tools from the global registry into LangChain tools. + + :return: The resolved LangChain tools. 
+ :rtype: Iterable[Tuple[ResolvedFoundryTool, BaseTool]] + """ + return await self.resolve(get_registry()) + + async def resolve(self, foundry_tools: List[FoundryToolLike]) -> ResolvedTools: + """Resolve the given foundry tools into LangChain tools. + + :param foundry_tools: The foundry tools to resolve. + :type foundry_tools: List[FoundryToolLike] + :return: The resolved LangChain tools. + :rtype: Iterable[Tuple[ResolvedFoundryTool, BaseTool]] + """ + context = AgentServerContext.get() + resolved_foundry_tools = await context.tools.catalog.list(foundry_tools) + return ResolvedTools(tools=((tool, self._create_structured_tool(tool)) for tool in resolved_foundry_tools)) + + def _create_structured_tool(self, resolved_tool: ResolvedFoundryTool) -> StructuredTool: + name = self._name_resolver.resolve(resolved_tool) + args_schema = self._create_pydantic_model(name, resolved_tool.input_schema) + + async def _tool_func(**kwargs: Any) -> str: + result = await AgentServerContext.get().tools.invoke(resolved_tool, kwargs) + if isinstance(result, dict): + import json + return json.dumps(result) + return str(result) + + return StructuredTool.from_function( + name=name, + description=resolved_tool.details.description, + coroutine=_tool_func, + args_schema=args_schema + ) + + @classmethod + def _create_pydantic_model(cls, tool_name: str, input_schema: SchemaDefinition) -> type[BaseModel]: + field_definitions = {} + required_fields = input_schema.required + for prop_name, prop in input_schema.properties.items(): + py_type = prop.type.py_type + default = ... if prop_name in required_fields else None + field_definitions[prop_name] = (py_type, Field(default, description=prop.description)) + + model_name = f"{tool_name.replace('-', '_').replace(' ', '_').title()}-Input" + return create_model(model_name, **field_definitions) + + +_tool_registry: List[FoundryToolLike] = [] + + +def get_registry() -> List[FoundryToolLike]: + """Get the global foundry tool registry. 
+ + :return: The list of registered foundry tools. + :rtype: List[FoundryToolLike] + """ + return _tool_registry diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py new file mode 100644 index 000000000000..9dac2ec3a731 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py @@ -0,0 +1,91 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from typing import Awaitable, Callable, List, TypedDict, Union + +from azure.ai.agentserver.core.tools import FoundryToolLike +from langchain_core.messages import ToolMessage +from langgraph.prebuilt.tool_node import AsyncToolCallWrapper, ToolCallRequest, ToolCallWrapper +from langgraph.types import Command + +ToolInvocationResult = Union[ToolMessage, Command] +ToolInvocation = Callable[[ToolCallRequest], ToolInvocationResult] +AsyncToolInvocation = Callable[[ToolCallRequest], Awaitable[ToolInvocationResult]] + + +class FoundryToolNodeWrappers(TypedDict): + """A TypedDict for Foundry tool node wrappers. + + Example:: + >>> from langgraph.prebuilt import ToolNode + >>> call_wrapper = FoundryToolCallWrapper(...) + >>> ToolNode([...], **call_wrapper.as_wrappers()) + + :param wrap_tool_call: The synchronous tool call wrapper. + :type wrap_tool_call: ToolCallWrapper + :param awrap_tool_call: The asynchronous tool call wrapper. 
+ :type awrap_tool_call: AsyncToolCallWrapper + """ + + wrap_tool_call: ToolCallWrapper + + awrap_tool_call: AsyncToolCallWrapper + + +class FoundryToolCallWrapper: + """A ToolCallWrapper that tries to resolve invokable foundry tools from context if tool is not resolved yet.""" + def __init__(self, foundry_tools: List[FoundryToolLike]): + self._allowed_foundry_tools = foundry_tools + + def as_wrappers(self) -> FoundryToolNodeWrappers: + """Get the wrappers as a TypedDict. + + :return: The wrappers as a TypedDict. + :rtype: FoundryToolNodeWrappers + """ + return FoundryToolNodeWrappers( + wrap_tool_call=self.call_tool, + awrap_tool_call=self.call_tool_async, + ) + + def call_tool(self, request: ToolCallRequest, invocation: ToolInvocation) -> ToolInvocationResult: + """Call the tool, resolving foundry tools from context if necessary. + + :param request: The tool call request. + :type request: ToolCallRequest + :param invocation: The tool invocation function. + :type invocation: ToolInvocation + :return: The result of the tool invocation. + :rtype: ToolInvocationResult + """ + return invocation(self._maybe_calling_foundry_tool(request)) + + async def call_tool_async(self, request: ToolCallRequest, invocation: AsyncToolInvocation) -> ToolInvocationResult: + """Call the tool, resolving foundry tools from context if necessary. + + :param request: The tool call request. + :type request: ToolCallRequest + :param invocation: The tool invocation function. + :type invocation: AsyncToolInvocation + :return: The result of the tool invocation. 
+ :rtype: ToolInvocationResult + """ + return await invocation(self._maybe_calling_foundry_tool(request)) + + def _maybe_calling_foundry_tool(self, request: ToolCallRequest) -> ToolCallRequest: + if request.tool or not self._allowed_foundry_tools: + # tool is already resolved + return request + + from .._context import LanggraphRunContext + + tool_name = request.tool_call["name"] + for t in LanggraphRunContext.get_current().tools.resolved_tools.get(self._allowed_foundry_tools): + if t.name == tool_name: + return ToolCallRequest( + tool_call=request.tool_call, + tool=t, + state=request.state, + runtime=request.runtime, + ) + return request diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/react_agent_tool.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/react_agent_tool.py new file mode 100644 index 000000000000..111a2307c6af --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/react_agent_tool.py @@ -0,0 +1,47 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +import os + +from azure.identity import DefaultAzureCredential, get_bearer_token_provider +from dotenv import load_dotenv +from langchain.agents import create_agent +from langchain_core.tools import tool +from langchain_openai import AzureChatOpenAI +from langgraph.checkpoint.memory import MemorySaver + +from azure.ai.agentserver.langgraph import from_langgraph +from azure.ai.agentserver.langgraph.tools import use_foundry_tools + +load_dotenv() + +token_provider = get_bearer_token_provider( + DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default" +) + +memory = MemorySaver() +deployment_name = os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", "gpt-4o") +model = AzureChatOpenAI(model=deployment_name, azure_ad_token_provider=token_provider) + +foundry_tools = use_foundry_tools([ + { + # use the python tool to calculate what is 4 * 3.82. and then find its square root and then find the square root of that result + "type": "code_interpreter" + }, + { + # Give me the Azure CLI commands to create an Azure Container App with a managed identity. 
search Microsoft Learn + "type": "mcp", + "project_connection_id": "MicrosoftLearn" + }, + # { + # "type": "mcp", + # "project_connection_id": "FoundryMCPServerpreview" + # } +]) + + +agent_executor = create_agent(model, checkpointer=memory, middleware=[foundry_tools]) + +if __name__ == "__main__": + # host the langgraph agent + from_langgraph(agent_executor).run() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py index 7daa62d0ec9f..9084b07872a9 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py @@ -14,7 +14,7 @@ from langchain_openai import AzureChatOpenAI from langgraph.checkpoint.memory import MemorySaver -from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient +from azure.ai.agentserver.core.tools import FoundryToolClient from azure.ai.agentserver.langgraph import ToolClient, from_langgraph from azure.identity.aio import DefaultAzureCredential @@ -59,7 +59,7 @@ async def quickstart(): ] # Create the AzureAIToolClient # This client supports both MCP tools and Azure AI Tools API - tool_client = AzureAIToolClient( + tool_client = FoundryToolClient( endpoint=project_endpoint, credential=credential, tools=tool_definitions @@ -70,7 +70,7 @@ async def quickstart(): # List all available tools and convert to LangChain format print("Fetching tools from Azure AI Tool Client...") - tools = await client.list_tools() + tools = await client.list_tools_details() print(f"Found {len(tools)} tools:") for tool in tools: print(f" - {tool.name}: {tool.description}") From 2c5e2c84d3f6dcad57bc0ce5522cd8b50a6d3e41 Mon Sep 17 00:00:00 2001 From: Jun'an Chen Date: Wed, 21 Jan 2026 12:19:43 -0800 Subject: [PATCH 51/94] [agentserver] Fix 
linter errors in core & lg pkg (#44760) * remove unused code in af * validate core with tox * validate langgraph with tox --- .../agentframework/_agent_framework.py | 51 +- .../agentserver/core/application/__init__.py | 2 +- .../core/application/_configuration.py | 4 +- .../agentserver/core/application/_options.py | 5 +- .../core/application/_package_metadata.py | 4 +- .../azure/ai/agentserver/core/server/base.py | 52 +- .../core/server/common/constants.py | 2 +- .../ai/agentserver/core/tools/__init__.py | 82 +- .../ai/agentserver/core/tools/_exceptions.py | 2 - .../agentserver/core/tools/client/_client.py | 99 +- .../core/tools/client/_configuration.py | 40 +- .../agentserver/core/tools/client/_models.py | 894 +++++++++--------- .../core/tools/client/operations/_base.py | 4 +- .../operations/_foundry_hosted_mcp_tools.py | 6 +- .../core/tools/runtime/_catalog.py | 11 +- .../agentserver/core/tools/runtime/_facade.py | 5 +- .../core/tools/runtime/_resolver.py | 17 +- .../core/tools/runtime/_runtime.py | 10 +- .../core/tools/runtime/_starlette.py | 2 +- .../agentserver/core/tools/runtime/_user.py | 12 +- .../agentserver/core/tools/utils/__init__.py | 6 +- .../core/tools/utils/_name_resolver.py | 4 +- .../ai/agentserver/core/utils/_credential.py | 33 +- .../custom_mock_agent_with_tools_test.py | 108 --- .../ai/agentserver/langgraph/__init__.py | 2 +- .../ai/agentserver/langgraph/_context.py | 3 +- .../ai/agentserver/langgraph/langgraph.py | 54 +- .../models/human_in_the_loop_helper.py | 119 ++- .../models/human_in_the_loop_json_helper.py | 29 +- .../models/response_api_converter.py | 29 +- .../models/response_api_default_converter.py | 144 +-- ...ponse_api_non_stream_response_converter.py | 67 +- .../models/response_api_request_converter.py | 2 +- .../response_api_stream_response_converter.py | 36 +- .../item_resource_helpers.py | 19 +- .../response_event_generator.py | 10 +- ..._function_call_argument_event_generator.py | 14 +- 
.../response_output_item_event_generator.py | 13 +- .../response_output_text_event_generator.py | 5 +- .../response_stream_event_generator.py | 24 +- .../agentserver/langgraph/tools/__init__.py | 12 +- .../agentserver/langgraph/tools/_builder.py | 11 +- .../langgraph/tools/_chat_model.py | 17 +- .../langgraph/tools/_middleware.py | 9 +- .../agentserver/langgraph/tools/_resolver.py | 16 +- .../agentserver/langgraph/tools/_tool_node.py | 7 +- .../samples/custom_state/main.py | 29 +- .../graph_factory_example.py | 128 --- .../use_tool_client_example.py | 110 --- 49 files changed, 1104 insertions(+), 1260 deletions(-) delete mode 100644 sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_with_tools_test.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/graph_factory_example.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py index 8d22cd9ff263..40e1e72b70a4 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py @@ -5,31 +5,28 @@ from __future__ import annotations import os -from typing import TYPE_CHECKING, Any, AsyncGenerator, Awaitable, Optional, Protocol, Union, List +from typing import Any, AsyncGenerator, Optional, TYPE_CHECKING, Union -from agent_framework import AgentProtocol, AIFunction, CheckpointStorage, InMemoryCheckpointStorage, WorkflowCheckpoint -from agent_framework.azure import AzureAIClient # pylint: disable=no-name-in-module +from agent_framework import AgentProtocol, CheckpointStorage, 
WorkflowCheckpoint from agent_framework._workflows import get_checkpoint_summary +from agent_framework.azure import AzureAIClient # pylint: disable=no-name-in-module from opentelemetry import trace -from azure.ai.agentserver.core.tools import OAuthConsentRequiredError from azure.ai.agentserver.core import AgentRunContext, FoundryCBAgent from azure.ai.agentserver.core.constants import Constants as AdapterConstants from azure.ai.agentserver.core.logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger from azure.ai.agentserver.core.models import ( - CreateResponse, Response as OpenAIResponse, ResponseStreamEvent, ) from azure.ai.agentserver.core.models.projects import ResponseErrorEvent, ResponseFailedEvent - +from azure.ai.agentserver.core.tools import OAuthConsentRequiredError from .models.agent_framework_input_converters import AgentFrameworkInputConverter from .models.agent_framework_output_non_streaming_converter import ( AgentFrameworkOutputNonStreamingConverter, ) from .models.agent_framework_output_streaming_converter import AgentFrameworkOutputStreamingConverter from .models.human_in_the_loop_helper import HumanInTheLoopHelper -from .models.constants import Constants from .persistence import AgentThreadRepository, CheckpointRepository if TYPE_CHECKING: @@ -38,24 +35,6 @@ logger = get_logger() -class AgentFactory(Protocol): - """Protocol for agent factory functions. - - An agent factory is a callable that takes a list of tools and returns - an AgentProtocol, either synchronously or asynchronously. - """ - - def __call__(self, tools: List[AIFunction]) -> Union[AgentProtocol, Awaitable[AgentProtocol]]: - """Create an AgentProtocol using the provided tools. - - :param tools: The list of AIFunction tools available to the agent. - :type tools: List[AIFunction] - :return: An Agent Framework agent, or an awaitable that resolves to one. - :rtype: Union[AgentProtocol, Awaitable[AgentProtocol]] - """ - ... 
- - class AgentFrameworkCBAgent(FoundryCBAgent): """ Adapter class for integrating Agent Framework agents with the FoundryCB agent interface. @@ -84,7 +63,7 @@ def __init__(self, agent: AgentProtocol, :param agent: The Agent Framework agent to adapt, or a callable that takes ToolClient and returns AgentProtocol (sync or async). - :type agent: Union[AgentProtocol, AgentFactory] + :type agent: AgentProtocol :param credentials: Azure credentials for authentication. :type credentials: Optional[AsyncTokenCredential] :param thread_repository: An optional AgentThreadRepository instance for managing thread messages. @@ -105,26 +84,6 @@ def agent(self) -> "AgentProtocol": """ return self._agent - def _resolve_stream_timeout(self, request_body: CreateResponse) -> float: - """Resolve idle timeout for streaming updates. - - Order of precedence: - 1) request_body.stream_timeout_s (if provided) - 2) env var Constants.AGENTS_ADAPTER_STREAM_TIMEOUT_S - 3) Constants.DEFAULT_STREAM_TIMEOUT_S - - :param request_body: The CreateResponse request body. - :type request_body: CreateResponse - - :return: The resolved stream timeout in seconds. 
- :rtype: float - """ - override = request_body.get("stream_timeout_s", None) - if override is not None: - return float(override) - env_val = os.getenv(Constants.AGENTS_ADAPTER_STREAM_TIMEOUT_S) - return float(env_val) if env_val is not None else float(Constants.DEFAULT_STREAM_TIMEOUT_S) - def init_tracing(self): try: otel_exporter_endpoint = os.environ.get(AdapterConstants.OTEL_EXPORTER_ENDPOINT) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/__init__.py index ccf4062cce31..6e70a718531c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/__init__.py @@ -9,4 +9,4 @@ "set_current_app" ] -from ._package_metadata import PackageMetadata, set_current_app \ No newline at end of file +from ._package_metadata import PackageMetadata, set_current_app diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_configuration.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_configuration.py index fe05dae18a67..1f8a01d57639 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_configuration.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_configuration.py @@ -31,12 +31,12 @@ class ToolsConfiguration: catalog_cache_max_size: int = 1024 -@dataclass(frozen=True, kw_only=True) +@dataclass(frozen=True) class AgentServerConfiguration: """Resolved configuration for the Agent Server application.""" - agent_name: str = "$default" project_endpoint: str credential: AsyncTokenCredential + agent_name: str = "$default" http: HttpServerConfiguration = field(default_factory=HttpServerConfiguration) tools: ToolsConfiguration = field(default_factory=ToolsConfiguration) diff 
--git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_options.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_options.py index cb4e8bde0bfd..dc80c1538327 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_options.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_options.py @@ -1,7 +1,9 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -from typing import Literal, NotRequired, TypedDict, Union +from typing import Literal, TypedDict, Union + +from typing_extensions import NotRequired from azure.core.credentials import TokenCredential from azure.core.credentials_async import AsyncTokenCredential @@ -41,4 +43,3 @@ class ToolsOptions(TypedDict): """ catalog_cache_ttl: NotRequired[int] catalog_cache_max_size: NotRequired[int] - diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py index 36ff9313a6a2..5701110e5c7f 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py @@ -41,10 +41,10 @@ def as_user_agent(self, component: str | None = None) -> str: def set_current_app(app: PackageMetadata) -> None: - global _app + global _app # pylint: disable=W0603 _app = app def get_current_app() -> PackageMetadata: - global _app + global _app # pylint: disable=W0602 return _app diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 
a5f69664cf66..2afbed6e99a8 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -3,7 +3,7 @@ # --------------------------------------------------------- # pylint: disable=broad-exception-caught,unused-argument,logging-fstring-interpolation,too-many-statements,too-many-return-statements # mypy: ignore-errors -import asyncio +import asyncio # pylint: disable=C4763 import inspect import json import os @@ -12,8 +12,6 @@ from typing import Any, AsyncGenerator, Generator, Optional, Union import uvicorn -from azure.core.credentials import TokenCredential -from azure.core.credentials_async import AsyncTokenCredential from opentelemetry import context as otel_context, trace from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from starlette.applications import Starlette @@ -25,10 +23,12 @@ from starlette.routing import Route from starlette.types import ASGIApp +from azure.core.credentials import TokenCredential +from azure.core.credentials_async import AsyncTokenCredential from azure.identity.aio import DefaultAzureCredential as AsyncDefaultTokenCredential from ._context import AgentServerContext -from ..models import projects as project_models +from ..models import projects as project_models from ..constants import Constants from ..logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger, get_project_endpoint, request_context from ..models import ( @@ -37,8 +37,7 @@ ) from .common.agent_run_context import AgentRunContext -from ..tools import DefaultFoundryToolRuntime, FoundryTool, FoundryToolClient, FoundryToolRuntime, UserInfo, \ - UserInfoContextMiddleware +from ..tools import DefaultFoundryToolRuntime, UserInfoContextMiddleware from ..utils._credential import AsyncTokenCredentialAdapter logger = get_logger() @@ -405,24 +404,6 @@ def setup_otlp_exporter(self, endpoint, provider): 
provider.add_span_processor(processor) logger.info(f"Tracing setup with OTLP exporter: {endpoint}") - def get_tool_client( - self, tools: Optional[list[FoundryTool]], user_info: Optional[UserInfo] - ) -> FoundryToolClient: - # TODO: remove this method - logger.debug("Creating AzureAIToolClient with tools: %s", tools) - if not self.credentials: - raise ValueError("Credentials are required to create Tool Client.") - - tools_endpoint, agent_name = self._configure_endpoint() - - return FoundryToolClient( - endpoint=tools_endpoint, - credential=self.credentials, - tools=tools, - user=user_info, - agent_name=agent_name, - ) - def _event_to_sse_chunk(event: ResponseStreamEvent) -> str: event_data = json.dumps(event.as_dict()) @@ -432,7 +413,11 @@ def _event_to_sse_chunk(event: ResponseStreamEvent) -> str: def _keep_alive_comment() -> str: - """Generate a keep-alive SSE comment to maintain connection.""" + """Generate a keep-alive SSE comment to maintain connection. + + :return: The keep-alive comment string. + :rtype: str + """ return ": keep-alive\n\n" @@ -440,15 +425,20 @@ async def _iter_with_keep_alive( it: AsyncGenerator[ResponseStreamEvent, None] ) -> AsyncGenerator[Optional[ResponseStreamEvent], None]: """Wrap an async iterator with keep-alive mechanism. - + If no event is received within KEEP_ALIVE_INTERVAL seconds, yields None as a signal to send a keep-alive comment. The original iterator is protected with asyncio.shield to ensure it continues running even when timeout occurs. + + :param it: The async generator to wrap. + :type it: AsyncGenerator[ResponseStreamEvent, None] + :return: An async generator that yields events or None for keep-alive. 
+ :rtype: AsyncGenerator[Optional[ResponseStreamEvent], None] """ it_anext = it.__anext__ pending_task: Optional[asyncio.Task] = None - + while True: try: # If there's a pending task from previous timeout, wait for it first @@ -457,14 +447,14 @@ async def _iter_with_keep_alive( pending_task = None yield event continue - + # Create a task for the next event next_event_task = asyncio.create_task(it_anext()) - + try: # Shield the task and wait with timeout event = await asyncio.wait_for( - asyncio.shield(next_event_task), + asyncio.shield(next_event_task), timeout=KEEP_ALIVE_INTERVAL ) yield event @@ -473,7 +463,7 @@ async def _iter_with_keep_alive( # Save task to check in next iteration pending_task = next_event_task yield None - + except StopAsyncIteration: # Iterator exhausted break diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/constants.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/constants.py index 7d21ee7a31ff..6d4fb628a7f2 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/constants.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/constants.py @@ -3,4 +3,4 @@ # --------------------------------------------------------- # Reserved function name for HITL. 
-HUMAN_IN_THE_LOOP_FUNCTION_NAME = "__hosted_agent_adapter_hitl__" \ No newline at end of file +HUMAN_IN_THE_LOOP_FUNCTION_NAME = "__hosted_agent_adapter_hitl__" diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/__init__.py index f158cd370990..5b356f38c825 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/__init__.py @@ -5,13 +5,75 @@ __path__ = __import__('pkgutil').extend_path(__path__, __name__) from .client._client import FoundryToolClient -from ._exceptions import * -from .client._models import FoundryConnectedTool, FoundryHostedMcpTool, FoundryTool, FoundryToolProtocol, \ - FoundryToolSource, ResolvedFoundryTool, SchemaDefinition, SchemaProperty, SchemaType, UserInfo -from .runtime._catalog import * -from .runtime._facade import * -from .runtime._invoker import * -from .runtime._resolver import * -from .runtime._runtime import * -from .runtime._starlette import * -from .runtime._user import * \ No newline at end of file +from ._exceptions import ( + ToolInvocationError, + OAuthConsentRequiredError, + UnableToResolveToolInvocationError, + InvalidToolFacadeError, +) +from .client._models import ( + FoundryConnectedTool, + FoundryHostedMcpTool, + FoundryTool, + FoundryToolProtocol, + FoundryToolSource, + ResolvedFoundryTool, + SchemaDefinition, + SchemaProperty, + SchemaType, + UserInfo, +) +from .runtime._catalog import ( + FoundryToolCatalog, + CachedFoundryToolCatalog, + DefaultFoundryToolCatalog, +) +from .runtime._facade import FoundryToolFacade, FoundryToolLike, ensure_foundry_tool +from .runtime._invoker import FoundryToolInvoker, DefaultFoundryToolInvoker +from .runtime._resolver import FoundryToolInvocationResolver, DefaultFoundryToolInvocationResolver +from .runtime._runtime import FoundryToolRuntime, 
DefaultFoundryToolRuntime +from .runtime._starlette import UserInfoContextMiddleware +from .runtime._user import UserProvider, ContextVarUserProvider + +__all__ = [ + # Client + "FoundryToolClient", + # Exceptions + "ToolInvocationError", + "OAuthConsentRequiredError", + "UnableToResolveToolInvocationError", + "InvalidToolFacadeError", + # Models + "FoundryConnectedTool", + "FoundryHostedMcpTool", + "FoundryTool", + "FoundryToolProtocol", + "FoundryToolSource", + "ResolvedFoundryTool", + "SchemaDefinition", + "SchemaProperty", + "SchemaType", + "UserInfo", + # Catalog + "FoundryToolCatalog", + "CachedFoundryToolCatalog", + "DefaultFoundryToolCatalog", + # Facade + "FoundryToolFacade", + "FoundryToolLike", + "ensure_foundry_tool", + # Invoker + "FoundryToolInvoker", + "DefaultFoundryToolInvoker", + # Resolver + "FoundryToolInvocationResolver", + "DefaultFoundryToolInvocationResolver", + # Runtime + "FoundryToolRuntime", + "DefaultFoundryToolRuntime", + # Starlette + "UserInfoContextMiddleware", + # User + "UserProvider", + "ContextVarUserProvider", +] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/_exceptions.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/_exceptions.py index b91c1f71c7a3..a5fe7726e9f1 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/_exceptions.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/_exceptions.py @@ -72,5 +72,3 @@ class InvalidToolFacadeError(RuntimeError): This exception is raised when a tool facade does not conform to the expected structure or contains invalid data. 
""" - pass - diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py index cbd0dbba6aa6..030fbe26b5e7 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py @@ -1,26 +1,43 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -import asyncio +import asyncio # pylint: disable=C4763 import itertools from collections import defaultdict -from typing import Any, AsyncContextManager, AsyncIterable, Awaitable, Callable, Collection, Coroutine, DefaultDict, Dict, \ - Iterable, List, \ - Mapping, Optional, \ - Tuple +from typing import ( + Any, + AsyncContextManager, + Awaitable, Collection, + DefaultDict, + Dict, + Iterable, + List, + Mapping, + Optional, + Tuple, + cast, +) from azure.core import AsyncPipelineClient from azure.core.credentials_async import AsyncTokenCredential from azure.core.tracing.decorator_async import distributed_trace_async from ._configuration import FoundryToolClientConfiguration -from ._models import FoundryTool, FoundryToolDetails, FoundryToolSource, ResolvedFoundryTool, UserInfo +from ._models import ( + FoundryConnectedTool, + FoundryHostedMcpTool, + FoundryTool, + FoundryToolDetails, + FoundryToolSource, + ResolvedFoundryTool, + UserInfo, +) from .operations._foundry_connected_tools import FoundryConnectedToolsOperations from .operations._foundry_hosted_mcp_tools import FoundryMcpToolsOperations -from .._exceptions import ToolInvocationError +from .._exceptions import OAuthConsentRequiredError, ToolInvocationError -class FoundryToolClient(AsyncContextManager["FoundryToolClient"]): +class 
FoundryToolClient(AsyncContextManager["FoundryToolClient"]): # pylint: disable=C4748 """Asynchronous client for aggregating tools from Azure AI MCP and Tools APIs. This client provides access to tools from both MCP (Model Context Protocol) servers @@ -33,15 +50,23 @@ class FoundryToolClient(AsyncContextManager["FoundryToolClient"]): Credential for authenticating requests to the service. Use credentials from azure-identity like DefaultAzureCredential. :type credential: ~azure.core.credentials.TokenCredential + :param api_version: The API version to use for this operation. + :type api_version: str or None """ - def __init__(self, endpoint: str, credential: "AsyncTokenCredential"): + def __init__( # pylint: disable=C4718 + self, + endpoint: str, + credential: "AsyncTokenCredential", + ) -> None: """Initialize the asynchronous Azure AI Tool Client. :param endpoint: The service endpoint URL. :type endpoint: str :param credential: Credentials for authenticating requests. :type credential: ~azure.core.credentials.TokenCredential + :param api_version: The API version to use for this operation. + :type api_version: str or None """ # noinspection PyTypeChecker config = FoundryToolClientConfiguration(credential) @@ -51,10 +76,13 @@ def __init__(self, endpoint: str, credential: "AsyncTokenCredential"): self._connected_tools = FoundryConnectedToolsOperations(self._client) @distributed_trace_async - async def list_tools(self, - tools: Collection[FoundryTool], - agent_name, - user: Optional[UserInfo] = None) -> List[ResolvedFoundryTool]: + async def list_tools( + self, + tools: Collection[FoundryTool], + agent_name: str, + user: Optional[UserInfo] = None, + **kwargs: Any + ) -> List[ResolvedFoundryTool]: """List all available tools from configured sources. Retrieves tools from both MCP servers and Azure AI Tools API endpoints, @@ -72,6 +100,7 @@ async def list_tools(self, :raises ~azure.core.exceptions.HttpResponseError: Raised for HTTP communication failures. 
""" + _ = kwargs # Reserved for future use resolved_tools: List[ResolvedFoundryTool] = [] results = await self._list_tools_details_internal(tools, agent_name, user) for definition, details in results: @@ -79,10 +108,13 @@ async def list_tools(self, return resolved_tools @distributed_trace_async - async def list_tools_details(self, - tools: Collection[FoundryTool], - agent_name, - user: Optional[UserInfo] = None) -> Mapping[str, List[FoundryToolDetails]]: + async def list_tools_details( + self, + tools: Collection[FoundryTool], + agent_name: str, + user: Optional[UserInfo] = None, + **kwargs: Any + ) -> Mapping[str, List[FoundryToolDetails]]: """List all available tools from configured sources. Retrieves tools from both MCP servers and Azure AI Tools API endpoints, @@ -100,6 +132,7 @@ async def list_tools_details(self, :raises ~azure.core.exceptions.HttpResponseError: Raised for HTTP communication failures. """ + _ = kwargs # Reserved for future use resolved_tools: Dict[str, List[FoundryToolDetails]] = defaultdict(list) results = await self._list_tools_details_internal(tools, agent_name, user) for definition, details in results: @@ -109,33 +142,32 @@ async def list_tools_details(self, async def _list_tools_details_internal( self, tools: Collection[FoundryTool], - agent_name, + agent_name: str, user: Optional[UserInfo] = None, ) -> Iterable[Tuple[FoundryTool, FoundryToolDetails]]: tools_by_source: DefaultDict[FoundryToolSource, List[FoundryTool]] = defaultdict(list) for t in tools: tools_by_source[t.source].append(t) - listing_tools = [] + listing_tools: List[Awaitable[Iterable[Tuple[FoundryTool, FoundryToolDetails]]]] = [] if FoundryToolSource.HOSTED_MCP in tools_by_source: - # noinspection PyTypeChecker - listing_tools.append(asyncio.create_task( - self._hosted_mcp_tools.list_tools(tools_by_source[FoundryToolSource.HOSTED_MCP]) - )) + hosted_mcp_tools = cast(List[FoundryHostedMcpTool], tools_by_source[FoundryToolSource.HOSTED_MCP]) + 
listing_tools.append(self._hosted_mcp_tools.list_tools(hosted_mcp_tools)) if FoundryToolSource.CONNECTED in tools_by_source: - # noinspection PyTypeChecker - listing_tools.append(asyncio.create_task( - self._connected_tools.list_tools(tools_by_source[FoundryToolSource.CONNECTED], user, agent_name) - )) + connected_tools = cast(List[FoundryConnectedTool], tools_by_source[FoundryToolSource.CONNECTED]) + listing_tools.append(self._connected_tools.list_tools(connected_tools, user, agent_name)) iters = await asyncio.gather(*listing_tools) return itertools.chain.from_iterable(iters) @distributed_trace_async - async def invoke_tool(self, - tool: ResolvedFoundryTool, - arguments: Dict[str, Any], - agent_name: str, - user: Optional[UserInfo] = None) -> Any: + async def invoke_tool( + self, + tool: ResolvedFoundryTool, + arguments: Dict[str, Any], + agent_name: str, + user: Optional[UserInfo] = None, + **kwargs: Any + ) -> Any: """Invoke a tool by instance, name, or descriptor. :param tool: Tool to invoke, specified as an AzureAITool instance, @@ -149,13 +181,14 @@ async def invoke_tool(self, :type agent_name: str :return: The result of invoking the tool. :rtype: Any - :raises ~Tool_Client.exceptions.OAuthConsentRequiredError: + :raises ~OAuthConsentRequiredError: Raised when the service requires user OAuth consent. :raises ~azure.core.exceptions.HttpResponseError: Raised for HTTP communication failures. :raises ~ToolInvocationError: Raised when the tool invocation fails or source is not supported. 
""" + _ = kwargs # Reserved for future use if tool.source is FoundryToolSource.HOSTED_MCP: return await self._hosted_mcp_tools.invoke_tool(tool, arguments) if tool.source is FoundryToolSource.CONNECTED: diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_configuration.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_configuration.py index 5c3f19a61d55..c496ef563216 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_configuration.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_configuration.py @@ -9,27 +9,27 @@ class FoundryToolClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes - """Configuration for Azure AI Tool Client. + """Configuration for Azure AI Tool Client. - Manages authentication, endpoint configuration, and policy settings for the - Azure AI Tool Client. This class is used internally by the client and should - not typically be instantiated directly. + Manages authentication, endpoint configuration, and policy settings for the + Azure AI Tool Client. This class is used internally by the client and should + not typically be instantiated directly. - :param credential: - Azure TokenCredential for authentication. - :type credential: ~azure.core.credentials.TokenCredential - """ + :param credential: + Azure TokenCredential for authentication. 
+ :type credential: ~azure.core.credentials.TokenCredential + """ - def __init__(self, credential: "AsyncTokenCredential"): - super().__init__() + def __init__(self, credential: "AsyncTokenCredential"): + super().__init__() - self.retry_policy = policies.AsyncRetryPolicy() - self.logging_policy = policies.NetworkTraceLoggingPolicy() - self.request_id_policy = policies.RequestIdPolicy() - self.http_logging_policy = policies.HttpLoggingPolicy() - self.user_agent_policy = policies.UserAgentPolicy( - base_user_agent=get_current_app().as_user_agent("FoundryToolClient")) - self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy( - credential, "https://ai.azure.com/.default" - ) - self.redirect_policy = policies.AsyncRedirectPolicy() + self.retry_policy = policies.AsyncRetryPolicy() + self.logging_policy = policies.NetworkTraceLoggingPolicy() + self.request_id_policy = policies.RequestIdPolicy() + self.http_logging_policy = policies.HttpLoggingPolicy() + self.user_agent_policy = policies.UserAgentPolicy( + base_user_agent=get_current_app().as_user_agent("FoundryToolClient")) + self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy( + credential, "https://ai.azure.com/.default" + ) + self.redirect_policy = policies.AsyncRedirectPolicy() diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_models.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_models.py index 8664e23c7c8b..c4d5c4d96a28 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_models.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_models.py @@ -1,552 +1,598 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -import asyncio -import inspect from abc import ABC, abstractmethod from dataclasses import dataclass, field from enum import Enum -from typing import Annotated, Any, Awaitable, Callable, ClassVar, Dict, Iterable, List, Literal, Mapping, Optional, Set, Type, Union +from typing import ( + Annotated, + Any, + Awaitable, + Callable, + ClassVar, + Dict, + Iterable, + List, + Literal, + Mapping, + Optional, + Set, + Type, + Union, +) + +from pydantic import ( + AliasChoices, + AliasPath, + BaseModel, + Discriminator, + Field, + ModelWrapValidatorHandler, + Tag, + TypeAdapter, + model_validator, +) from azure.core import CaseInsensitiveEnumMeta -from pydantic import AliasChoices, AliasPath, BaseModel, Discriminator, Field, ModelWrapValidatorHandler, Tag, \ - TypeAdapter, model_validator from .._exceptions import OAuthConsentRequiredError class FoundryToolSource(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Identifies the origin of a tool. + """Identifies the origin of a tool. - Specifies whether a tool comes from an MCP (Model Context Protocol) server - or from the Azure AI Tools API (remote tools). - """ + Specifies whether a tool comes from an MCP (Model Context Protocol) server + or from the Azure AI Tools API (remote tools). 
+ """ - HOSTED_MCP = "hosted_mcp" - CONNECTED = "connected" + HOSTED_MCP = "hosted_mcp" + CONNECTED = "connected" class FoundryToolProtocol(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Identifies the protocol used by a connected tool.""" + """Identifies the protocol used by a connected tool.""" - MCP = "mcp" - A2A = "a2a" + MCP = "mcp" + A2A = "a2a" -@dataclass(frozen=True, kw_only=True, eq=False) +@dataclass(frozen=True, eq=False) class FoundryTool(ABC): - """Definition of a foundry tool including its parameters.""" - source: FoundryToolSource = field(init=False) + """Definition of a foundry tool including its parameters.""" + source: FoundryToolSource = field(init=False) - @property - @abstractmethod - def id(self) -> str: - """Unique identifier for the tool.""" - raise NotImplementedError + @property + @abstractmethod + def id(self) -> str: + """Unique identifier for the tool. - def __str__(self): - return self.id + :rtype: str + """ + raise NotImplementedError + def __str__(self): + return self.id -@dataclass(frozen=True, kw_only=True, eq=False) + +@dataclass(frozen=True, eq=False) class FoundryHostedMcpTool(FoundryTool): - """Foundry MCP tool definition. + """Foundry MCP tool definition. + + :ivar str name: Name of MCP tool. + :ivar Mapping[str, Any] configuration: Tools configuration. + """ + source: Literal[FoundryToolSource.HOSTED_MCP] = field(init=False, default=FoundryToolSource.HOSTED_MCP) + name: str + configuration: Optional[Mapping[str, Any]] = None - :ivar str name: Name of MCP tool. - :ivar Mapping[str, Any] configuration: Tools configuration. - """ - source: Literal[FoundryToolSource.HOSTED_MCP] = field(init=False, default=FoundryToolSource.HOSTED_MCP) - name: str - configuration: Optional[Mapping[str, Any]] = None + @property + def id(self) -> str: + """Unique identifier for the tool. 
- @property - def id(self) -> str: - """Unique identifier for the tool.""" - return f"{self.source}:{self.name}" + :rtype: str + """ + return f"{self.source}:{self.name}" -@dataclass(frozen=True, kw_only=True, eq=False) +@dataclass(frozen=True, eq=False) class FoundryConnectedTool(FoundryTool): - """Foundry connected tool definition. + """Foundry connected tool definition. - :ivar str project_connection_id: connection name of foundry tool. - """ - source: Literal[FoundryToolSource.CONNECTED] = field(init=False, default=FoundryToolSource.CONNECTED) - protocol: str - project_connection_id: str + :ivar str project_connection_id: connection name of foundry tool. + """ + source: Literal[FoundryToolSource.CONNECTED] = field(init=False, default=FoundryToolSource.CONNECTED) + protocol: str + project_connection_id: str - @property - def id(self) -> str: - return f"{self.source}:{self.protocol}:{self.project_connection_id}" + @property + def id(self) -> str: + return f"{self.source}:{self.protocol}:{self.project_connection_id}" @dataclass(frozen=True) class FoundryToolDetails: - """Details about a Foundry tool. + """Details about a Foundry tool. - :ivar str name: Name of the tool. - :ivar str description: Description of the tool. - :ivar SchemaDefinition input_schema: Input schema for the tool parameters. - :ivar Optional[SchemaDefinition] metadata: Optional metadata schema for the tool. - """ - name: str - description: str - input_schema: "SchemaDefinition" - metadata: Optional["SchemaDefinition"] = None + :ivar str name: Name of the tool. + :ivar str description: Description of the tool. + :ivar SchemaDefinition input_schema: Input schema for the tool parameters. + :ivar Optional[SchemaDefinition] metadata: Optional metadata schema for the tool. + """ + name: str + description: str + input_schema: "SchemaDefinition" + metadata: Optional["SchemaDefinition"] = None @dataclass(frozen=True) class ResolvedFoundryTool: - """Resolved Foundry tool with definition and details. 
+ """Resolved Foundry tool with definition and details. + + :ivar ToolDefinition definition: + Optional tool definition object, or None. + :ivar FoundryToolDetails details: + Details about the tool, including name, description, and input schema. + """ + + definition: FoundryTool + details: FoundryToolDetails + + @property + def id(self) -> str: + return f"{self.definition.id}:{self.details.name}" + + @property + def source(self) -> FoundryToolSource: + """Origin of the tool. - :ivar ToolDefinition definition: - Optional tool definition object, or None. - :ivar FoundryToolDetails details: - Details about the tool, including name, description, and input schema. - """ + :rtype: FoundryToolSource + """ + return self.definition.source - definition: FoundryTool - details: FoundryToolDetails - invoker: Optional[Callable[..., Awaitable[Any]]] = None # TODO: deprecated + @property + def name(self) -> str: + """Name of the tool. - @property - def id(self) -> str: - return f"{self.definition.id}:{self.details.name}" + :rtype: str + """ + return self.details.name - @property - def source(self) -> FoundryToolSource: - """Origin of the tool.""" - return self.definition.source + @property + def description(self) -> str: + """Description of the tool. - @property - def name(self) -> str: - """Name of the tool.""" - return self.details.name + :rtype: str + """ + return self.details.description - @property - def description(self) -> str: - """Description of the tool.""" - return self.details.description + @property + def input_schema(self) -> "SchemaDefinition": + """Input schema of the tool. 
- @property - def input_schema(self) -> "SchemaDefinition": - """Input schema of the tool.""" - return self.details.input_schema + :rtype: SchemaDefinition + """ + return self.details.input_schema - @property - def metadata(self) -> Optional["SchemaDefinition"]: - """Metadata schema of the tool, if any.""" - return self.details.metadata + @property + def metadata(self) -> Optional["SchemaDefinition"]: + """Metadata schema of the tool, if any.""" + return self.details.metadata @dataclass(frozen=True) class UserInfo: - """Represents user information. + """Represents user information. - :ivar str object_id: User's object identifier. - :ivar str tenant_id: Tenant identifier. - """ + :ivar str object_id: User's object identifier. + :ivar str tenant_id: Tenant identifier. + """ - object_id: str - tenant_id: str + object_id: str + tenant_id: str -class SchemaType(str, Enum): - """ - Enumeration of possible schema types. +class SchemaType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """ + Enumeration of possible schema types. - :ivar py_type: The corresponding Python runtime type for this schema type - (e.g., ``SchemaType.STRING.py_type is str``). - """ + :ivar py_type: The corresponding Python runtime type for this schema type + (e.g., ``SchemaType.STRING.py_type is str``). 
+ """ - py_type: Type[Any] - """The corresponding Python runtime type for this schema type.""" + py_type: Type[Any] + """The corresponding Python runtime type for this schema type.""" - STRING = ("string", str) - """Schema type for string values (maps to ``str``).""" + STRING = ("string", str) + """Schema type for string values (maps to ``str``).""" - NUMBER = ("number", float) - """Schema type for numeric values with decimals (maps to ``float``).""" + NUMBER = ("number", float) + """Schema type for numeric values with decimals (maps to ``float``).""" - INTEGER = ("integer", int) - """Schema type for integer values (maps to ``int``).""" + INTEGER = ("integer", int) + """Schema type for integer values (maps to ``int``).""" - BOOLEAN = ("boolean", bool) - """Schema type for boolean values (maps to ``bool``).""" + BOOLEAN = ("boolean", bool) + """Schema type for boolean values (maps to ``bool``).""" - ARRAY = ("array", list) - """Schema type for array values (maps to ``list``).""" + ARRAY = ("array", list) + """Schema type for array values (maps to ``list``).""" - OBJECT = ("object", dict) - """Schema type for object/dictionary values (maps to ``dict``).""" + OBJECT = ("object", dict) + """Schema type for object/dictionary values (maps to ``dict``).""" - def __new__(cls, value: str, py_type: Type[Any]): - """ - Create an enum member whose value is the schema type string, while also - attaching the mapped Python type. + def __new__(cls, value: str, py_type: Type[Any]): + """ + Create an enum member whose value is the schema type string, while also + attaching the mapped Python type. - :param value: The serialized schema type string (e.g. ``"string"``). - :param py_type: The mapped Python runtime type (e.g. ``str``). - """ - obj = str.__new__(cls, value) - obj._value_ = value - obj.py_type = py_type - return obj + :param value: The serialized schema type string (e.g. ``"string"``). + :type value: str + :param py_type: The mapped Python runtime type (e.g. ``str``). 
+ :type py_type: Type[Any] + :return: The created enum member. + :rtype: SchemaType + """ + obj = str.__new__(cls, value) + obj._value_ = value + obj.py_type = py_type + return obj - @classmethod - def from_python_type(cls, t: Type[Any]) -> "SchemaType": - """ - Get the matching :class:`SchemaType` for a given Python runtime type. + @classmethod + def from_python_type(cls, t: Type[Any]) -> "SchemaType": + """ + Get the matching :class:`SchemaType` for a given Python runtime type. - :param t: A Python runtime type (e.g. ``str``, ``int``, ``float``). - :returns: The corresponding :class:`SchemaType`. - :raises ValueError: If ``t`` is not supported by this enumeration. - """ - for member in cls: - if member.py_type is t: - return member - raise ValueError(f"Unsupported python type: {t!r}") + :param t: A Python runtime type (e.g. ``str``, ``int``, ``float``). + :type t: Type[Any] + :returns: The corresponding :class:`SchemaType`. + :rtype: SchemaType + :raises ValueError: If ``t`` is not supported by this enumeration. + """ + for member in cls: + if member.py_type is t: + return member + raise ValueError(f"Unsupported python type: {t!r}") class SchemaProperty(BaseModel): - """ - A JSON Schema-like description of a single property (field) or nested schema node. - - This model is intended to be recursively nestable via :attr:`items` (for arrays) - and :attr:`properties` (for objects). - - :ivar type: The schema node type (e.g., ``string``, ``object``, ``array``). - :ivar description: Optional human-readable description of the property. - :ivar items: The item schema for an ``array`` type. Typically set when - :attr:`type` is :data:`~SchemaType.ARRAY`. - :ivar properties: Nested properties for an ``object`` type. Typically set when - :attr:`type` is :data:`~SchemaType.OBJECT`. Keys are property names, values - are their respective schemas. - :ivar default: Optional default value for the property. 
- :ivar required: For an ``object`` schema node, the set of required property - names within :attr:`properties`. (This mirrors JSON Schema’s ``required`` - keyword; it is *not* “this property is required in a parent object”.) - """ - - type: SchemaType - description: Optional[str] = None - items: Optional["SchemaProperty"] = None - properties: Optional[Mapping[str, "SchemaProperty"]] = None - default: Any = None - required: Optional[Set[str]] = None - - def has_default(self) -> bool: - """ - Check if the property has a default value defined. - - :return: True if a default value is set, False otherwise. - :rtype: bool - """ - return "default" in self.model_fields_set + """ + A JSON Schema-like description of a single property (field) or nested schema node. + + This model is intended to be recursively nestable via :attr:`items` (for arrays) + and :attr:`properties` (for objects). + + :ivar type: The schema node type (e.g., ``string``, ``object``, ``array``). + :ivar description: Optional human-readable description of the property. + :ivar items: The item schema for an ``array`` type. Typically set when + :attr:`type` is :data:`~SchemaType.ARRAY`. + :ivar properties: Nested properties for an ``object`` type. Typically set when + :attr:`type` is :data:`~SchemaType.OBJECT`. Keys are property names, values + are their respective schemas. + :ivar default: Optional default value for the property. + :ivar required: For an ``object`` schema node, the set of required property + names within :attr:`properties`. (This mirrors JSON Schema’s ``required`` + keyword; it is *not* “this property is required in a parent object”.) + """ + + type: SchemaType + description: Optional[str] = None + items: Optional["SchemaProperty"] = None + properties: Optional[Mapping[str, "SchemaProperty"]] = None + default: Any = None + required: Optional[Set[str]] = None + + def has_default(self) -> bool: + """ + Check if the property has a default value defined. 
+ + :return: True if a default value is set, False otherwise. + :rtype: bool + """ + return "default" in self.model_fields_set class SchemaDefinition(BaseModel): - """ - A top-level JSON Schema-like definition for an object. - - :ivar type: The schema type of the root. Typically :data:`~SchemaType.OBJECT`. - :ivar properties: Mapping of top-level property names to their schemas. - :ivar required: Set of required top-level property names within - :attr:`properties`. - """ - - type: SchemaType = SchemaType.OBJECT - properties: Mapping[str, SchemaProperty] - required: Optional[Set[str]] = None - - def extract_from(self, - datasource: Mapping[str, Any], - property_alias: Optional[Dict[str, List[str]]] = None) -> Dict[str, Any]: - return self._extract(datasource, self.properties, self.required, property_alias) - - @classmethod - def _extract(cls, - datasource: Mapping[str, Any], - properties: Mapping[str, SchemaProperty], - required: Optional[Set[str]] = None, - property_alias: Optional[Dict[str, List[str]]] = None) -> Dict[str, Any]: - result: Dict[str, Any] = {} - - for property_name, schema in properties.items(): - # Determine the keys to look for in the datasource - keys_to_check = [property_name] - if property_alias and property_name in property_alias: - keys_to_check.extend(property_alias[property_name]) - - # Find the first matching key in the datasource - value_found = False - for key in keys_to_check: - if key in datasource: - value = datasource[key] - value_found = True - break - - if not value_found and schema.has_default(): - value = schema.default - value_found = True - - if not value_found: - # If the property is required but not found, raise an error - if required and property_name in required: - raise KeyError(f"Required property '{property_name}' not found in datasource.") - # If not found and not required, skip to next property - continue - - # Process the value based on its schema type - if schema.type == SchemaType.OBJECT and schema.properties: - if 
isinstance(value, Mapping): - nested_value = cls._extract( - value, - schema.properties, - schema.required, - property_alias - ) - result[property_name] = nested_value - elif schema.type == SchemaType.ARRAY and schema.items: - if isinstance(value, Iterable): - nested_list = [] - for item in value: - if schema.items.type == SchemaType.OBJECT and schema.items.properties: - if isinstance(item, dict): - nested_item = SchemaDefinition._extract( - item, - schema.items.properties, - schema.items.required, - property_alias - ) - nested_list.append(nested_item) - else: - nested_list.append(item) - result[property_name] = nested_list - else: - result[property_name] = value - - return result + """ + A top-level JSON Schema-like definition for an object. + + :ivar type: The schema type of the root. Typically :data:`~SchemaType.OBJECT`. + :ivar properties: Mapping of top-level property names to their schemas. + :ivar required: Set of required top-level property names within + :attr:`properties`. + """ + + type: SchemaType = SchemaType.OBJECT + properties: Mapping[str, SchemaProperty] = field(default_factory=dict) + required: Optional[Set[str]] = None + + def extract_from(self, + datasource: Mapping[str, Any], + property_alias: Optional[Dict[str, List[str]]] = None) -> Dict[str, Any]: + return self._extract(datasource, self.properties, self.required, property_alias) + + @classmethod + def _extract(cls, + datasource: Mapping[str, Any], + properties: Mapping[str, SchemaProperty], + required: Optional[Set[str]] = None, + property_alias: Optional[Dict[str, List[str]]] = None) -> Dict[str, Any]: + result: Dict[str, Any] = {} + + for property_name, schema in properties.items(): + # Determine the keys to look for in the datasource + keys_to_check = [property_name] + if property_alias and property_name in property_alias: + keys_to_check.extend(property_alias[property_name]) + + # Find the first matching key in the datasource + value_found = False + for key in keys_to_check: + if key in 
datasource: + value = datasource[key] + value_found = True + break + + if not value_found and schema.has_default(): + value = schema.default + value_found = True + + if not value_found: + # If the property is required but not found, raise an error + if required and property_name in required: + raise KeyError(f"Required property '{property_name}' not found in datasource.") + # If not found and not required, skip to next property + continue + + # Process the value based on its schema type + if schema.type == SchemaType.OBJECT and schema.properties: + if isinstance(value, Mapping): + nested_value = cls._extract( + value, + schema.properties, + schema.required, + property_alias + ) + result[property_name] = nested_value + elif schema.type == SchemaType.ARRAY and schema.items: + if isinstance(value, Iterable): + nested_list = [] + for item in value: + if schema.items.type == SchemaType.OBJECT and schema.items.properties: + nested_item = SchemaDefinition._extract( + item, + schema.items.properties, + schema.items.required, + property_alias + ) + nested_list.append(nested_item) + else: + nested_list.append(item) + result[property_name] = nested_list + else: + result[property_name] = value + + return result class RawFoundryHostedMcpTool(BaseModel): - """Pydantic model for a single MCP tool. + """Pydantic model for a single MCP tool. - :ivar str name: Unique name identifier of the tool. - :ivar Optional[str] title: Display title of the tool, defaults to name if not provided. - :ivar str description: Human-readable description of the tool. - :ivar SchemaDefinition input_schema: JSON schema for tool input parameters. - :ivar Optional[SchemaDefinition] meta: Optional metadata for the tool. - """ + :ivar str name: Unique name identifier of the tool. + :ivar Optional[str] title: Display title of the tool, defaults to name if not provided. + :ivar str description: Human-readable description of the tool. + :ivar SchemaDefinition input_schema: JSON schema for tool input parameters. 
+ :ivar Optional[SchemaDefinition] meta: Optional metadata for the tool. + """ - name: str - title: Optional[str] = None - description: str = "" - input_schema: SchemaDefinition = Field( - default_factory=SchemaDefinition, - validation_alias="inputSchema" - ) - meta: Optional[SchemaDefinition] = Field(default=None, validation_alias="_meta") + name: str + title: Optional[str] = None + description: str = "" + input_schema: SchemaDefinition = Field( + default_factory=SchemaDefinition, + validation_alias="inputSchema" + ) + meta: Optional[SchemaDefinition] = Field(default=None, validation_alias="_meta") - def model_post_init(self, __context: Any) -> None: - if self.title is None: - self.title = self.name + def model_post_init(self, __context: Any) -> None: + if self.title is None: + self.title = self.name class RawFoundryHostedMcpTools(BaseModel): - """Pydantic model for the result containing list of tools. + """Pydantic model for the result containing list of tools. - :ivar List[RawFoundryHostedMcpTool] tools: List of MCP tool definitions. - """ + :ivar List[RawFoundryHostedMcpTool] tools: List of MCP tool definitions. + """ - tools: List[RawFoundryHostedMcpTool] = Field(default_factory=list) + tools: List[RawFoundryHostedMcpTool] = Field(default_factory=list) class ListFoundryHostedMcpToolsResponse(BaseModel): - """Pydantic model for the complete MCP tools/list JSON-RPC response. + """Pydantic model for the complete MCP tools/list JSON-RPC response. - :ivar str jsonrpc: JSON-RPC version, defaults to "2.0". - :ivar int id: Request identifier, defaults to 0. - :ivar RawFoundryHostedMcpTools result: Result containing the list of tools. - """ + :ivar str jsonrpc: JSON-RPC version, defaults to "2.0". + :ivar int id: Request identifier, defaults to 0. + :ivar RawFoundryHostedMcpTools result: Result containing the list of tools. 
+ """ - jsonrpc: str = "2.0" - id: int = 0 - result: RawFoundryHostedMcpTools = Field( - default_factory=RawFoundryHostedMcpTools - ) + jsonrpc: str = "2.0" + id: int = 0 + result: RawFoundryHostedMcpTools = Field( + default_factory=RawFoundryHostedMcpTools + ) class BaseConnectedToolsErrorResult(BaseModel, ABC): - """Base model for connected tools error responses.""" + """Base model for connected tools error responses.""" - @abstractmethod - def as_exception(self) -> Exception: - """Convert the error result to an appropriate exception. + @abstractmethod + def as_exception(self) -> Exception: + """Convert the error result to an appropriate exception. - :return: An exception representing the error. - :rtype: Exception - """ - raise NotImplementedError + :return: An exception representing the error. + :rtype: Exception + """ + raise NotImplementedError class OAuthConsentRequiredErrorResult(BaseConnectedToolsErrorResult): - """Model for OAuth consent required error responses. + """Model for OAuth consent required error responses. - :ivar Literal["OAuthConsentRequired"] type: Error type identifier. - :ivar Optional[str] consent_url: URL for user consent, if available. - :ivar Optional[str] message: Human-readable error message. - :ivar Optional[str] project_connection_id: Project connection ID related to the error. - """ + :ivar Literal["OAuthConsentRequired"] type: Error type identifier. + :ivar Optional[str] consent_url: URL for user consent, if available. + :ivar Optional[str] message: Human-readable error message. + :ivar Optional[str] project_connection_id: Project connection ID related to the error. 
+ """ - type: Literal["OAuthConsentRequired"] - consent_url: str = Field( - validation_alias=AliasChoices( + type: Literal["OAuthConsentRequired"] + consent_url: str = Field( + validation_alias=AliasChoices( AliasPath("toolResult", "consentUrl"), AliasPath("toolResult", "message"), ), - ) - message: str = Field( - validation_alias=AliasPath("toolResult", "message"), - ) - project_connection_id: str = Field( - validation_alias=AliasPath("toolResult", "projectConnectionId"), - ) + ) + message: str = Field( + validation_alias=AliasPath("toolResult", "message"), + ) + project_connection_id: str = Field( + validation_alias=AliasPath("toolResult", "projectConnectionId"), + ) - def as_exception(self) -> Exception: - return OAuthConsentRequiredError(self.message, self.consent_url, self.project_connection_id) + def as_exception(self) -> Exception: + return OAuthConsentRequiredError(self.message, self.consent_url, self.project_connection_id) class RawFoundryConnectedTool(BaseModel): - """Pydantic model for a single connected tool. + """Pydantic model for a single connected tool. - :ivar str name: Name of the tool. - :ivar str description: Description of the tool. - :ivar Optional[SchemaDefinition] input_schema: Input schema for the tool parameters. - """ - name: str - description: str - input_schema: SchemaDefinition = Field( - default=SchemaDefinition, - validation_alias="parameters", - ) + :ivar str name: Name of the tool. + :ivar str description: Description of the tool. + :ivar Optional[SchemaDefinition] input_schema: Input schema for the tool parameters. + """ + name: str + description: str + input_schema: SchemaDefinition = Field( + default_factory=SchemaDefinition, + validation_alias="parameters", + ) class RawFoundryConnectedRemoteServer(BaseModel): - """Pydantic model for a connected remote server. - - :ivar str protocol: Protocol used by the remote server. - :ivar str project_connection_id: Project connection ID of the remote server. 
- :ivar List[RawFoundryConnectedTool] tools: List of connected tools from this server. - """ - protocol: str = Field( - validation_alias=AliasPath("remoteServer", "protocol"), - ) - project_connection_id: str = Field( - validation_alias=AliasPath("remoteServer", "projectConnectionId"), - ) - tools: List[RawFoundryConnectedTool] = Field( - default_factory=list, - validation_alias="manifest", - ) + """Pydantic model for a connected remote server. + + :ivar str protocol: Protocol used by the remote server. + :ivar str project_connection_id: Project connection ID of the remote server. + :ivar List[RawFoundryConnectedTool] tools: List of connected tools from this server. + """ + protocol: str = Field( + validation_alias=AliasPath("remoteServer", "protocol"), + ) + project_connection_id: str = Field( + validation_alias=AliasPath("remoteServer", "projectConnectionId"), + ) + tools: List[RawFoundryConnectedTool] = Field( + default_factory=list, + validation_alias="manifest", + ) class ListConnectedToolsResult(BaseModel): - """Pydantic model for the result of listing connected tools. + """Pydantic model for the result of listing connected tools. - :ivar List[ConnectedRemoteServer] servers: List of connected remote servers. - """ - servers: List[RawFoundryConnectedRemoteServer] = Field( - default_factory=list, - validation_alias="tools", - ) + :ivar List[ConnectedRemoteServer] servers: List of connected remote servers. + """ + servers: List[RawFoundryConnectedRemoteServer] = Field( + default_factory=list, + validation_alias="tools", + ) class ListFoundryConnectedToolsResponse(BaseModel): - """Pydantic model for the response of listing the connected tools. - - :ivar Optional[ConnectedToolsResult] result: Result containing connected tool servers. - :ivar Optional[BaseConnectedToolsErrorResult] error: Error result, if any. 
- """ - - result: Optional[ListConnectedToolsResult] = None - error: Optional[BaseConnectedToolsErrorResult] = None - - # noinspection DuplicatedCode - _TYPE_ADAPTER: ClassVar[TypeAdapter] = TypeAdapter( - Annotated[ - Union[ - Annotated[ - Annotated[ - Union[OAuthConsentRequiredErrorResult], - Field(discriminator="type") - ], - Tag("ErrorType") - ], - Annotated[ListConnectedToolsResult, Tag("ResultType")], - ], - Discriminator( - lambda payload: "ErrorType" if isinstance(payload, dict) and "type" in payload else "ResultType" - ), - ]) - - @model_validator(mode="wrap") - @classmethod - def _validator(cls, data: Any, handler: ModelWrapValidatorHandler) -> "ListFoundryConnectedToolsResponse": - parsed = cls._TYPE_ADAPTER.validate_python(data) - normalized = {} - if isinstance(parsed, ListConnectedToolsResult): - normalized["result"] = parsed - elif isinstance(parsed, BaseConnectedToolsErrorResult): - normalized["error"] = parsed - return handler(normalized) + """Pydantic model for the response of listing the connected tools. + + :ivar Optional[ConnectedToolsResult] result: Result containing connected tool servers. + :ivar Optional[BaseConnectedToolsErrorResult] error: Error result, if any. 
+ """ + + result: Optional[ListConnectedToolsResult] = None + error: Optional[BaseConnectedToolsErrorResult] = None + + # noinspection DuplicatedCode + _TYPE_ADAPTER: ClassVar[TypeAdapter] = TypeAdapter( + Annotated[ + Union[ + Annotated[ + Annotated[ + Union[OAuthConsentRequiredErrorResult], + Field(discriminator="type") + ], + Tag("ErrorType") + ], + Annotated[ListConnectedToolsResult, Tag("ResultType")], + ], + Discriminator( + lambda payload: "ErrorType" if isinstance(payload, dict) and "type" in payload else "ResultType" + ), + ]) + + @model_validator(mode="wrap") + @classmethod + def _validator(cls, data: Any, handler: ModelWrapValidatorHandler) -> "ListFoundryConnectedToolsResponse": + parsed = cls._TYPE_ADAPTER.validate_python(data) + normalized = {} + if isinstance(parsed, ListConnectedToolsResult): + normalized["result"] = parsed + elif isinstance(parsed, BaseConnectedToolsErrorResult): + normalized["error"] = parsed # type: ignore[assignment] + return handler(normalized) class InvokeConnectedToolsResult(BaseModel): - """Pydantic model for the result of invoking a connected tool. + """Pydantic model for the result of invoking a connected tool. - :ivar Any value: The result value from the tool invocation. - """ - value: Any = Field(validation_alias="toolResult") + :ivar Any value: The result value from the tool invocation. + """ + value: Any = Field(validation_alias="toolResult") class InvokeFoundryConnectedToolsResponse(BaseModel): - """Pydantic model for the response of invoking a connected tool. - - :ivar Optional[InvokeConnectedToolsResult] result: Result of the tool invocation. - :ivar Optional[BaseConnectedToolsErrorResult] error: Error result, if any. 
- """ - result: Optional[InvokeConnectedToolsResult] = None - error: Optional[BaseConnectedToolsErrorResult] = None - - # noinspection DuplicatedCode - _TYPE_ADAPTER: ClassVar[TypeAdapter] = TypeAdapter( - Annotated[ - Union[ - Annotated[ - Annotated[ - Union[OAuthConsentRequiredErrorResult], - Field(discriminator="type") - ], - Tag("ErrorType") - ], - Annotated[InvokeConnectedToolsResult, Tag("ResultType")], - ], - Discriminator( - lambda payload: "ErrorType" if isinstance(payload, dict) and - # handle other error types in the future - payload.get("type") == "OAuthConsentRequired" - else "ResultType" - ), - ]) - - @model_validator(mode="wrap") - @classmethod - def _validator(cls, data: Any, handler: ModelWrapValidatorHandler) -> "InvokeFoundryConnectedToolsResponse": - parsed = cls._TYPE_ADAPTER.validate_python(data) - normalized = {} - if isinstance(parsed, InvokeConnectedToolsResult): - normalized["result"] = parsed - elif isinstance(parsed, BaseConnectedToolsErrorResult): - normalized["error"] = parsed - return handler(normalized) + """Pydantic model for the response of invoking a connected tool. + + :ivar Optional[InvokeConnectedToolsResult] result: Result of the tool invocation. + :ivar Optional[BaseConnectedToolsErrorResult] error: Error result, if any. 
+ """ + result: Optional[InvokeConnectedToolsResult] = None + error: Optional[BaseConnectedToolsErrorResult] = None + + # noinspection DuplicatedCode + _TYPE_ADAPTER: ClassVar[TypeAdapter] = TypeAdapter( + Annotated[ + Union[ + Annotated[ + Annotated[ + Union[OAuthConsentRequiredErrorResult], + Field(discriminator="type") + ], + Tag("ErrorType") + ], + Annotated[InvokeConnectedToolsResult, Tag("ResultType")], + ], + Discriminator( + lambda payload: "ErrorType" if isinstance(payload, dict) and + # handle other error types in the future + payload.get("type") == "OAuthConsentRequired" + else "ResultType" + ), + ]) + + @model_validator(mode="wrap") + @classmethod + def _validator(cls, data: Any, handler: ModelWrapValidatorHandler) -> "InvokeFoundryConnectedToolsResponse": + parsed: Union[InvokeConnectedToolsResult, BaseConnectedToolsErrorResult] = (cls._TYPE_ADAPTER + .validate_python(data)) + normalized: Dict[str, Any] = {} + if isinstance(parsed, InvokeConnectedToolsResult): + normalized["result"] = parsed + elif isinstance(parsed, BaseConnectedToolsErrorResult): + normalized["error"] = parsed + return handler(normalized) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_base.py index 5248ab7aa7fa..a3c552fe2575 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_base.py @@ -10,7 +10,7 @@ from azure.core import AsyncPipelineClient from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, \ ResourceNotFoundError, ResourceNotModifiedError, map_error -from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest ErrorMapping = 
MutableMapping[int, Type[HttpResponseError]] @@ -67,7 +67,7 @@ def _extract_response_json(self, response: AsyncHttpResponse) -> Any: try: payload_text = response.text() payload_json = json.loads(payload_text) if payload_text else {} - except AttributeError as e: + except AttributeError: payload_bytes = response.body() payload_json = json.loads(payload_bytes.decode("utf-8")) if payload_bytes else {} return payload_json \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_foundry_hosted_mcp_tools.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_foundry_hosted_mcp_tools.py index 0c01164a6809..08587e274096 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_foundry_hosted_mcp_tools.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_foundry_hosted_mcp_tools.py @@ -2,9 +2,9 @@ # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- from abc import ABC -from typing import Any, AsyncIterable, ClassVar, Dict, Iterable, List, Mapping, TYPE_CHECKING, Tuple, cast +from typing import Any, AsyncIterable, ClassVar, Dict, Iterable, List, Tuple, cast -from azure.core.rest import HttpRequest +from azure.core.pipeline.transport import HttpRequest from azure.core.tracing.decorator_async import distributed_trace_async from ._base import BaseOperations @@ -88,7 +88,7 @@ def _convert_listed_tools( def _build_invoke_tool_request(self, tool: ResolvedFoundryTool, arguments: Dict[str, Any]) -> HttpRequest: if tool.definition.source != FoundryToolSource.HOSTED_MCP: raise ToolInvocationError(f"Tool {tool.name} is not a Foundry-hosted MCP tool.", tool=tool) - definition = cast(FoundryHostedMcpTool, tool.definition) if TYPE_CHECKING else tool.definition + definition = cast(FoundryHostedMcpTool, tool.definition) payload = dict(self._INVOKE_TOOL_REQUEST_BODY_TEMPLATE) payload["params"] = { diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py index 17eb8c2eec48..2d50089fef8f 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py @@ -1,14 +1,11 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -import asyncio -import threading +import asyncio # pylint: disable=C4763 from abc import ABC, abstractmethod -from collections import defaultdict -from concurrent.futures import Future -from typing import Any, Awaitable, Collection, Dict, List, Mapping, MutableMapping, Optional, Tuple, Union +from typing import Any, Awaitable, Collection, List, Mapping, MutableMapping, Optional, Union -from cachetools import TTLCache +from cachetools import TTLCache # type: ignore[import-untyped] from ._facade import FoundryToolLike, ensure_foundry_tool from ._user import UserProvider @@ -93,7 +90,7 @@ async def list(self, tools: List[FoundryToolLike]) -> List[ResolvedFoundryTool]: await asyncio.gather(*fetching_tasks) except: # exception can only be caused by fetching tasks, remove them from cache - for k in tools_to_fetch.keys(): + for k, _ in tools_to_fetch.items(): if k in self._cache: del self._cache[k] raise diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_facade.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_facade.py index ebaca87cf1a7..f12d3f0db7b5 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_facade.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_facade.py @@ -13,7 +13,8 @@ # Required: # - "type": str Discriminator, e.g. "mcp" | "a2a" | "code_interpreter" | ... # Optional: -# - "project_connection_id": str Project connection id of Foundry connected tools, required with "type" is "mcp" or a2a. +# - "project_connection_id": str Project connection id of Foundry connected tools, +# required when "type" is "mcp" or "a2a". # # Custom keys: # - Allowed, but MUST NOT shadow reserved keys. 
@@ -45,5 +46,5 @@ def ensure_foundry_tool(tool: FoundryToolLike) -> FoundryTool: raise InvalidToolFacadeError(f"project_connection_id is required for tool protocol {protocol}.") return FoundryConnectedTool(protocol=protocol, project_connection_id=project_connection_id) - except: + except ValueError: return FoundryHostedMcpTool(name=tool_type, configuration=tool) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_resolver.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_resolver.py index 2764558b06bb..24eb0fabbb21 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_resolver.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_resolver.py @@ -2,7 +2,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- from abc import ABC, abstractmethod -from typing import Awaitable, Union, overload +from typing import Optional, Union from ._catalog import FoundryToolCatalog from ._facade import FoundryToolLike, ensure_foundry_tool @@ -49,9 +49,12 @@ async def resolve(self, tool: Union[FoundryToolLike, ResolvedFoundryTool]) -> Fo :return: The resolved Foundry tool invoker. 
:rtype: FoundryToolInvoker """ - resolved_tool = (tool - if isinstance(tool, ResolvedFoundryTool) - else await self._catalog.get(ensure_foundry_tool(tool))) - if not resolved_tool: - raise UnableToResolveToolInvocationError(f"Unable to resolve tool {tool} from catalog", tool) - return DefaultFoundryToolInvoker(resolved_tool, self._client, self._user_provider, self._agent_name) \ No newline at end of file + if isinstance(tool, ResolvedFoundryTool): + resolved_tool = tool + else: + foundry_tool = ensure_foundry_tool(tool) + resolved_tool = await self._catalog.get(foundry_tool) # type: ignore[assignment] + if not resolved_tool: + raise UnableToResolveToolInvocationError(f"Unable to resolve tool {foundry_tool} from catalog", + foundry_tool) + return DefaultFoundryToolInvoker(resolved_tool, self._client, self._user_provider, self._agent_name) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_runtime.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_runtime.py index 8ff723a6f7dc..8bc77759ecd6 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_runtime.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_runtime.py @@ -71,12 +71,18 @@ def __init__(self, @property def catalog(self) -> FoundryToolCatalog: - """The tool catalog.""" + """The tool catalog. + + :rtype: FoundryToolCatalog + """ return self._catalog @property def invocation(self) -> FoundryToolInvocationResolver: - """The tool invocation resolver.""" + """The tool invocation resolver. 
+ + :rtype: FoundryToolInvocationResolver + """ return self._invocation async def __aenter__(self) -> "DefaultFoundryToolRuntime": diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py index 17b25095a953..f60fb63f2cdc 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py @@ -39,7 +39,7 @@ def install(cls, If not provided, a default resolver will be used. :type user_resolver: Optional[Callable[[Request], Awaitable[Optional[UserInfo]]]] """ - app.add_middleware(UserInfoContextMiddleware, + app.add_middleware(UserInfoContextMiddleware, # type: ignore[arg-type] user_info_var=user_context or ContextVarUserProvider.default_user_info_context, user_resolver=user_resolver or cls._default_user_resolver) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_user.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_user.py index 14d8aad2690a..f72b30c0d3d3 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_user.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_user.py @@ -13,7 +13,11 @@ class UserProvider(ABC): @abstractmethod async def get_user(self) -> Optional[UserInfo]: - """Get the user information.""" + """Get the user information. + + :return: The user information or None if not found. 
+ :rtype: Optional[UserInfo] + """ raise NotImplementedError @@ -25,7 +29,11 @@ def __init__(self, context: Optional[ContextVar[UserInfo]] = None): self.context = context or self.default_user_info_context async def get_user(self) -> Optional[UserInfo]: - """Get the user information from the context variable.""" + """Get the user information from the context variable. + + :return: The user information or None if not found. + :rtype: Optional[UserInfo] + """ return self.context.get(None) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/utils/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/utils/__init__.py index 41fc7e00dd6d..037fb1dc04de 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/utils/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/utils/__init__.py @@ -4,4 +4,8 @@ __path__ = __import__('pkgutil').extend_path(__path__, __name__) -from ._name_resolver import * +from ._name_resolver import ToolNameResolver + +__all__ = [ + "ToolNameResolver", +] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/utils/_name_resolver.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/utils/_name_resolver.py index ab9c87fd113c..9f1b7874f52c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/utils/_name_resolver.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/utils/_name_resolver.py @@ -8,8 +8,8 @@ class ToolNameResolver: """Utility class for resolving tool names to be registered to model.""" def __init__(self): - self._count_by_name = dict() - self._stable_names = dict() + self._count_by_name = {} + self._stable_names = {} def resolve(self, tool: ResolvedFoundryTool) -> str: """Resolve a stable name for the given tool. 
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py index 24de2e1345a4..398a8c46fd5d 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py @@ -3,17 +3,27 @@ # --------------------------------------------------------- from __future__ import annotations -import asyncio +import asyncio # pylint: disable=C4763 import inspect from types import TracebackType -from typing import Any, Optional, Sequence, Type, Union +from typing import Any, Type, cast from azure.core.credentials import AccessToken, TokenCredential from azure.core.credentials_async import AsyncTokenCredential -async def _to_thread(func, *args, **kwargs): - """Compatibility wrapper for asyncio.to_thread (Python 3.8+).""" +async def _to_thread(func, *args, **kwargs): # pylint: disable=C4743 + """Compatibility wrapper for asyncio.to_thread (Python 3.8+). + + :param func: The function to run in a thread. + :type func: Callable + :param args: Positional arguments to pass to the function. + :type args: Any + :param kwargs: Keyword arguments to pass to the function. + :type kwargs: Any + :return: The result of the function call. 
+ :rtype: Any + """ if hasattr(asyncio, "to_thread"): return await asyncio.to_thread(func, *args, **kwargs) # py>=3.9 loop = asyncio.get_running_loop() @@ -27,7 +37,7 @@ class AsyncTokenCredentialAdapter(AsyncTokenCredential): - azure.core.credentials_async.AsyncTokenCredential (async) """ - def __init__(self, credential: TokenCredential |AsyncTokenCredential) -> None: + def __init__(self, credential: TokenCredential | AsyncTokenCredential) -> None: if not hasattr(credential, "get_token"): raise TypeError("credential must have a get_token method") self._credential = credential @@ -44,11 +54,12 @@ async def get_token( **kwargs: Any, ) -> AccessToken: if self._is_async: - return await self._credential.get_token(*scopes, - claims=claims, - tenant_id=tenant_id, - enable_cae=enable_cae, - **kwargs) + cred = cast(AsyncTokenCredential, self._credential) + return await cred.get_token(*scopes, + claims=claims, + tenant_id=tenant_id, + enable_cae=enable_cae, + **kwargs) return await _to_thread(self._credential.get_token, *scopes, claims=claims, @@ -86,4 +97,4 @@ async def __aexit__( aexit = getattr(self._credential, "__aexit__", None) if aexit is not None and inspect.iscoroutinefunction(aexit): return await aexit(exc_type, exc_value, traceback) - await self.close() \ No newline at end of file + await self.close() diff --git a/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_with_tools_test.py b/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_with_tools_test.py deleted file mode 100644 index a754c4d72772..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_with_tools_test.py +++ /dev/null @@ -1,108 +0,0 @@ -# mypy: ignore-errors -import datetime - -from azure.ai.agentserver.core import AgentRunContext, FoundryCBAgent -from azure.ai.agentserver.core.models import Response as OpenAIResponse -from azure.ai.agentserver.core.models.projects import ( - 
ItemContentOutputText, - ResponseCompletedEvent, - ResponseCreatedEvent, - ResponseOutputItemAddedEvent, - ResponsesAssistantMessageItemResource, - ResponseTextDeltaEvent, - ResponseTextDoneEvent, -) - -from azure.identity import DefaultAzureCredential - -def stream_events(text: str, context: AgentRunContext): - item_id = context.id_generator.generate_message_id() - - assembled = "" - yield ResponseCreatedEvent(response=OpenAIResponse(output=[])) - yield ResponseOutputItemAddedEvent( - output_index=0, - item=ResponsesAssistantMessageItemResource( - id=item_id, - status="in_progress", - content=[ - ItemContentOutputText( - text="", - annotations=[], - ) - ], - ), - ) - for i, token in enumerate(text.split(" ")): - piece = token if i == len(text.split(" ")) - 1 else token + " " - assembled += piece - yield ResponseTextDeltaEvent(output_index=0, content_index=0, delta=piece) - # Done with text - yield ResponseTextDoneEvent(output_index=0, content_index=0, text=assembled) - yield ResponseCompletedEvent( - response=OpenAIResponse( - metadata={}, - temperature=0.0, - top_p=0.0, - user="me", - id=context.response_id, - created_at=datetime.datetime.now(), - output=[ - ResponsesAssistantMessageItemResource( - id=item_id, - status="completed", - content=[ - ItemContentOutputText( - text=assembled, - annotations=[], - ) - ], - ) - ], - ) - ) - - -async def agent_run(context: AgentRunContext): - agent = context.request.get("agent") - print(f"agent:{agent}") - - if context.stream: - return stream_events( - "I am mock agent with no intelligence in stream mode.", context - ) - - tool = await my_agent.get_tool_client().list_tools_details() - tool_list = [t.name for t in tool] - # Build assistant output content - output_content = [ - ItemContentOutputText( - text="I am mock agent with no intelligence with tools " + str(tool_list), - annotations=[], - ) - ] - my_agent.get_tool_client() # just to illustrate we can access tool client from context - response = OpenAIResponse( - 
metadata={}, - temperature=0.0, - top_p=0.0, - user="me", - id=context.response_id, - created_at=datetime.datetime.now(), - output=[ - ResponsesAssistantMessageItemResource( - id=context.id_generator.generate_message_id(), - status="completed", - content=output_content, - ) - ], - ) - return response - -credentials = DefaultAzureCredential() - -my_agent = FoundryCBAgent(credentials=credentials) -my_agent.agent_run = agent_run - -if __name__ == "__main__": - my_agent.run() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py index fd190bc4d5cf..7fe934ae81c6 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py @@ -27,4 +27,4 @@ def from_langgraph( __all__ = ["from_langgraph", "LanggraphRunContext"] __version__ = VERSION -set_current_app(PackageMetadata.from_dist("azure-ai-agentserver-langgraph")) \ No newline at end of file +set_current_app(PackageMetadata.from_dist("azure-ai-agentserver-langgraph")) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py index 846133a7912c..0d9566707362 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py @@ -2,15 +2,16 @@ # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- from dataclasses import dataclass -from typing import TYPE_CHECKING from langgraph.runtime import get_runtime +from azure.ai.agentserver.core import AgentRunContext from .tools._context import FoundryToolContext @dataclass class LanggraphRunContext: + agent_run: AgentRunContext tools: FoundryToolContext diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index b68d8d408eb5..7072f641850f 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -5,7 +5,7 @@ # mypy: disable-error-code="assignment,arg-type" import os import re -from typing import Optional, TYPE_CHECKING, Union +from typing import Optional, TYPE_CHECKING from langgraph.graph.state import CompiledStateGraph @@ -66,27 +66,26 @@ async def agent_run(self, context: AgentRunContext): # Resolve graph - always resolve if it's a factory function to get fresh graph each time # For factories, get a new graph instance per request to avoid concurrency issues try: - input_arguments = await self.converter.convert_request(context) - self.ensure_runnable_config(context, input_arguments) + lg_run_context = await self.setup_lg_run_context(context) + input_arguments = await self.converter.convert_request(lg_run_context) + self.ensure_runnable_config(input_arguments) - lg_run_context = await self.setup_lg_run_context() if not context.stream: - response = await self.agent_run_non_stream(input_arguments, context, lg_run_context) + response = await self.agent_run_non_stream(input_arguments) return response - # For streaming, pass tool_client to be closed after streaming completes - return self.agent_run_astream(input_arguments, context, lg_run_context) + return 
self.agent_run_astream(input_arguments) except OAuthConsentRequiredError as e: if not context.stream: response = await self.respond_with_oauth_consent(context, e) return response return self.respond_with_oauth_consent_astream(context, e) - except Exception: - raise - async def setup_lg_run_context(self): + async def setup_lg_run_context(self, agent_run_context: AgentRunContext) -> LanggraphRunContext: resolved = await self._tool_resolver.resolve_from_registry() - return LanggraphRunContext(FoundryToolContext(resolved)) + return LanggraphRunContext( + agent_run_context, + FoundryToolContext(resolved)) def init_tracing_internal(self, exporter_endpoint=None, app_insights_conn_str=None): # set env vars for langsmith @@ -116,68 +115,57 @@ def get_trace_attributes(self): attrs["service.namespace"] = "azure.ai.agentserver.langgraph" return attrs - async def agent_run_non_stream(self, input_arguments: GraphInputArguments, context: AgentRunContext, - lg_run_context: LanggraphRunContext): + async def agent_run_non_stream(self, input_arguments: GraphInputArguments): """ Run the agent with non-streaming response. :param input_arguments: The input data to run the agent with. :type input_arguments: GraphInputArguments - :param context: The context for the agent run. - :type context: AgentRunContext - :param lg_run_context: The tool context for the agent run. - :type lg_run_context: FoundryToolContext :return: The response of the agent run. 
:rtype: dict """ try: - result = await self._graph.ainvoke(**input_arguments, context=lg_run_context) - output = self.converter.convert_response_non_stream(result, context) + result = await self._graph.ainvoke(**input_arguments) + output = self.converter.convert_response_non_stream(result, input_arguments["context"]) return output except Exception as e: logger.error(f"Error during agent run: {e}", exc_info=True) raise e async def agent_run_astream(self, - input_arguments: GraphInputArguments, - context: AgentRunContext, - lg_run_context: LanggraphRunContext): + input_arguments: GraphInputArguments): """ Run the agent with streaming response. :param input_arguments: The input data to run the agent with. :type input_arguments: GraphInputArguments - :param context: The context for the agent run. - :type context: AgentRunContext - :param lg_run_context: The tool context for the agent run. - :type lg_run_context: FoundryToolContext :return: An async generator yielding the response stream events. :rtype: AsyncGenerator[dict] """ try: - logger.info(f"Starting streaming agent run {context.response_id}") - stream = self._graph.astream(**input_arguments, context=lg_run_context) - async for output_event in self.converter.convert_response_stream(stream, context): + logger.info(f"Starting streaming agent run {input_arguments['context'].agent_run.response_id}") + stream = self._graph.astream(**input_arguments) + async for output_event in self.converter.convert_response_stream( + stream, + input_arguments["context"]): yield output_event except Exception as e: logger.error(f"Error during streaming agent run: {e}", exc_info=True) raise e - def ensure_runnable_config(self, context: AgentRunContext, input_arguments: GraphInputArguments): + def ensure_runnable_config(self, input_arguments: GraphInputArguments): """ Ensure the RunnableConfig is set in the input arguments. - :param context: The context for the agent run. 
- :type context: AgentRunContext :param input_arguments: The input arguments for the agent run. :type input_arguments: GraphInputArguments """ config = input_arguments.get("config", {}) configurable = config.get("configurable", {}) - configurable["thread_id"] = context.conversation_id + configurable["thread_id"] = input_arguments["context"].agent_run.conversation_id config["configurable"] = configurable callbacks = config.get("callbacks", []) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py index 8c4a453180ed..9f3c693800a1 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py @@ -12,28 +12,38 @@ from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.models import projects as project_models -from azure.ai.agentserver.core.models.openai import ( - ResponseInputParam, - ResponseInputItemParam, -) -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext +from azure.ai.agentserver.core.models.openai import (ResponseInputItemParam, ResponseInputParam) +from .._context import LanggraphRunContext INTERRUPT_NODE_NAME = "__interrupt__" logger = get_logger() + class HumanInTheLoopHelper: """Helper class for managing human-in-the-loop interactions in LangGraph.""" - def __init__(self, context: AgentRunContext = None): + def __init__(self, context: LanggraphRunContext): self.context = context - def has_interrupt(self, state: StateSnapshot) -> bool: - """Check if the LangGraph state contains an interrupt node.""" + def has_interrupt(self, state: Optional[StateSnapshot]) -> bool: + """Check if the LangGraph state contains an interrupt node. 
+ + :param state: The LangGraph state snapshot. + :type state: Optional[StateSnapshot] + :return: True if the state contains an interrupt, False otherwise. + :rtype: bool + """ if not state or not isinstance(state, StateSnapshot): return False return state.interrupts is not None and len(state.interrupts) > 0 def convert_interrupts(self, interrupts: tuple) -> list[project_models.ItemResource]: - """Convert LangGraph interrupts to ItemResource objects.""" + """Convert LangGraph interrupts to ItemResource objects. + + :param interrupts: A tuple of interrupt objects. + :type interrupts: tuple + :return: A list of ItemResource objects. + :rtype: list[project_models.ItemResource] + """ if not interrupts or not isinstance(interrupts, tuple): return [] result = [] @@ -46,8 +56,8 @@ def convert_interrupts(self, interrupts: tuple) -> list[project_models.ItemResou if item: result.append(item) return result - - def convert_interrupt(self, interrupt_info: Interrupt) -> project_models.ItemResource: + + def convert_interrupt(self, interrupt_info: Interrupt) -> Optional[project_models.ItemResource]: """Convert a single LangGraph Interrupt to an ItemResource object. :param interrupt_info: The interrupt information from LangGraph. @@ -59,60 +69,75 @@ def convert_interrupt(self, interrupt_info: Interrupt) -> project_models.ItemRes raise NotImplementedError("Subclasses must implement convert_interrupt method.") def validate_and_convert_human_feedback( - self, state: StateSnapshot, input: Union[str, ResponseInputParam] - ) -> Optional[Command]: + self, state: Optional[StateSnapshot], input_data: Union[str, ResponseInputParam] + ) -> Optional[Command]: """Validate if the human feedback input corresponds to the interrupt in state. If valid, convert the input to a LangGraph Command. :param state: The current LangGraph state snapshot. - :type state: StateSnapshot - :param input: The human feedback input from the request. 
- :type input: Union[str, ResponseInputParam] + :type state: Optional[StateSnapshot] + :param input_data: The human feedback input from the request. + :type input_data: Union[str, ResponseInputParam] :return: Command if valid feedback is provided, else None. :rtype: Union[Command, None] """ + # Validate interrupt exists in state if not self.has_interrupt(state): - # No interrupt in state logger.info("No interrupt found in state.") return None - interrupt_obj = state.interrupts[0] # Assume single interrupt for simplicity + + interrupt_obj = state.interrupts[0] # type: ignore[union-attr] # Assume single interrupt for simplicity if not interrupt_obj or not isinstance(interrupt_obj, Interrupt): - logger.warning(f"No interrupt object found in state") + logger.warning("No interrupt object found in state") return None - - logger.info(f"Retrived interrupt from state, validating and converting human feedback.") - if isinstance(input, str): - # expect a list of function call output items - logger.warning(f"Expecting function call output item, got string: {input}") + + logger.info("Retrieved interrupt from state, validating and converting human feedback.") + + # Validate input format and extract item + item = self._validate_input_format(input_data, interrupt_obj) + if item is None: return None - if isinstance(input, list): - if len(input) != 1: - # expect exactly one function call output item - logger.warning(f"Expected exactly one interrupt input item, got {len(input)} items.") - return None - item = input[0] - # validate item type - item_type = item.get("type", None) - if item_type != project_models.ItemType.FUNCTION_CALL_OUTPUT: - logger.warning(f"Invalid interrupt input item type: {item_type}, expected FUNCTION_CALL_OUTPUT.") - return None - - # validate call_id matches - if item.get("call_id") != interrupt_obj.id: - logger.warning(f"Interrupt input call_id {item.call_id} does not match interrupt id {interrupt_obj.id}.") - return None - - return 
self.convert_input_item_to_command(item) - else: - logger.error(f"Unsupported interrupt input type: {type(input)}, {input}") + + return self.convert_input_item_to_command(item) + + def _validate_input_format( + self, input_data: Union[str, ResponseInputParam], interrupt_obj: Interrupt + ) -> Optional[ResponseInputItemParam]: + if isinstance(input_data, str): + logger.warning("Expecting function call output item, got string: %s", input_data) + return None + + if not isinstance(input_data, list): + logger.error("Unsupported interrupt input type: %s, %s", type(input_data), input_data) + return None + + if len(input_data) != 1: + logger.warning("Expected exactly one interrupt input item, got %d items.", len(input_data)) return None - def convert_input_item_to_command(self, input: ResponseInputItemParam) -> Union[Command, None]: + item = input_data[0] + item_type = item.get("type", None) + if item_type != project_models.ItemType.FUNCTION_CALL_OUTPUT: + logger.warning( + "Invalid interrupt input item type: %s, expected FUNCTION_CALL_OUTPUT.", item_type + ) + return None + + if item.get("call_id") != interrupt_obj.id: + logger.warning( + "Interrupt input call_id %s does not match interrupt id %s.", + item.get("call_id"), interrupt_obj.id + ) + return None + + return item + + def convert_input_item_to_command(self, input_item: ResponseInputItemParam) -> Union[Command, None]: """Convert ItemParams to a LangGraph Command for interrupt handling. - :param input: The item parameters containing interrupt information. - :type input: ResponseInputItemParam + :param input_item: The item parameters containing interrupt information. + :type input_item: ResponseInputItemParam :return: The LangGraph Command. 
:rtype: Union[Command, None] """ diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py index 11c2f4a5ac03..e1396ba90577 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py @@ -3,18 +3,16 @@ # --------------------------------------------------------- import json -from typing import Union +from typing import Optional, Union from langgraph.types import ( Command, Interrupt, - StateSnapshot, ) from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.models import projects as project_models from azure.ai.agentserver.core.models.openai import ( - ResponseInputParam, ResponseInputItemParam, ) from azure.ai.agentserver.core.server.common.constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME @@ -32,19 +30,19 @@ class HumanInTheLoopJsonHelper(HumanInTheLoopHelper): All values are serialized as JSON strings. 
""" - def convert_interrupt(self, interrupt_info: Interrupt) -> project_models.ItemResource: + def convert_interrupt(self, interrupt_info: Interrupt) -> Optional[project_models.ItemResource]: if not isinstance(interrupt_info, Interrupt): - logger.warning(f"Interrupt is not of type Interrupt: {interrupt_info}") + logger.warning("Interrupt is not of type Interrupt: %s", interrupt_info) return None name, call_id, arguments = self.interrupt_to_function_call(interrupt_info) return project_models.FunctionToolCallItemResource( call_id=call_id, name=name, arguments=arguments, - id=self.context.id_generator.generate_function_call_id(), - status="inprogress", + id=self.context.agent_run.id_generator.generate_function_call_id(), + status="in_progress", ) - + def interrupt_to_function_call(self, interrupt: Interrupt) : """ Convert an Interrupt to a function call tuple. @@ -61,22 +59,25 @@ def interrupt_to_function_call(self, interrupt: Interrupt) : try: arguments = json.dumps(interrupt.value) except Exception as e: # pragma: no cover - fallback # pylint: disable=broad-exception-caught - logger.error(f"Failed to serialize interrupt value to JSON: {interrupt.value}, error: {e}") + logger.error("Failed to serialize interrupt value to JSON: %s, error: %s", interrupt.value, e) arguments = str(interrupt.value) return HUMAN_IN_THE_LOOP_FUNCTION_NAME, interrupt.id, arguments - def convert_input_item_to_command(self, input: ResponseInputItemParam) -> Union[Command, None]: - output_str = input.get("output") + def convert_input_item_to_command(self, input_item: ResponseInputItemParam) -> Union[Command, None]: + output_str = input_item.get("output") + if not isinstance(output_str, str): + logger.error("Invalid output type in function call output: %s", input_item) + return None try: output = json.loads(output_str) - except json.JSONDecodeError as e: - logger.error(f"Invalid JSON in function call output: {input}") + except json.JSONDecodeError: + logger.error("Invalid JSON in function call 
output: %s", input_item) return None resume = output.get("resume", None) update = output.get("update", None) goto = output.get("goto", None) if resume is None and update is None and goto is None: - logger.warning(f"No valid Command fields found in function call output: {input}") + logger.warning("No valid Command fields found in function call output: %s", input_item) return None return Command( resume=resume, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py index c93d922a97de..4d72ab22060f 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py @@ -21,19 +21,19 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import Any, AsyncGenerator, AsyncIterator, Dict, TypedDict, Union +from typing import Any, AsyncIterable, AsyncIterator, Dict, TypedDict, Union from langgraph.types import Command from azure.ai.agentserver.core.models import Response, ResponseStreamEvent -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext +from .._context import LanggraphRunContext class GraphInputArguments(TypedDict): """TypedDict for LangGraph input arguments.""" input: Union[Dict[str, Any], Command, None] config: Dict[str, Any] - context: Dict[str, Any] + context: LanggraphRunContext stream_mode: str @@ -46,28 +46,30 @@ class ResponseAPIConverter(ABC): :meta private: """ @abstractmethod - async def convert_request(self, context: AgentRunContext) -> GraphInputArguments: + async def convert_request(self, context: LanggraphRunContext) -> GraphInputArguments: """Convert the incoming request to a serializable dict for LangGraph. 
This is a convenience wrapper around request_to_state that only returns dict states, raising ValueError if a Command is returned instead. :param context: The context for the agent run. - :type context: AgentRunContext + :type context: LanggraphRunContext :return: The initial LangGraph arguments :rtype: GraphInputArguments """ @abstractmethod - async def convert_response_non_stream(self, output: Any, context: AgentRunContext) -> Response: + async def convert_response_non_stream(self, output: Any, context: LanggraphRunContext) -> Response: """Convert the completed LangGraph state into a final non-streaming Response object. This is a convenience wrapper around state_to_response that retrieves the current state snapshot asynchronously. + :param output: The LangGraph output to convert. + :type output: Any :param context: The context for the agent run. - :type context: AgentRunContext + :type context: LanggraphRunContext :return: The final non-streaming Response object. :rtype: Response @@ -76,9 +78,9 @@ async def convert_response_non_stream(self, output: Any, context: AgentRunContex @abstractmethod async def convert_response_stream( self, - output: AsyncIterator[Dict[str, Any] | Any], - context: AgentRunContext, - ) -> AsyncGenerator[ResponseStreamEvent, None]: + output: AsyncIterator[Union[Dict[str, Any], Any]], + context: LanggraphRunContext, + ) -> AsyncIterable[ResponseStreamEvent]: """Convert an async iterator of LangGraph stream events into stream events. This is a convenience wrapper around state_to_response_stream that retrieves @@ -87,8 +89,9 @@ async def convert_response_stream( :param output: An async iterator yielding LangGraph stream events :type output: AsyncIterator[Dict[str, Any] | Any] :param context: The context for the agent run. - :type context: AgentRunContext + :type context: LanggraphRunContext - :return: An async generator yielding ResponseStreamEvent objects. 
- :rtype: AsyncGenerator[ResponseStreamEvent, None] + :return: An async iterable yielding ResponseStreamEvent objects. + :rtype: AsyncIterable[ResponseStreamEvent] """ + raise NotImplementedError diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py index 9f2af30c4937..eb725ed23ef8 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py @@ -1,20 +1,26 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from __future__ import annotations + import time -from typing import Any, AsyncGenerator, AsyncIterator, Dict, Optional, TypedDict, Union +from collections.abc import Callable +from typing import Any, AsyncIterable, AsyncIterator, Dict, Optional, Union from langchain_core.runnables import RunnableConfig -from langgraph.types import Command, Interrupt, StateSnapshot from langgraph.graph.state import CompiledStateGraph +from langgraph.types import Command, StateSnapshot from azure.ai.agentserver.core.models import Response, ResponseStreamEvent -from azure.ai.agentserver.core.models import projects as project_models -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext - -from .response_api_request_converter import ResponseAPIRequestConverter, ResponseAPIMessageRequestConverter -from .response_api_stream_response_converter import ResponseAPIStreamResponseConverter, ResponseAPIMessagesStreamResponseConverter -from .response_api_non_stream_response_converter import ResponseAPINonStreamResponseConverter, 
ResponseAPIMessagesNonStreamResponseConverter from .human_in_the_loop_helper import HumanInTheLoopHelper from .human_in_the_loop_json_helper import HumanInTheLoopJsonHelper -from .response_api_converter import ResponseAPIConverter, GraphInputArguments +from .response_api_converter import GraphInputArguments, ResponseAPIConverter +from .response_api_non_stream_response_converter import (ResponseAPIMessagesNonStreamResponseConverter, + ResponseAPINonStreamResponseConverter) +from .response_api_request_converter import ResponseAPIMessageRequestConverter, ResponseAPIRequestConverter +from .response_api_stream_response_converter import ResponseAPIMessagesStreamResponseConverter +from .._context import LanggraphRunContext + class ResponseAPIDefaultConverter(ResponseAPIConverter): """ @@ -23,89 +29,109 @@ class ResponseAPIDefaultConverter(ResponseAPIConverter): """ def __init__(self, graph: CompiledStateGraph, - create_request_converter=None, - create_stream_response_converter=None, - create_non_stream_response_converter=None, - create_human_in_the_loop_helper=None): + create_request_converter: Callable[[LanggraphRunContext], ResponseAPIRequestConverter] | None = None, + create_stream_response_converter: Callable[ + [LanggraphRunContext], + ResponseAPIMessagesStreamResponseConverter + ] | None = None, + create_non_stream_response_converter: Callable[ + [LanggraphRunContext], + ResponseAPINonStreamResponseConverter + ] | None = None, + create_human_in_the_loop_helper: Callable[[LanggraphRunContext], HumanInTheLoopHelper] | None = None): self._graph = graph - if create_request_converter: - self._create_request_converter = create_request_converter - if create_stream_response_converter: - self._create_stream_response_converter = create_stream_response_converter - if create_non_stream_response_converter: - self._create_non_stream_response_converter = create_non_stream_response_converter - if create_human_in_the_loop_helper: - self._create_human_in_the_loop_helper = 
create_human_in_the_loop_helper - - async def convert_request(self, context: AgentRunContext) -> GraphInputArguments: + self._custom_request_converter_factory = create_request_converter + self._custom_stream_response_converter_factory = create_stream_response_converter + self._custom_non_stream_response_converter_factory = create_non_stream_response_converter + self._custom_human_in_the_loop_helper_factory = create_human_in_the_loop_helper + + async def convert_request(self, context: LanggraphRunContext) -> GraphInputArguments: prev_state = await self._aget_state(context) input_data = self._convert_request_input(context, prev_state) stream_mode = self.get_stream_mode(context) - return GraphInputArguments({ - "input": input_data, - "stream_mode": stream_mode}) + return GraphInputArguments( + input=input_data, + stream_mode=stream_mode, + config={}, + context=context, + ) - async def convert_response_non_stream(self, output: Any, context: AgentRunContext) -> Response: + async def convert_response_non_stream(self, output: Any, context: LanggraphRunContext) -> Response: + agent_run_context = context.agent_run converter = self._create_non_stream_response_converter(context) - output = converter.convert(output) + converted_output = converter.convert(output) - agent_id = context.get_agent_id_object() - conversation = context.get_conversation_object() - response = Response( + agent_id = agent_run_context.get_agent_id_object() + conversation = agent_run_context.get_conversation_object() + response = Response( # type: ignore[call-overload] object="response", - id=context.response_id, + id=agent_run_context.response_id, agent=agent_id, conversation=conversation, - metadata=context.request.get("metadata"), + metadata=agent_run_context.request.get("metadata"), created_at=int(time.time()), - output=output, + output=converted_output, ) return response - async def convert_response_stream( + async def convert_response_stream( # type: ignore[override] self, - output: 
AsyncIterator[Dict[str, Any] | Any], - context: AgentRunContext, - ) -> AsyncGenerator[ResponseStreamEvent, None]: + output: AsyncIterator[Union[Dict[str, Any], Any]], + context: LanggraphRunContext, + ) -> AsyncIterable[ResponseStreamEvent]: converter = self._create_stream_response_converter(context) async for event in output: - output = converter.convert(event) - for e in output: + converted_output = converter.convert(event) + for e in converted_output: yield e - + state = await self._aget_state(context) - output = converter.finalize(state) # finalize the response with graph state after stream - for event in output: + finalized_output = converter.finalize(state) # finalize the response with graph state after stream + for event in finalized_output: yield event - def get_stream_mode(self, context: AgentRunContext) -> str: - if context.stream: + def get_stream_mode(self, context: LanggraphRunContext) -> str: + if context.agent_run.stream: return "messages" return "updates" - def _create_request_converter(self, context: AgentRunContext) -> ResponseAPIRequestConverter: - data = context.request + def _create_request_converter(self, context: LanggraphRunContext) -> ResponseAPIRequestConverter: + if self._custom_request_converter_factory: + return self._custom_request_converter_factory(context) + data = context.agent_run.request return ResponseAPIMessageRequestConverter(data) - - def _create_stream_response_converter(self, context: AgentRunContext) -> ResponseAPIMessagesStreamResponseConverter: + + def _create_stream_response_converter( + self, context: LanggraphRunContext + ) -> ResponseAPIMessagesStreamResponseConverter: + if self._custom_stream_response_converter_factory: + return self._custom_stream_response_converter_factory(context) hitl_helper = self._create_human_in_the_loop_helper(context) return ResponseAPIMessagesStreamResponseConverter(context, hitl_helper=hitl_helper) - - def _create_non_stream_response_converter(self, context: AgentRunContext) -> 
ResponseAPINonStreamResponseConverter: + + def _create_non_stream_response_converter( + self, context: LanggraphRunContext + ) -> ResponseAPINonStreamResponseConverter: + if self._custom_non_stream_response_converter_factory: + return self._custom_non_stream_response_converter_factory(context) hitl_helper = self._create_human_in_the_loop_helper(context) return ResponseAPIMessagesNonStreamResponseConverter(context, hitl_helper) - def _create_human_in_the_loop_helper(self, context: AgentRunContext) -> HumanInTheLoopHelper: + def _create_human_in_the_loop_helper(self, context: LanggraphRunContext) -> HumanInTheLoopHelper: + if self._custom_human_in_the_loop_helper_factory: + return self._custom_human_in_the_loop_helper_factory(context) return HumanInTheLoopJsonHelper(context) - def _convert_request_input(self, context: AgentRunContext, prev_state: StateSnapshot) -> Union[Dict[str, Any], Command]: + def _convert_request_input( + self, context: LanggraphRunContext, prev_state: Optional[StateSnapshot] + ) -> Union[Dict[str, Any], Command]: """ Convert the CreateResponse input to LangGraph input format, handling HITL if needed. :param context: The context for the agent run. - :type context: AgentRunContext + :type context: LanggraphRunContext :param prev_state: The previous LangGraph state snapshot. - :type prev_state: StateSnapshot + :type prev_state: Optional[StateSnapshot] :return: The converted LangGraph input data or Command for HITL. 
:rtype: Union[Dict[str, Any], Command] @@ -113,16 +139,16 @@ def _convert_request_input(self, context: AgentRunContext, prev_state: StateSnap hitl_helper = self._create_human_in_the_loop_helper(context) if hitl_helper: command = hitl_helper.validate_and_convert_human_feedback( - prev_state, context.request.get("input") + prev_state, context.agent_run.request.get("input") ) if command is not None: return command converter = self._create_request_converter(context) return converter.convert() - - async def _aget_state(self, context: AgentRunContext) -> Optional[StateSnapshot]: + + async def _aget_state(self, context: LanggraphRunContext) -> Optional[StateSnapshot]: config = RunnableConfig( - configurable={"thread_id": context.conversation_id}, + configurable={"thread_id": context.agent_run.conversation_id}, ) if self._graph.checkpointer: state = await self._graph.aget_state(config=config) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py index 24e61244e846..c776fad3dcad 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py @@ -3,24 +3,21 @@ # --------------------------------------------------------- # pylint: disable=logging-fstring-interpolation,broad-exception-caught,logging-not-lazy # mypy: disable-error-code="valid-type,call-overload,attr-defined" -from abc import ABC, abstractmethod import copy -import json -from typing import Any, Collection, List +from abc import ABC, abstractmethod +from typing import Any, Collection, Iterable, List from langchain_core import messages from langchain_core.messages import AnyMessage 
-from langgraph.types import Interrupt from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.models import projects as project_models -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext - from .human_in_the_loop_helper import ( HumanInTheLoopHelper, INTERRUPT_NODE_NAME, ) from .utils import extract_function_call +from .._context import LanggraphRunContext logger = get_logger() @@ -41,40 +38,45 @@ def convert(self, output: dict[str, Any]) -> list[project_models.ItemResource]: :return: A list of ItemResource objects representing the converted output. :rtype: list[project_models.ItemResource] """ - pass + raise NotImplementedError -class ResponseAPIMessagesNonStreamResponseConverter(ResponseAPINonStreamResponseConverter): +class ResponseAPIMessagesNonStreamResponseConverter(ResponseAPINonStreamResponseConverter): # pylint: disable=C4751 """ Convert Langgraph MessageState output to ItemResource objects. """ def __init__(self, - context: AgentRunContext, + context: LanggraphRunContext, hitl_helper: HumanInTheLoopHelper): self.context = context self.hitl_helper = hitl_helper def convert(self, output: dict[str, Any]) -> list[project_models.ItemResource]: - res = [] - for step in output: - for node_name, node_output in step.items(): - if node_name == INTERRUPT_NODE_NAME: - interrupt_messages = self.hitl_helper.convert_interrupts(node_output) - res.extend(interrupt_messages) - else: - message_arr = node_output.get("messages") - if not message_arr or not isinstance(message_arr, Collection): - logger.warning(f"No messages found in node {node_name} output: {node_output}") - continue - for message in message_arr: - try: - converted = self.convert_output_message(message) - if converted: - res.append(converted) - except Exception as e: - logger.error(f"Error converting message {message}: {e}") + res: list[project_models.ItemResource] = [] + for node_name, node_output in output.items(): + node_results = 
self._convert_node_output(node_name, node_output) + res.extend(node_results) return res + def _convert_node_output( + self, node_name: str, node_output: Any + ) -> Iterable[project_models.ItemResource]: + if node_name == INTERRUPT_NODE_NAME: + yield from self.hitl_helper.convert_interrupts(node_output) + + message_arr = node_output.get("messages") + if not message_arr or not isinstance(message_arr, Collection): + logger.warning(f"No messages found in node {node_name} output: {node_output}") + return + + for message in message_arr: + try: + converted = self.convert_output_message(message) + if converted: + yield converted + except Exception as e: + logger.error(f"Error converting message {message}: {e}") + def convert_output_message(self, output_message: AnyMessage): # pylint: disable=inconsistent-return-statements # Implement the conversion logic for inner inputs if isinstance(output_message, messages.HumanMessage): @@ -82,7 +84,7 @@ def convert_output_message(self, output_message: AnyMessage): # pylint: disable content=self.convert_MessageContent( output_message.content, role=project_models.ResponsesMessageRole.USER ), - id=self.context.id_generator.generate_message_id(), + id=self.context.agent_run.id_generator.generate_message_id(), status="completed", # temporary status, can be adjusted based on actual logic ) if isinstance(output_message, messages.SystemMessage): @@ -90,7 +92,7 @@ def convert_output_message(self, output_message: AnyMessage): # pylint: disable content=self.convert_MessageContent( output_message.content, role=project_models.ResponsesMessageRole.SYSTEM ), - id=self.context.id_generator.generate_message_id(), + id=self.context.agent_run.id_generator.generate_message_id(), status="completed", ) if isinstance(output_message, messages.AIMessage): @@ -107,21 +109,21 @@ def convert_output_message(self, output_message: AnyMessage): # pylint: disable call_id=call_id, name=name, arguments=argument, - 
id=self.context.id_generator.generate_function_call_id(), + id=self.context.agent_run.id_generator.generate_function_call_id(), status="completed", ) return project_models.ResponsesAssistantMessageItemResource( content=self.convert_MessageContent( output_message.content, role=project_models.ResponsesMessageRole.ASSISTANT ), - id=self.context.id_generator.generate_message_id(), + id=self.context.agent_run.id_generator.generate_message_id(), status="completed", ) if isinstance(output_message, messages.ToolMessage): return project_models.FunctionToolCallOutputItemResource( call_id=output_message.tool_call_id, output=output_message.content, - id=self.context.id_generator.generate_function_output_id(), + id=self.context.agent_run.id_generator.generate_function_output_id(), ) logger.warning(f"Unsupported message type: {type(output_message)}, {output_message}") @@ -171,4 +173,3 @@ def convert_MessageContentItem( content_dict["annotations"] = [] # annotation is required for output_text return project_models.ItemContent(content_dict) - diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py index afc4af4048c6..f62d4c2b62f9 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py @@ -50,7 +50,7 @@ def convert(self) -> dict: :return: The converted LangGraph request. 
:rtype: dict """ - pass + raise NotImplementedError class ResponseAPIMessageRequestConverter(ResponseAPIRequestConverter): diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_stream_response_converter.py index 85d01a656a67..02f79c589a96 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_stream_response_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_stream_response_converter.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation +# pylint: disable=logging-fstring-interpolation,C4751 # mypy: disable-error-code="assignment,valid-type" from abc import ABC, abstractmethod from typing import Any, List, Union @@ -10,14 +10,13 @@ from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.models import ResponseStreamEvent -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext - from .human_in_the_loop_helper import HumanInTheLoopHelper from .response_event_generators import ( ResponseEventGenerator, ResponseStreamEventGenerator, StreamEventState, ) +from .._context import LanggraphRunContext logger = get_logger() @@ -28,28 +27,31 @@ class ResponseAPIStreamResponseConverter(ABC): One converter instance handles one response stream. """ @abstractmethod - async def convert(self, event: Union[AnyMessage, dict, Any, None]): + def convert(self, event: Union[AnyMessage, dict, Any, None]): """ Convert the Langgraph streamed output to ResponseStreamEvent objects. + :param event: The event to convert. 
+ :type event: Union[AnyMessage, dict, Any, None] :return: An asynchronous generator yielding ResponseStreamEvent objects. :rtype: AsyncGenerator[ResponseStreamEvent, None] """ - pass + raise NotImplementedError @abstractmethod - async def finalize(self, args=None): + def finalize(self, graph_state=None): """ Finalize the conversion process after the stream ends. + :param graph_state: The final graph state. + :type graph_state: Any :return: An asynchronous generator yielding final ResponseStreamEvent objects. :rtype: AsyncGenerator[ResponseStreamEvent, None] """ - pass class ResponseAPIMessagesStreamResponseConverter(ResponseAPIStreamResponseConverter): - def __init__(self, context: AgentRunContext, *, hitl_helper: HumanInTheLoopHelper): + def __init__(self, context: LanggraphRunContext, *, hitl_helper: HumanInTheLoopHelper): # self.stream = stream self.context = context self.hitl_helper = hitl_helper @@ -57,23 +59,25 @@ def __init__(self, context: AgentRunContext, *, hitl_helper: HumanInTheLoopHelpe self.stream_state = StreamEventState() self.current_generator: ResponseEventGenerator = None - def convert(self, output_event: Union[AnyMessage, dict, Any, None]): + def convert(self, event: Union[AnyMessage, dict, Any, None]): try: if self.current_generator is None: self.current_generator = ResponseStreamEventGenerator(logger, None, hitl_helper=self.hitl_helper) - message= output_event[0] # expect a tuple + if event is None or not hasattr(event, '__getitem__'): + raise ValueError(f"Event is not indexable: {event}") + message = event[0] # expect a tuple converted = self.try_process_message(message, self.context) return converted except Exception as e: - logger.error(f"Error converting message {message}: {e}") - raise ValueError(f"Error converting message {message}") from e - + logger.error(f"Error converting message {event}: {e}") + raise ValueError(f"Error converting message {event}") from e + def finalize(self, graph_state=None): logger.info("Stream ended, finalizing 
response.") res = [] # check and convert interrupts if self.hitl_helper.has_interrupt(graph_state): - interrupt = graph_state.interrupts[0] # should have only one interrupt + interrupt = graph_state.interrupts[0] # should have only one interrupt converted = self.try_process_message(interrupt, self.context) res.extend(converted) # finalize the stream @@ -81,7 +85,9 @@ def finalize(self, graph_state=None): res.extend(converted) return res - def try_process_message(self, event: Union[AnyMessage, Any, None], context: AgentRunContext) -> List[ResponseStreamEvent]: + def try_process_message( + self, event: Union[AnyMessage, Any, None], context: LanggraphRunContext + ) -> List[ResponseStreamEvent]: if event and not self.current_generator: self.current_generator = ResponseStreamEventGenerator(logger, None, hitl_helper=self.hitl_helper) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py index 6c3ef5505aa9..8502ec13069b 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py @@ -2,16 +2,18 @@ # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- # mypy: disable-error-code="assignment" +from typing import Optional + from langgraph.types import Interrupt from azure.ai.agentserver.core.models import projects as project_models -from azure.ai.agentserver.core.server.common.constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME from ..human_in_the_loop_helper import HumanInTheLoopHelper from ..utils import extract_function_call + class ItemResourceHelper: - def __init__(self, item_type: str, item_id: str = None): + def __init__(self, item_type: str, item_id: Optional[str] = None): self.item_type = item_type self.item_id = item_id @@ -63,17 +65,20 @@ def get_aggregated_content(self): class FunctionCallInterruptItemResourceHelper(ItemResourceHelper): def __init__(self, - item_id: str = None, - hitl_helper: HumanInTheLoopHelper = None, - interrupt: Interrupt = None): + item_id: Optional[str] = None, + hitl_helper: Optional[HumanInTheLoopHelper] = None, + interrupt: Optional[Interrupt] = None): super().__init__(project_models.ItemType.FUNCTION_CALL, item_id) self.hitl_helper = hitl_helper self.interrupt = interrupt def create_item_resource(self, is_done: bool): + if self.hitl_helper is None or self.interrupt is None: + return None item_resource = self.hitl_helper.convert_interrupt(self.interrupt) - if not is_done: - item_resource.arguments = "" + if item_resource is not None and not is_done: + if hasattr(item_resource, 'arguments'): + item_resource.arguments = "" # type: ignore[union-attr] return item_resource def add_aggregate_content(self, item): diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py index 2bea925ef2ed..cd161b99d152 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py @@ -8,7 +8,7 @@ from langchain_core.messages import AnyMessage from azure.ai.agentserver.core.models import projects as project_models -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext +from ..._context import LanggraphRunContext class StreamEventState: @@ -35,7 +35,7 @@ def __init__(self, logger, parent): def try_process_message( self, message: AnyMessage, # mypy: ignore[valid-type] - context: AgentRunContext, + context: LanggraphRunContext, stream_state: StreamEventState, ): # mypy: ignore[empty-body] """ @@ -44,7 +44,7 @@ def try_process_message( :param message: The incoming message to process. :type message: AnyMessage :param context: The agent run context. - :type context: AgentRunContext + :type context: LanggraphRunContext :param stream_state: The current stream event state. :type stream_state: StreamEventState @@ -63,7 +63,7 @@ def on_start(self) -> tuple[bool, List[project_models.ResponseStreamEvent]]: return False, [] def on_end( - self, message: AnyMessage, context: AgentRunContext, stream_state: StreamEventState + self, message: AnyMessage, context: LanggraphRunContext, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: """ Generate the ending events for this layer. @@ -72,7 +72,7 @@ def on_end( :param message: The incoming message to process. :type message: AnyMessage :param context: The agent run context. - :type context: AgentRunContext + :type context: LanggraphRunContext :param stream_state: The current stream event state. 
:type stream_state: StreamEventState diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py index ef4e14c0ba95..56c3bde68632 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py @@ -10,11 +10,10 @@ from langgraph.types import Interrupt from azure.ai.agentserver.core.models import projects as project_models -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext - +from . import ResponseEventGenerator, StreamEventState from ..human_in_the_loop_helper import HumanInTheLoopHelper from ..utils import extract_function_call -from . 
import ResponseEventGenerator, StreamEventState +from ..._context import LanggraphRunContext class ResponseFunctionCallArgumentEventGenerator(ResponseEventGenerator): @@ -36,7 +35,7 @@ def __init__( self.hitl_helper = hitl_helper def try_process_message( - self, message, context: AgentRunContext, stream_state: StreamEventState + self, message, context: LanggraphRunContext, stream_state: StreamEventState ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: is_processed = False events = [] @@ -74,7 +73,10 @@ def process( argument = None if isinstance(message, Interrupt): - _, _, argument = self.hitl_helper.interrupt_to_function_call(message) if self.hitl_helper else (None, None, None) + if self.hitl_helper: + _, _, argument = self.hitl_helper.interrupt_to_function_call(message) + else: + argument = None else: tool_call = self.get_tool_call_info(message) if tool_call: @@ -113,7 +115,7 @@ def should_end(self, event: AnyMessage) -> bool: return False def on_end( - self, message: AnyMessage, context: AgentRunContext, stream_state: StreamEventState + self, message: AnyMessage, context: LanggraphRunContext, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: done_event = project_models.ResponseFunctionCallArgumentsDoneEvent( item_id=self.item_id, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py index 8394854cd493..14eee3c571b2 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py 
@@ -10,13 +10,12 @@ from langgraph.types import Interrupt from azure.ai.agentserver.core.models import projects as project_models -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext from azure.ai.agentserver.core.server.common.id_generator.id_generator import IdGenerator - -from ..human_in_the_loop_helper import HumanInTheLoopHelper from . import ResponseEventGenerator, StreamEventState, item_resource_helpers from .response_content_part_event_generator import ResponseContentPartEventGenerator from .response_function_call_argument_event_generator import ResponseFunctionCallArgumentEventGenerator +from ..human_in_the_loop_helper import HumanInTheLoopHelper +from ..._context import LanggraphRunContext class ResponseOutputItemEventGenerator(ResponseEventGenerator): @@ -30,13 +29,13 @@ def __init__(self, logger, parent: ResponseEventGenerator, self.hitl_helper = hitl_helper def try_process_message( - self, message: Union[AnyMessage, Interrupt, None], context: AgentRunContext, stream_state: StreamEventState + self, message: Union[AnyMessage, Interrupt, None], context: LanggraphRunContext, stream_state: StreamEventState ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: is_processed = False next_processor = self events = [] if self.item_resource_helper is None: - if not self.try_create_item_resource_helper(message, context.id_generator): + if not self.try_create_item_resource_helper(message, context.agent_run.id_generator): # cannot create item resource, skip this message self.logger.warning(f"Cannot create item resource helper for message: {message}, skipping.") return True, self, [] @@ -70,7 +69,7 @@ def try_process_message( return is_processed, next_processor, events def on_start( - self, event: Union[AnyMessage, Interrupt], context: AgentRunContext, stream_state: StreamEventState + self, event: Union[AnyMessage, Interrupt], context: LanggraphRunContext, stream_state: StreamEventState ) -> tuple[bool, 
List[project_models.ResponseStreamEvent]]: if self.started: return True, [] @@ -97,7 +96,7 @@ def should_end(self, event: Union[AnyMessage, Interrupt]) -> bool: return False def on_end( - self, message: Union[AnyMessage, Interrupt], context: AgentRunContext, stream_state: StreamEventState + self, message: Union[AnyMessage, Interrupt], context: LanggraphRunContext, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: if not self.started: # should not happen return [] diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py index c65eda157bbd..8d0e62650a2d 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py @@ -6,12 +6,11 @@ from typing import List from azure.ai.agentserver.core.models import projects as project_models -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext - from .response_event_generator import ( ResponseEventGenerator, StreamEventState, ) +from ..._context import LanggraphRunContext class ResponseOutputTextEventGenerator(ResponseEventGenerator): @@ -93,7 +92,7 @@ def should_end(self, message) -> bool: return False def on_end( # mypy: ignore[override] - self, message, context: AgentRunContext, stream_state: StreamEventState + self, message, context: LanggraphRunContext, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: if not self.started: return False, [] diff --git 
a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py index a99e541cff11..72737e0774bb 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py @@ -9,13 +9,12 @@ from langchain_core import messages as langgraph_messages from azure.ai.agentserver.core.models import projects as project_models -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext - from .response_event_generator import ( ResponseEventGenerator, StreamEventState, ) from .response_output_item_event_generator import ResponseOutputItemEventGenerator +from ..._context import LanggraphRunContext class ResponseStreamEventGenerator(ResponseEventGenerator): @@ -30,18 +29,18 @@ def __init__(self, logger, parent, *, hitl_helper=None): self.aggregated_contents: List[project_models.ItemResource] = [] def on_start( - self, context: AgentRunContext, stream_state: StreamEventState + self, context: LanggraphRunContext, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: if self.started: return True, [] - agent_id = context.get_agent_id_object() - conversation = context.get_conversation_object() + agent_id = context.agent_run.get_agent_id_object() + conversation = context.agent_run.get_conversation_object() # response create event response_dict = { "object": "response", "agent_id": agent_id, "conversation": conversation, - "id": context.response_id, + "id": context.agent_run.response_id, "status": "in_progress", "created_at": int(time.time()), } @@ -56,7 +55,7 
@@ def on_start( "object": "response", "agent_id": agent_id, "conversation": conversation, - "id": context.response_id, + "id": context.agent_run.response_id, "status": "in_progress", "created_at": int(time.time()), } @@ -75,7 +74,7 @@ def should_complete(self, event: langgraph_messages.AnyMessage) -> bool: return False def try_process_message( - self, message: langgraph_messages.AnyMessage, context: AgentRunContext, stream_state: StreamEventState + self, message: langgraph_messages.AnyMessage, context: LanggraphRunContext, stream_state: StreamEventState ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: is_processed = False next_processor = self @@ -107,14 +106,15 @@ def should_end(self, event: langgraph_messages.AnyMessage) -> bool: return True return False - def on_end(self, message: langgraph_messages.AnyMessage, context: AgentRunContext, stream_state: StreamEventState): - agent_id = context.get_agent_id_object() - conversation = context.get_conversation_object() + def on_end(self, message: langgraph_messages.AnyMessage, context: LanggraphRunContext, + stream_state: StreamEventState): + agent_id = context.agent_run.get_agent_id_object() + conversation = context.agent_run.get_conversation_object() response_dict = { "object": "response", "agent_id": agent_id, "conversation": conversation, - "id": context.response_id, + "id": context.agent_run.response_id, "status": "completed", "created_at": int(time.time()), "output": self.aggregated_contents, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/__init__.py index daf51382381d..bd74de0f4e38 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/__init__.py @@ -4,7 +4,15 @@ __path__ = 
__import__('pkgutil').extend_path(__path__, __name__) -from ._builder import * +from ._builder import use_foundry_tools from ._chat_model import FoundryToolLateBindingChatModel from ._middleware import FoundryToolBindingMiddleware -from ._tool_node import FoundryToolNodeWrappers, FoundryToolCallWrapper \ No newline at end of file +from ._tool_node import FoundryToolCallWrapper, FoundryToolNodeWrappers + +__all__ = [ + "use_foundry_tools", + "FoundryToolLateBindingChatModel", + "FoundryToolBindingMiddleware", + "FoundryToolCallWrapper", + "FoundryToolNodeWrappers", +] diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py index afba02e26a0a..828a8b42ae45 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py @@ -12,7 +12,7 @@ @overload -def use_foundry_tools(tools: List[FoundryToolLike], /) -> FoundryToolBindingMiddleware: +def use_foundry_tools(tools: List[FoundryToolLike], /) -> FoundryToolBindingMiddleware: # pylint: disable=C4743 """Use foundry tools as middleware. :param tools: A list of foundry tools to bind. @@ -24,7 +24,7 @@ def use_foundry_tools(tools: List[FoundryToolLike], /) -> FoundryToolBindingMidd @overload -def use_foundry_tools(model: BaseChatModel, tools: List[FoundryToolLike], /) -> FoundryToolLateBindingChatModel: +def use_foundry_tools(model: BaseChatModel, tools: List[FoundryToolLike], /) -> FoundryToolLateBindingChatModel: # pylint: disable=C4743 """Use foundry tools with a chat model. :param model: The chat model to bind the tools to. @@ -37,7 +37,7 @@ def use_foundry_tools(model: BaseChatModel, tools: List[FoundryToolLike], /) -> ... 
-def use_foundry_tools( +def use_foundry_tools( # pylint: disable=C4743 model_or_tools: Union[BaseChatModel, List[FoundryToolLike]], tools: Optional[List[FoundryToolLike]] = None, /, @@ -56,6 +56,5 @@ def use_foundry_tools( raise ValueError("Tools must be provided when a model is given.") get_registry().extend(tools) return FoundryToolLateBindingChatModel(model_or_tools, foundry_tools=tools) - else: - get_registry().extend(model_or_tools) - return FoundryToolBindingMiddleware(model_or_tools) + get_registry().extend(model_or_tools) + return FoundryToolBindingMiddleware(model_or_tools) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py index 2ab97a4b0269..2d67f1938190 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py @@ -59,14 +59,24 @@ def tool_node_wrapper(self) -> FoundryToolNodeWrappers: """ return FoundryToolCallWrapper(self._foundry_tools_to_bind).as_wrappers() - def bind_tools(self, + def bind_tools(self, # pylint: disable=C4758 tools: Sequence[ Dict[str, Any] | type | Callable | BaseTool # noqa: UP006 ], *, tool_choice: str | None = None, **kwargs: Any) -> Runnable[LanguageModelInput, AIMessage]: - """Record tools to be bound later during invocation.""" + """Record tools to be bound later during invocation. + + :param tools: A sequence of tools to bind. + :type tools: Sequence[Dict[str, Any] | type | Callable | BaseTool] + :keyword tool_choice: Optional tool choice strategy. + :type tool_choice: str | None + :keyword kwargs: Additional keyword arguments for tool binding. + :type kwargs: Any + :return: A Runnable with the tools bound for later invocation. 
+ :rtype: Runnable[LanguageModelInput, AIMessage] + """ self._bound_tools.extend(tools) if tool_choice is not None: @@ -103,10 +113,9 @@ async def astream(self, input: Any, config: Optional[RunnableConfig] = None, **k @property def _llm_type(self) -> str: - return f"foundry_tool_binding_model({getattr(self.delegate, '_llm_type', type(self.delegate).__name__)})" + return f"foundry_tool_binding_model({getattr(self._delegate, '_llm_type', type(self._delegate).__name__)})" def _generate(self, messages: list[BaseMessage], stop: list[str] | None = None, run_manager: CallbackManagerForLLMRun | None = None, **kwargs: Any) -> ChatResult: # should never be called as invoke/ainvoke/stream/astream are redirected to delegate raise NotImplementedError() - diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py index d3b95e95e9c6..951c02adf728 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py @@ -5,21 +5,20 @@ from typing import Awaitable, Callable, List -from langchain_core.tools import BaseTool, Tool -from langgraph.typing import ContextT, StateT_co - -from azure.ai.agentserver.core.tools import FoundryToolLike from langchain.agents.middleware import AgentMiddleware, ModelRequest, ModelResponse from langchain.agents.middleware.types import ModelCallResult from langchain_core.messages import ToolMessage +from langchain_core.tools import BaseTool, Tool from langgraph.prebuilt.tool_node import ToolCallRequest from langgraph.types import Command +from azure.ai.agentserver.core.tools import FoundryToolLike + from ._chat_model import FoundryToolLateBindingChatModel from ._tool_node import FoundryToolCallWrapper -class 
FoundryToolBindingMiddleware(AgentMiddleware[StateT_co, ContextT]): +class FoundryToolBindingMiddleware(AgentMiddleware): """Middleware that binds foundry tools to tool calls in the agent. :param foundry_tools: A list of foundry tools to bind. diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py index 6915148a45af..1e5b5d4e351f 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py @@ -8,7 +8,7 @@ from pydantic import BaseModel, Field, create_model from azure.ai.agentserver.core import AgentServerContext -from azure.ai.agentserver.core.tools import FoundryTool, FoundryToolLike, ResolvedFoundryTool, SchemaDefinition, ensure_foundry_tool +from azure.ai.agentserver.core.tools import FoundryToolLike, ResolvedFoundryTool, SchemaDefinition, ensure_foundry_tool from azure.ai.agentserver.core.tools.utils import ToolNameResolver @@ -24,7 +24,7 @@ def __init__(self, tools: Iterable[Tuple[ResolvedFoundryTool, BaseTool]]): self._by_source_id[rt.definition.id].append(t) @overload - def get(self, tool: FoundryToolLike, /) -> Iterable[BaseTool]: + def get(self, tool: FoundryToolLike, /) -> Iterable[BaseTool]: # pylint: disable=C4743 """Get the LangChain tools for the given foundry tool. :param tool: The foundry tool to get the LangChain tools for. @@ -35,7 +35,7 @@ def get(self, tool: FoundryToolLike, /) -> Iterable[BaseTool]: ... @overload - def get(self, tools: Iterable[FoundryToolLike], /) -> Iterable[BaseTool]: + def get(self, tools: Iterable[FoundryToolLike], /) -> Iterable[BaseTool]: # pylint: disable=C4743 """Get the LangChain tools for the given foundry tools. :param tools: The foundry tools to get the LangChain tools for. 
@@ -66,9 +66,9 @@ def get(self, tool: Union[FoundryToolLike, Iterable[FoundryToolLike], None] = No yield from self return - tool_list = [tool] if not isinstance(tool, Iterable) else tool + tool_list = [tool] if not isinstance(tool, Iterable) else tool # type: ignore[assignment] for t in tool_list: - ft = ensure_foundry_tool(t) + ft = ensure_foundry_tool(t) # type: ignore[arg-type] yield from self._by_source_id.get(ft.id, []) def __iter__(self): @@ -125,15 +125,15 @@ async def _tool_func(**kwargs: Any) -> str: @classmethod def _create_pydantic_model(cls, tool_name: str, input_schema: SchemaDefinition) -> type[BaseModel]: - field_definitions = {} - required_fields = input_schema.required + field_definitions: Dict[str, Any] = {} + required_fields = input_schema.required or set() for prop_name, prop in input_schema.properties.items(): py_type = prop.type.py_type default = ... if prop_name in required_fields else None field_definitions[prop_name] = (py_type, Field(default, description=prop.description)) model_name = f"{tool_name.replace('-', '_').replace(' ', '_').title()}-Input" - return create_model(model_name, **field_definitions) + return create_model(model_name, **field_definitions) # type: ignore[call-overload] _tool_registry: List[FoundryToolLike] = [] diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py index 9dac2ec3a731..55273e391940 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py @@ -3,11 +3,12 @@ # --------------------------------------------------------- from typing import Awaitable, Callable, List, TypedDict, Union -from azure.ai.agentserver.core.tools import FoundryToolLike from langchain_core.messages import ToolMessage from 
langgraph.prebuilt.tool_node import AsyncToolCallWrapper, ToolCallRequest, ToolCallWrapper from langgraph.types import Command +from azure.ai.agentserver.core.tools import FoundryToolLike + ToolInvocationResult = Union[ToolMessage, Command] ToolInvocation = Callable[[ToolCallRequest], ToolInvocationResult] AsyncToolInvocation = Callable[[ToolCallRequest], Awaitable[ToolInvocationResult]] @@ -27,9 +28,9 @@ class FoundryToolNodeWrappers(TypedDict): :type awrap_tool_call: AsyncToolCallWrapper """ - wrap_tool_call: ToolCallWrapper + wrap_tool_call: ToolCallWrapper # type: ignore[valid-type] - awrap_tool_call: AsyncToolCallWrapper + awrap_tool_call: AsyncToolCallWrapper # type: ignore[valid-type] class FoundryToolCallWrapper: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py index a4d469449d09..ec45dceccfc8 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py @@ -1,18 +1,17 @@ from __future__ import annotations -import os import json +import os import time from dataclasses import dataclass -from typing import Any, AsyncGenerator, AsyncIterator, Dict, List, TypedDict +from typing import Any, AsyncIterable, AsyncIterator, Dict, List, TypedDict from dotenv import load_dotenv -from langgraph.graph import StateGraph, START, END +from langgraph.graph import END, START, StateGraph from openai import OpenAI, OpenAIError -from azure.ai.agentserver.core import AgentRunContext from azure.ai.agentserver.core.models import Response, ResponseStreamEvent -from azure.ai.agentserver.langgraph import from_langgraph +from azure.ai.agentserver.langgraph import LanggraphRunContext, from_langgraph from azure.ai.agentserver.langgraph.models.response_api_default_converter import ResponseAPIDefaultConverter from 
azure.ai.agentserver.langgraph.models.response_api_request_converter import ResponseAPIRequestConverter @@ -101,11 +100,11 @@ def retrieve_docs(question: str, k: int = 2) -> List[Dict[str, Any]]: class RAGRequestConverter(ResponseAPIRequestConverter): """Converter implementing mini RAG logic.""" - def __init__(self, context: AgentRunContext): + def __init__(self, context: LanggraphRunContext): self.context = context def convert(self) -> dict: - req = self.context.request + req = self.context.agent_run.request user_input = req.get("input") if isinstance(user_input, list): for item in user_input: @@ -141,12 +140,12 @@ def __init__(self, graph: StateGraph): super().__init__(graph=graph, create_request_converter=lambda context: RAGRequestConverter(context)) - def get_stream_mode(self, context: AgentRunContext) -> str: # noqa: D401 - if context.request.get("stream", False): # type: ignore[attr-defined] + def get_stream_mode(self, context: LanggraphRunContext) -> str: # noqa: D401 + if context.agent_run.request.get("stream", False): # type: ignore[attr-defined] raise NotImplementedError("Streaming not supported in this sample.") return "values" - async def convert_response_non_stream(self, state: Any, context: AgentRunContext) -> Response: + async def convert_response_non_stream(self, state: Any, context: LanggraphRunContext) -> Response: final_answer = state.get("final_answer") or "(no answer generated)" print(f"convert state to response, state: {state}") citations = state.get("retrieved", []) @@ -170,9 +169,9 @@ async def convert_response_non_stream(self, state: Any, context: AgentRunContext } base = { "object": "response", - "id": context.response_id, - "agent": context.get_agent_id_object(), - "conversation": context.get_conversation_object(), + "id": context.agent_run.response_id, + "agent": context.agent_run.get_agent_id_object(), + "conversation": context.agent_run.get_conversation_object(), "status": "completed", "created_at": int(time.time()), "output": 
[output_item], @@ -182,8 +181,8 @@ async def convert_response_non_stream(self, state: Any, context: AgentRunContext async def convert_response_stream( # noqa: D401 self, stream_state: AsyncIterator[Dict[str, Any] | Any], - context: AgentRunContext, - ) -> AsyncGenerator[ResponseStreamEvent, None]: + context: LanggraphRunContext, + ) -> AsyncIterable[ResponseStreamEvent]: raise NotImplementedError("Streaming not supported in this sample.") diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/graph_factory_example.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/graph_factory_example.py deleted file mode 100644 index 4b95f4d98801..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/graph_factory_example.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. -"""Example showing how to use a graph factory function with ToolClient. - -This sample demonstrates how to pass a factory function to LangGraphAdapter -that receives a ToolClient and returns a CompiledStateGraph. This pattern -allows the graph to be created dynamically with access to tools from -Azure AI Tool Client at runtime. 
-""" - -import asyncio -import os -from typing import List -from dotenv import load_dotenv -from importlib.metadata import version -from langchain_openai import AzureChatOpenAI -from langgraph.checkpoint.memory import MemorySaver -from langgraph.graph.state import CompiledStateGraph -from langchain_core.tools import StructuredTool - -from azure.ai.agentserver.langgraph import from_langgraph -from azure.identity.aio import DefaultAzureCredential - -load_dotenv() - - -def create_agent(model, tools, checkpointer): - """Create a LangGraph agent based on the version.""" - # for different langgraph versions - langgraph_version = version("langgraph") - if langgraph_version < "1.0.0": - from langgraph.prebuilt import create_react_agent - - return create_react_agent(model, tools, checkpointer=checkpointer) - else: - from langchain.agents import create_agent - - return create_agent(model, tools, checkpointer=checkpointer) - - -def create_graph_factory(): - """Create a factory function that builds a graph with ToolClient. - - This function returns a factory that takes a ToolClient and returns - a CompiledStateGraph. The graph is created at runtime for every request, - allowing it to access the latest tool configuration dynamically. - """ - - async def graph_factory(tools: List[StructuredTool]) -> CompiledStateGraph: - """Factory function that creates a graph using the provided tools. - - :param tools: The list of StructuredTool instances. - :type tools: List[StructuredTool] - :return: A compiled LangGraph state graph. 
- :rtype: CompiledStateGraph - """ - # Get configuration from environment - deployment_name = os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", "gpt-4o") - - # List all available tools from the ToolClient - print(f"Found {len(tools)} tools:") - for tool in tools: - print(f" - {tool.name}: {tool.description}") - - if not tools: - print("\nNo tools found!") - print("Make sure your Azure AI project has tools configured.") - raise ValueError("No tools available to create agent") - - # Create the language model - model = AzureChatOpenAI(model=deployment_name) - - # Create a memory checkpointer for conversation history - memory = MemorySaver() - - # Create the LangGraph agent with the tools - print("\nCreating LangGraph agent with tools from factory...") - agent = create_agent(model, tools, memory) - - print("Agent created successfully!") - return agent - - return graph_factory - - -async def quickstart(): - """Build and return a LangGraphAdapter using a graph factory function.""" - - # Get configuration from environment - project_endpoint = os.getenv("AZURE_AI_PROJECT_ENDPOINT") - - if not project_endpoint: - raise ValueError( - "AZURE_AI_PROJECT_ENDPOINT environment variable is required. 
" - "Set it to your Azure AI project endpoint, e.g., " - "https://.services.ai.azure.com/api/projects/" - ) - - # Create Azure credentials - credential = DefaultAzureCredential() - - # Create a factory function that will build the graph at runtime - # The factory will receive a ToolClient when the agent first runs - graph_factory = create_graph_factory() - - # Pass the factory function to from_langgraph instead of a compiled graph - # The graph will be created on every agent run with access to ToolClient - print("Creating LangGraph adapter with factory function...") - # Get project connection ID from environment - tool_connection_id = os.getenv("AZURE_AI_PROJECT_TOOL_CONNECTION_ID") - - adapter = from_langgraph(graph_factory, credentials=credential, tools=[{"type": "mcp", "project_connection_id": tool_connection_id}]) - - print("Adapter created! Graph will be built on every request.") - return adapter - - -async def main(): # pragma: no cover - sample entrypoint - """Main function to run the agent.""" - adapter = await quickstart() - - if adapter: - print("\nStarting agent server...") - print("The graph factory will be called for every request that arrives.") - await adapter.run_async() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py deleted file mode 100644 index 9084b07872a9..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. -"""Enhanced MCP example using ToolClient with AzureAIToolClient. - -This sample demonstrates how to use the ToolClient to integrate Azure AI -Tool Client (which supports both MCP tools and Azure AI Tools API) with -LangGraph's create_react_agent. 
-""" - -import asyncio -import os - -from dotenv import load_dotenv -from importlib.metadata import version -from langchain_openai import AzureChatOpenAI -from langgraph.checkpoint.memory import MemorySaver - -from azure.ai.agentserver.core.tools import FoundryToolClient -from azure.ai.agentserver.langgraph import ToolClient, from_langgraph -from azure.identity.aio import DefaultAzureCredential - -load_dotenv() - - -def create_agent(model, tools, checkpointer): - """Create a LangGraph agent based on available imports.""" - try: - from langgraph.prebuilt import create_react_agent - return create_react_agent(model, tools, checkpointer=checkpointer) - except ImportError: - from langchain.agents import create_agent - return create_agent(model, tools, checkpointer=checkpointer) - - -async def quickstart(): - """Build and return a LangGraph agent wired to Azure AI Tool Client.""" - - # Get configuration from environment - project_endpoint = os.getenv("AZURE_AI_PROJECT_ENDPOINT") - deployment_name = os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", "gpt-4o") - - if not project_endpoint: - raise ValueError( - "AZURE_AI_PROJECT_ENDPOINT environment variable is required. 
" - "Set it to your Azure AI project endpoint, e.g., " - "https://.services.ai.azure.com/api/projects/" - ) - tool_connection_id = os.getenv("AZURE_AI_PROJECT_TOOL_CONNECTION_ID") - - # Create Azure credentials - credential = DefaultAzureCredential() - tool_definitions = [ - { - "type": "mcp", - "project_connection_id": tool_connection_id - }, - { - "type": "code_interpreter", - } - ] - # Create the AzureAIToolClient - # This client supports both MCP tools and Azure AI Tools API - tool_client = FoundryToolClient( - endpoint=project_endpoint, - credential=credential, - tools=tool_definitions - ) - - # Create the ToolClient - client = ToolClient(tool_client) - - # List all available tools and convert to LangChain format - print("Fetching tools from Azure AI Tool Client...") - tools = await client.list_tools_details() - print(f"Found {len(tools)} tools:") - for tool in tools: - print(f" - {tool.name}: {tool.description}") - - if not tools: - print("\nNo tools found!") - print("Make sure your Azure AI project has tools configured.") - print("This can include:") - print(" - MCP (Model Context Protocol) servers") - print(" - Foundry AI Tools") - return None - - # Create the language model - model = AzureChatOpenAI(model=deployment_name) - - # Create a memory checkpointer for conversation history - memory = MemorySaver() - - # Create the LangGraph agent with the tools - print("\nCreating LangGraph agent with tools...") - agent = create_agent(model, tools, memory) - - print("Agent created successfully!") - return agent - - -async def main(): # pragma: no cover - sample entrypoint - """Main function to run the agent.""" - agent = await quickstart() - - if agent: - print("\nStarting agent server...") - await from_langgraph(agent).run_async() - - -if __name__ == "__main__": - asyncio.run(main()) From 7d2f9267cad5437794531402768220d73e61693f Mon Sep 17 00:00:00 2001 From: junanchen Date: Wed, 21 Jan 2026 13:36:09 -0800 Subject: [PATCH 52/94] Fix context propagate issue in lg + 
python 3.10 --- .../ai/agentserver/langgraph/_context.py | 16 +++++++----- .../ai/agentserver/langgraph/langgraph.py | 8 +++--- .../models/response_api_converter.py | 7 +++--- .../models/response_api_default_converter.py | 4 +-- .../langgraph/tools/_chat_model.py | 10 +++++--- .../langgraph/tools/_middleware.py | 25 ++++++++++++++----- .../agentserver/langgraph/tools/_tool_node.py | 10 +++++--- 7 files changed, 53 insertions(+), 27 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py index 0d9566707362..81f0e0f0b545 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py @@ -2,10 +2,12 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- from dataclasses import dataclass +from typing import Optional, Union -from langgraph.runtime import get_runtime -from azure.ai.agentserver.core import AgentRunContext +from langgraph.prebuilt import ToolRuntime +from langgraph.runtime import Runtime +from azure.ai.agentserver.core import AgentRunContext from .tools._context import FoundryToolContext @@ -15,7 +17,9 @@ class LanggraphRunContext: tools: FoundryToolContext - @classmethod - def get_current(cls) -> "LanggraphRunContext": - lg_runtime = get_runtime(cls) - return lg_runtime.context + @staticmethod + def from_runtime(runtime: Union[Runtime, ToolRuntime]) -> Optional["LanggraphRunContext"]: + context = runtime.context + if isinstance(context, LanggraphRunContext): + return context + return None diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index 
7072f641850f..efa0a1a84959 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -68,7 +68,7 @@ async def agent_run(self, context: AgentRunContext): try: lg_run_context = await self.setup_lg_run_context(context) input_arguments = await self.converter.convert_request(lg_run_context) - self.ensure_runnable_config(input_arguments) + self.ensure_runnable_config(input_arguments, lg_run_context) if not context.stream: response = await self.agent_run_non_stream(input_arguments) @@ -156,17 +156,19 @@ async def agent_run_astream(self, logger.error(f"Error during streaming agent run: {e}", exc_info=True) raise e - def ensure_runnable_config(self, input_arguments: GraphInputArguments): + def ensure_runnable_config(self, input_arguments: GraphInputArguments, context: LanggraphRunContext): """ Ensure the RunnableConfig is set in the input arguments. :param input_arguments: The input arguments for the agent run. :type input_arguments: GraphInputArguments + :param context: The Langgraph run context. 
+ :type context: LanggraphRunContext """ config = input_arguments.get("config", {}) configurable = config.get("configurable", {}) - configurable["thread_id"] = input_arguments["context"].agent_run.conversation_id config["configurable"] = configurable + configurable["thread_id"] = input_arguments["context"].agent_run.conversation_id callbacks = config.get("callbacks", []) if self.azure_ai_tracer and self.azure_ai_tracer not in callbacks: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py index 4d72ab22060f..d1c5531993a1 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py @@ -23,7 +23,8 @@ from abc import ABC, abstractmethod from typing import Any, AsyncIterable, AsyncIterator, Dict, TypedDict, Union -from langgraph.types import Command +from langchain_core.runnables import RunnableConfig +from langgraph.types import Command, StreamMode from azure.ai.agentserver.core.models import Response, ResponseStreamEvent from .._context import LanggraphRunContext @@ -32,9 +33,9 @@ class GraphInputArguments(TypedDict): """TypedDict for LangGraph input arguments.""" input: Union[Dict[str, Any], Command, None] - config: Dict[str, Any] + config: RunnableConfig context: LanggraphRunContext - stream_mode: str + stream_mode: StreamMode class ResponseAPIConverter(ABC): diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py index eb725ed23ef8..9bc237c87cf1 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py @@ -9,7 +9,7 @@ from langchain_core.runnables import RunnableConfig from langgraph.graph.state import CompiledStateGraph -from langgraph.types import Command, StateSnapshot +from langgraph.types import Command, StateSnapshot, StreamMode from azure.ai.agentserver.core.models import Response, ResponseStreamEvent from .human_in_the_loop_helper import HumanInTheLoopHelper @@ -90,7 +90,7 @@ async def convert_response_stream( # type: ignore[override] for event in finalized_output: yield event - def get_stream_mode(self, context: LanggraphRunContext) -> str: + def get_stream_mode(self, context: LanggraphRunContext) -> StreamMode: if context.agent_run.stream: return "messages" return "updates" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py index 2d67f1938190..e511f5bbf915 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py @@ -3,7 +3,7 @@ # --------------------------------------------------------- from __future__ import annotations -from typing import Any, Callable, Dict, List, Optional, Sequence +from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence from langchain_core.callbacks import CallbackManagerForLLMRun from langchain_core.language_models import BaseChatModel, LanguageModelInput @@ -12,6 +12,7 @@ from langchain_core.runnables import Runnable, RunnableConfig from langchain_core.tools import BaseTool from langgraph.prebuilt import ToolNode +from langgraph.runtime import Runtime 
from azure.ai.agentserver.core.tools import FoundryToolLike from ._tool_node import FoundryToolCallWrapper, FoundryToolNodeWrappers @@ -29,9 +30,10 @@ class FoundryToolLateBindingChatModel(BaseChatModel): :type foundry_tools: List[FoundryToolLike] """ - def __init__(self, delegate: BaseChatModel, foundry_tools: List[FoundryToolLike]): + def __init__(self, delegate: BaseChatModel, runtime: Runtime, foundry_tools: List[FoundryToolLike]): super().__init__() self._delegate = delegate + self._runtime = runtime self._foundry_tools_to_bind = foundry_tools self._bound_tools: List[Dict[str, Any] | type | Callable | BaseTool] = [] self._bound_kwargs: dict[str, Any] = {} @@ -88,7 +90,9 @@ def bind_tools(self, # pylint: disable=C4758 def _bound_delegate_for_call(self) -> Runnable[LanguageModelInput, AIMessage]: from .._context import LanggraphRunContext - foundry_tools = LanggraphRunContext.get_current().tools.resolved_tools.get(self._foundry_tools_to_bind) + foundry_tools: Iterable[BaseTool] = [] + if (context := LanggraphRunContext.from_runtime(self._runtime)) is not None: + foundry_tools = context.tools.resolved_tools.get(self._foundry_tools_to_bind) all_tools = self._bound_tools.copy() all_tools.extend(foundry_tools) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py index 951c02adf728..c226e51e72ac 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py @@ -3,7 +3,7 @@ # --------------------------------------------------------- from __future__ import annotations -from typing import Awaitable, Callable, List +from typing import Awaitable, Callable, ClassVar, List from langchain.agents.middleware import AgentMiddleware, ModelRequest, ModelResponse from 
langchain.agents.middleware.types import ModelCallResult @@ -24,6 +24,7 @@ class FoundryToolBindingMiddleware(AgentMiddleware): :param foundry_tools: A list of foundry tools to bind. :type foundry_tools: List[FoundryToolLike] """ + _DummyToolName: ClassVar[str] = "__dummy_tool_by_foundry_middleware__" def __init__(self, foundry_tools: List[FoundryToolLike]): super().__init__() @@ -35,9 +36,9 @@ def __init__(self, foundry_tools: List[FoundryToolLike]): self._foundry_tools_to_bind = foundry_tools self._tool_call_wrapper = FoundryToolCallWrapper(self._foundry_tools_to_bind) - @staticmethod - def _dummy_tool() -> BaseTool: - return Tool(name="__dummy_tool_by_foundry_middleware__", + @classmethod + def _dummy_tool(cls) -> BaseTool: + return Tool(name=cls._DummyToolName, func=lambda x: None, description="__dummy_tool_by_foundry_middleware__") @@ -77,8 +78,20 @@ def _wrap_model(self, request: ModelRequest) -> ModelRequest: """ if not self._foundry_tools_to_bind: return request - wrapper = FoundryToolLateBindingChatModel(request.model, self._foundry_tools_to_bind) - return request.override(model=wrapper) + wrapper = FoundryToolLateBindingChatModel(request.model, request.runtime, self._foundry_tools_to_bind) + return request.override(model=wrapper, tools=self._remove_dummy_tool(request)) + + def _remove_dummy_tool(self, request: ModelRequest) -> list: + """Remove the dummy tool from the request's tools if present. + + :param request: The model request. + :type request: ModelRequest + :return: The list of tools without the dummy tool. 
+ :rtype: list + """ + if not request.tools: + return [] + return [tool for tool in request.tools if not isinstance(tool, Tool) or tool.name != self._DummyToolName] def wrap_tool_call(self, request: ToolCallRequest, handler: Callable[[ToolCallRequest], ToolMessage | Command]) -> ToolMessage | Command: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py index 55273e391940..e66e1c554ba1 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py @@ -74,14 +74,16 @@ async def call_tool_async(self, request: ToolCallRequest, invocation: AsyncToolI return await invocation(self._maybe_calling_foundry_tool(request)) def _maybe_calling_foundry_tool(self, request: ToolCallRequest) -> ToolCallRequest: - if request.tool or not self._allowed_foundry_tools: + from .._context import LanggraphRunContext + + if (request.tool + or not self._allowed_foundry_tools + or (context := LanggraphRunContext.from_runtime(request.runtime)) is None): # tool is already resolved return request - from .._context import LanggraphRunContext - tool_name = request.tool_call["name"] - for t in LanggraphRunContext.get_current().tools.resolved_tools.get(self._allowed_foundry_tools): + for t in context.tools.resolved_tools.get(self._allowed_foundry_tools): if t.name == tool_name: return ToolCallRequest( tool_call=request.tool_call, From f5d15c2fbe600ea3471aca7a93312b56ad6d4f8f Mon Sep 17 00:00:00 2001 From: lusu-msft <68949729+lusu-msft@users.noreply.github.com> Date: Wed, 21 Jan 2026 14:55:47 -0800 Subject: [PATCH 53/94] [AgentServer] subclasses for agent-framework AIAgent and WorkflowAgent (#44759) * remove unused code in af * created subclasses for agent-framework AIAgent and WorkflowAgent * 
remove unused code * validate core with tox * refining agent framework adapters * refining adapters * updated minors * update from_agent_framework * fix samples --------- Co-authored-by: junanchen --- .../ai/agentserver/agentframework/__init__.py | 99 +++++- .../agentframework/_agent_framework.py | 253 ++++++-------- .../agentframework/_ai_agent_adapter.py | 88 +++++ .../agentframework/_workflow_agent_adapter.py | 142 ++++++++ ...ramework_output_non_streaming_converter.py | 2 +- ...nt_framework_output_streaming_converter.py | 13 +- .../persistence/agent_thread_repository.py | 21 +- .../samples/basic_simple/minimal_example.py | 4 +- .../chat_client_with_foundry_tool.py | 2 +- .../human_in_the_loop_ai_function/main.py | 2 +- .../.gitignore | 1 + .../README.md | 2 +- .../human_in_the_loop_workflow_agent/main.py | 15 +- .../samples/mcp_apikey/mcp_apikey.py | 2 +- .../samples/mcp_simple/mcp_simple.py | 2 +- .../simple_async/minimal_async_example.py | 2 +- .../samples/workflow_agent_simple/README.md | 286 ++-------------- .../workflow_agent_simple.py | 310 ++---------------- 18 files changed, 523 insertions(+), 723 deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/.gitignore diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py index 20a41df7ef73..1a276a14ff9e 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py @@ 
-3,10 +3,14 @@ # --------------------------------------------------------- __path__ = __import__("pkgutil").extend_path(__path__, __name__) -from typing import TYPE_CHECKING, Any, Optional +from typing import TYPE_CHECKING, Any, Callable, Optional, Union, overload + +from agent_framework import AgentProtocol, BaseAgent, Workflow, WorkflowBuilder from azure.ai.agentserver.agentframework._version import VERSION -from azure.ai.agentserver.agentframework._agent_framework import AgentFrameworkCBAgent +from azure.ai.agentserver.agentframework._agent_framework import AgentFrameworkAgent +from azure.ai.agentserver.agentframework._ai_agent_adapter import AgentFrameworkAIAgentAdapter +from azure.ai.agentserver.agentframework._workflow_agent_adapter import AgentFrameworkWorkflowAdapter from azure.ai.agentserver.agentframework._foundry_tools import FoundryToolsChatMiddleware from azure.ai.agentserver.core.application import PackageMetadata, set_current_app @@ -14,13 +18,98 @@ from azure.core.credentials_async import AsyncTokenCredential +@overload +def from_agent_framework( + *, + agent: Union[BaseAgent, AgentProtocol], + credentials: Optional["AsyncTokenCredential"] = None, + **kwargs: Any, + ) -> "AgentFrameworkAIAgentAdapter": + """ + Create an Agent Framework AI Agent Adapter from an AgentProtocol or BaseAgent. + + :param agent: The agent to adapt. + :type agent: Union[BaseAgent, AgentProtocol] + :param credentials: Optional asynchronous token credential for authentication. + :type credentials: Optional[AsyncTokenCredential] + :param kwargs: Additional keyword arguments to pass to the adapter. + :type kwargs: Any + + :return: An instance of AgentFrameworkAIAgentAdapter. + :rtype: AgentFrameworkAIAgentAdapter + """ + ... 
+ +@overload def from_agent_framework( - agent, + *, + workflow: Union[WorkflowBuilder, Callable[[], Workflow]], + credentials: Optional["AsyncTokenCredential"] = None, + **kwargs: Any, + ) -> "AgentFrameworkWorkflowAdapter": + """ + Create an Agent Framework Workflow Adapter. + The arugument `workflow` can be either a WorkflowBuilder or a factory function + that returns a Workflow. + It will be called to create a new Workflow instance and `.as_agent()` will be + called as well for each incoming CreateResponse request. Please ensure that the + workflow definition can be converted to a WorkflowAgent. For more information, + see the agent-framework samples and documentation. + + :param workflow: The workflow builder or factory function to adapt. + :type workflow: Union[WorkflowBuilder, Callable[[], Workflow]] + :param credentials: Optional asynchronous token credential for authentication. + :type credentials: Optional[AsyncTokenCredential] + :param kwargs: Additional keyword arguments to pass to the adapter. + :type kwargs: Any + :return: An instance of AgentFrameworkWorkflowAdapter. + :rtype: AgentFrameworkWorkflowAdapter + """ + ... + +def from_agent_framework( + *, + agent: Optional[Union[BaseAgent, AgentProtocol]] = None, + workflow: Optional[Union[WorkflowBuilder, Callable[[], Workflow]]] = None, credentials: Optional["AsyncTokenCredential"] = None, **kwargs: Any, -) -> "AgentFrameworkCBAgent": +) -> "AgentFrameworkAgent": + """ + Create an Agent Framework Adapter from either an AgentProtocol/BaseAgent or a + WorkflowAgent. + One of agent or workflow must be provided. + + :param agent: The agent to adapt. + :type agent: Optional[Union[BaseAgent, AgentProtocol]] + :param workflow: The workflow builder or factory function to adapt. + :type workflow: Optional[Union[WorkflowBuilder, Callable[[], Workflow]]] + :param credentials: Optional asynchronous token credential for authentication. 
+ :type credentials: Optional[AsyncTokenCredential] + :param kwargs: Additional keyword arguments to pass to the adapter. + :type kwargs: Any + :return: An instance of AgentFrameworkAgent. + :rtype: AgentFrameworkAgent + :raises TypeError: If neither or both of agent and workflow are provided, or if + the provided types are incorrect. + """ + + provided = sum(value is not None for value in (agent, workflow)) + if provided != 1: + raise TypeError("from_agent_framework requires exactly one of 'agent' or 'workflow' keyword arguments") + + if workflow is not None: + if isinstance(workflow, WorkflowBuilder): + def workflow_factory() -> Workflow: + return workflow.build() + + return AgentFrameworkWorkflowAdapter(workflow_factory=workflow_factory, credentials=credentials, **kwargs) + if isinstance(workflow, Callable): + return AgentFrameworkWorkflowAdapter(workflow_factory=workflow, credentials=credentials, **kwargs) + raise TypeError("workflow must be a WorkflowBuilder or callable returning a Workflow") - return AgentFrameworkCBAgent(agent, credentials=credentials, **kwargs) + if isinstance(agent, AgentProtocol) or isinstance(agent, BaseAgent): + return AgentFrameworkAIAgentAdapter(agent, credentials=credentials, **kwargs) + raise TypeError("agent must be an instance of AgentProtocol or BaseAgent") __all__ = [ diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py index 40e1e72b70a4..ece0198285bf 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py @@ -5,10 +5,9 @@ from __future__ import annotations import os -from typing import Any, AsyncGenerator, Optional, TYPE_CHECKING, Union +from typing import Any, AsyncGenerator, 
Optional, TYPE_CHECKING, Union, Callable -from agent_framework import AgentProtocol, CheckpointStorage, WorkflowCheckpoint -from agent_framework._workflows import get_checkpoint_summary +from agent_framework import AgentProtocol, AgentThread, WorkflowAgent from agent_framework.azure import AzureAIClient # pylint: disable=no-name-in-module from opentelemetry import trace @@ -21,13 +20,10 @@ ) from azure.ai.agentserver.core.models.projects import ResponseErrorEvent, ResponseFailedEvent from azure.ai.agentserver.core.tools import OAuthConsentRequiredError -from .models.agent_framework_input_converters import AgentFrameworkInputConverter -from .models.agent_framework_output_non_streaming_converter import ( - AgentFrameworkOutputNonStreamingConverter, -) + from .models.agent_framework_output_streaming_converter import AgentFrameworkOutputStreamingConverter from .models.human_in_the_loop_helper import HumanInTheLoopHelper -from .persistence import AgentThreadRepository, CheckpointRepository +from .persistence import AgentThreadRepository if TYPE_CHECKING: from azure.core.credentials_async import AsyncTokenCredential @@ -35,7 +31,7 @@ logger = get_logger() -class AgentFrameworkCBAgent(FoundryCBAgent): +class AgentFrameworkAgent(FoundryCBAgent): """ Adapter class for integrating Agent Framework agents with the FoundryCB agent interface. @@ -55,14 +51,12 @@ class AgentFrameworkCBAgent(FoundryCBAgent): def __init__(self, agent: AgentProtocol, credentials: "Optional[AsyncTokenCredential]" = None, *, - thread_repository: AgentThreadRepository = None, - checkpoint_repository: CheckpointRepository = None, + thread_repository: Optional[AgentThreadRepository] = None, **kwargs: Any, ): - """Initialize the AgentFrameworkCBAgent with an AgentProtocol or a factory function. + """Initialize the AgentFrameworkAgent with an AgentProtocol. - :param agent: The Agent Framework agent to adapt, or a callable that takes ToolClient - and returns AgentProtocol (sync or async). 
+ :param agent: The Agent Framework agent to adapt. :type agent: AgentProtocol :param credentials: Azure credentials for authentication. :type credentials: Optional[AsyncTokenCredential] @@ -71,9 +65,8 @@ def __init__(self, agent: AgentProtocol, """ super().__init__(credentials=credentials, **kwargs) # pylint: disable=unexpected-keyword-arg self._agent: AgentProtocol = agent - self._hitl_helper = HumanInTheLoopHelper() - self._checkpoint_repository = checkpoint_repository self._thread_repository = thread_repository + self._hitl_helper = HumanInTheLoopHelper() @property def agent(self) -> "AgentProtocol": @@ -188,153 +181,91 @@ async def agent_run( # pylint: disable=too-many-statements OpenAIResponse, AsyncGenerator[ResponseStreamEvent, Any], ]: - try: - logger.info(f"Starting agent_run with stream={context.stream}") - request_input = context.request.get("input") - - agent_thread = None - checkpoint_storage = None - last_checkpoint = None - if self._thread_repository: - agent_thread = await self._thread_repository.get(context.conversation_id) - if agent_thread: - logger.info(f"Loaded agent thread for conversation: {context.conversation_id}") - else: - agent_thread = self.agent.get_new_thread() - - if self._checkpoint_repository: - checkpoint_storage = await self._checkpoint_repository.get_or_create(context.conversation_id) - last_checkpoint = await self._get_latest_checkpoint(checkpoint_storage) - if last_checkpoint: - summary = get_checkpoint_summary(last_checkpoint) - if summary.status == "completed": - logger.warning("Last checkpoint is completed. 
Will not resume from it.") - last_checkpoint = None # Do not resume from completed checkpoints - if last_checkpoint: - await self._load_checkpoint(self.agent, last_checkpoint, checkpoint_storage) - logger.info(f"Loaded checkpoint with ID: {last_checkpoint.checkpoint_id}") - - input_converter = AgentFrameworkInputConverter(hitl_helper=self._hitl_helper) - message = await input_converter.transform_input( - request_input, - agent_thread=agent_thread, - checkpoint=last_checkpoint) - logger.debug(f"Transformed input message type: {type(message)}") - - # Use split converters - if context.stream: - logger.info("Running agent in streaming mode") - streaming_converter = AgentFrameworkOutputStreamingConverter(context, hitl_helper=self._hitl_helper) - - async def stream_updates(): - try: - update_count = 0 - try: - updates = self.agent.run_stream( - message, - thread=agent_thread, - checkpoint_storage=checkpoint_storage, - ) - async for event in streaming_converter.convert(updates): - update_count += 1 - yield event - - if agent_thread and self._thread_repository: - await self._thread_repository.set(context.conversation_id, agent_thread, checkpoint_storage) - logger.info(f"Saved agent thread for conversation: {context.conversation_id}") - - logger.info("Streaming completed with %d updates", update_count) - except OAuthConsentRequiredError as e: - logger.info("OAuth consent required during streaming updates") - if update_count == 0: - async for event in self.respond_with_oauth_consent_astream(context, e): - yield event - else: - # If we've already emitted events, we cannot safely restart a new - # OAuth-consent stream (it would reset sequence numbers). 
- yield ResponseErrorEvent( - sequence_number=streaming_converter.next_sequence(), - code="server_error", - message=f"OAuth consent required: {e.consent_url}", - param="agent_run", - ) - yield ResponseFailedEvent( - sequence_number=streaming_converter.next_sequence(), - response=streaming_converter._build_response(status="failed"), # pylint: disable=protected-access - ) - except Exception as e: # pylint: disable=broad-exception-caught - logger.error("Unhandled exception during streaming updates: %s", e, exc_info=True) - - # Emit well-formed error events instead of terminating the stream. - yield ResponseErrorEvent( - sequence_number=streaming_converter.next_sequence(), - code="server_error", - message=str(e), - param="agent_run", - ) - yield ResponseFailedEvent( - sequence_number=streaming_converter.next_sequence(), - response=streaming_converter._build_response(status="failed"), # pylint: disable=protected-access - ) - finally: - # No request-scoped resources to clean up here today. - # Keep this block as a hook for future request-scoped cleanup. 
- pass - - return stream_updates() - - # Non-streaming path - logger.info("Running agent in non-streaming mode") - non_streaming_converter = AgentFrameworkOutputNonStreamingConverter(context) - result = await self.agent.run( - message, - thread=agent_thread, - checkpoint_storage=checkpoint_storage) - logger.debug(f"Agent run completed, result type: {type(result)}") - - if agent_thread and self._thread_repository: - await self._thread_repository.set(context.conversation_id, agent_thread) - logger.info(f"Saved agent thread for conversation: {context.conversation_id}") - - transformed_result = non_streaming_converter.transform_output_for_response(result) - logger.info("Agent run and transformation completed successfully") - return transformed_result - except OAuthConsentRequiredError as e: - logger.info("OAuth consent required during agent run") - if context.stream: - # Yield OAuth consent response events - # Capture e in the closure by passing it as a default argument - async def oauth_consent_stream(error=e): - async for event in self.respond_with_oauth_consent_astream(context, error): - yield event - return oauth_consent_stream() - return await self.respond_with_oauth_consent(context, e) - finally: - pass - - async def _get_latest_checkpoint(self, - checkpoint_storage: CheckpointStorage) -> Optional[Any]: - """Load the latest checkpoint from the given storage. - - :param checkpoint_storage: The checkpoint storage to load from. - :type checkpoint_storage: CheckpointStorage - - :return: The latest checkpoint if available, None otherwise. - :rtype: Optional[Any] + raise NotImplementedError("This method is implemented in the base class.") + + async def _load_agent_thread(self, context: AgentRunContext, agent: Union[AgentProtocol, WorkflowAgent]) -> Optional[AgentThread]: + """Load the agent thread for a given conversation ID. + + :param context: The agent run context. + :type context: AgentRunContext + :param agent: The agent instance. 
+ :type agent: AgentProtocol | WorkflowAgent + + :return: The loaded AgentThread if available, None otherwise. + :rtype: Optional[AgentThread] """ - checkpoints = await checkpoint_storage.list_checkpoints() - if checkpoints: - latest_checkpoint = max(checkpoints, key=lambda cp: cp.timestamp) - return latest_checkpoint + if self._thread_repository: + agent_thread = await self._thread_repository.get(context.conversation_id) + if agent_thread: + logger.info(f"Loaded agent thread for conversation: {context.conversation_id}") + return agent_thread + return agent.get_new_thread() return None - async def _load_checkpoint(self, agent: AgentProtocol, - checkpoint: WorkflowCheckpoint, - checkpoint_storage: CheckpointStorage) -> None: - """Load the checkpoint data from the given WorkflowCheckpoint. + async def _save_agent_thread(self, context: AgentRunContext, agent_thread: AgentThread) -> None: + """Save the agent thread for a given conversation ID. - :param checkpoint: The WorkflowCheckpoint to load data from. - :type checkpoint: WorkflowCheckpoint + :param context: The agent run context. + :type context: AgentRunContext + :param agent_thread: The agent thread to save. 
+ :type agent_thread: AgentThread """ - await agent.run(checkpoint_id=checkpoint.checkpoint_id, - checkpoint_storage=checkpoint_storage) \ No newline at end of file + if agent_thread and self._thread_repository: + await self._thread_repository.set(context.conversation_id, agent_thread) + logger.info(f"Saved agent thread for conversation: {context.conversation_id}") + + def _run_streaming_updates( + self, + *, + context: AgentRunContext, + run_stream: Callable[[], AsyncGenerator[Any, None]], + agent_thread: Optional[AgentThread] = None, + ) -> AsyncGenerator[ResponseStreamEvent, Any]: + """Execute a streaming run with shared OAuth/error handling.""" + logger.info("Running agent in streaming mode") + streaming_converter = AgentFrameworkOutputStreamingConverter(context, hitl_helper=self._hitl_helper) + + async def stream_updates(): + try: + update_count = 0 + try: + updates = run_stream() + async for event in streaming_converter.convert(updates): + update_count += 1 + yield event + + await self._save_agent_thread(context, agent_thread) + logger.info("Streaming completed with %d updates", update_count) + except OAuthConsentRequiredError as e: + logger.info("OAuth consent required during streaming updates") + if update_count == 0: + async for event in self.respond_with_oauth_consent_astream(context, e): + yield event + else: + yield ResponseErrorEvent( + sequence_number=streaming_converter.next_sequence(), + code="server_error", + message=f"OAuth consent required: {e.consent_url}", + param="agent_run", + ) + yield ResponseFailedEvent( + sequence_number=streaming_converter.next_sequence(), + response=streaming_converter._build_response(status="failed"), # pylint: disable=protected-access + ) + except Exception as e: # pylint: disable=broad-exception-caught + logger.error("Unhandled exception during streaming updates: %s", e, exc_info=True) + yield ResponseErrorEvent( + sequence_number=streaming_converter.next_sequence(), + code="server_error", + message=str(e), + 
param="agent_run", + ) + yield ResponseFailedEvent( + sequence_number=streaming_converter.next_sequence(), + response=streaming_converter._build_response(status="failed"), # pylint: disable=protected-access + ) + finally: + # No request-scoped resources to clean up today, but keep hook for future use. + pass + + return stream_updates() \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py new file mode 100644 index 000000000000..6105470dbdc9 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py @@ -0,0 +1,88 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from __future__ import annotations + +from typing import Any, AsyncGenerator, Optional, Union + +from agent_framework import AgentProtocol + +from azure.ai.agentserver.core import AgentRunContext +from azure.ai.agentserver.core.tools import OAuthConsentRequiredError +from azure.ai.agentserver.core.logger import get_logger +from azure.ai.agentserver.core.models import ( + Response as OpenAIResponse, + ResponseStreamEvent, +) + +from .models.agent_framework_input_converters import AgentFrameworkInputConverter +from .models.agent_framework_output_non_streaming_converter import ( + AgentFrameworkOutputNonStreamingConverter, +) +from ._agent_framework import AgentFrameworkAgent +from .persistence import AgentThreadRepository + +logger = get_logger() + +class AgentFrameworkAIAgentAdapter(AgentFrameworkAgent): + def __init__(self, agent: AgentProtocol, + *, + thread_repository: Optional[AgentThreadRepository]=None, + **kwargs) -> None: + super().__init__(agent=agent, **kwargs) + self._agent = agent + 
self._thread_repository = thread_repository + + async def agent_run( # pylint: disable=too-many-statements + self, context: AgentRunContext + ) -> Union[ + OpenAIResponse, + AsyncGenerator[ResponseStreamEvent, Any], + ]: + try: + logger.info(f"Starting AIAgent agent_run with stream={context.stream}") + request_input = context.request.get("input") + + agent_thread = await self._load_agent_thread(context, self._agent) + + input_converter = AgentFrameworkInputConverter(hitl_helper=self._hitl_helper) + message = await input_converter.transform_input( + request_input, + agent_thread=agent_thread) + logger.debug(f"Transformed input message type: {type(message)}") + + # Use split converters + if context.stream: + return self._run_streaming_updates( + context=context, + run_stream=lambda: self.agent.run_stream( + message, + thread=agent_thread, + ), + agent_thread=agent_thread, + ) + + # Non-streaming path + logger.info("Running agent in non-streaming mode") + result = await self.agent.run( + message, + thread=agent_thread) + logger.debug(f"Agent run completed, result type: {type(result)}") + await self._save_agent_thread(context, agent_thread) + + non_streaming_converter = AgentFrameworkOutputNonStreamingConverter(context, hitl_helper=self._hitl_helper) + transformed_result = non_streaming_converter.transform_output_for_response(result) + logger.info("Agent run and transformation completed successfully") + return transformed_result + except OAuthConsentRequiredError as e: + logger.info("OAuth consent required during agent run") + if context.stream: + # Yield OAuth consent response events + # Capture e in the closure by passing it as a default argument + async def oauth_consent_stream(error=e): + async for event in self.respond_with_oauth_consent_astream(context, error): + yield event + return oauth_consent_stream() + return await self.respond_with_oauth_consent(context, e) + finally: + pass diff --git 
a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py new file mode 100644 index 000000000000..92667eb7cbcc --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py @@ -0,0 +1,142 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from typing import TYPE_CHECKING, Any, AsyncGenerator, Callable, Optional, Protocol, Union, List + +from agent_framework import Workflow, CheckpointStorage, WorkflowAgent, WorkflowCheckpoint +from agent_framework._workflows import get_checkpoint_summary + +from azure.ai.agentserver.core.tools import OAuthConsentRequiredError +from azure.ai.agentserver.core import AgentRunContext +from azure.ai.agentserver.core.logger import get_logger +from azure.ai.agentserver.core.models import ( + Response as OpenAIResponse, + ResponseStreamEvent, +) + +from ._agent_framework import AgentFrameworkAgent +from .models.agent_framework_input_converters import AgentFrameworkInputConverter +from .models.agent_framework_output_non_streaming_converter import ( + AgentFrameworkOutputNonStreamingConverter, +) +from .persistence.agent_thread_repository import AgentThreadRepository +from .persistence.checkpoint_repository import CheckpointRepository + +logger = get_logger() + +class AgentFrameworkWorkflowAdapter(AgentFrameworkAgent): + """Adapter to run WorkflowBuilder agents within the Agent Framework CBAgent structure.""" + def __init__(self, + workflow_factory: Callable[[], Workflow], + *, + thread_repository: Optional[AgentThreadRepository] = None, + checkpoint_repository: Optional[CheckpointRepository] = None, + **kwargs: Any) -> None: + super().__init__(agent=None, 
**kwargs) + self._workflow_factory = workflow_factory + self._thread_repository = thread_repository + self._checkpoint_repository = checkpoint_repository + + async def agent_run( # pylint: disable=too-many-statements + self, context: AgentRunContext + ) -> Union[ + OpenAIResponse, + AsyncGenerator[ResponseStreamEvent, Any], + ]: + try: + agent = self._build_agent() + + logger.info(f"Starting WorkflowAgent agent_run with stream={context.stream}") + request_input = context.request.get("input") + + agent_thread = await self._load_agent_thread(context, agent) + + checkpoint_storage = None + selected_checkpoint = None + if self._checkpoint_repository: + checkpoint_storage = await self._checkpoint_repository.get_or_create(context.conversation_id) + selected_checkpoint = await self._get_latest_checkpoint(checkpoint_storage) + if selected_checkpoint: + summary = get_checkpoint_summary(selected_checkpoint) + if summary.status == "completed": + logger.warning(f"Selected checkpoint {selected_checkpoint.checkpoint_id} is completed. 
Will not resume from it.") + selected_checkpoint = None # Do not resume from completed checkpoints + else: + await self._load_checkpoint(agent, selected_checkpoint, checkpoint_storage) + logger.info(f"Loaded checkpoint with ID: {selected_checkpoint.checkpoint_id}") + + input_converter = AgentFrameworkInputConverter(hitl_helper=self._hitl_helper) + message = await input_converter.transform_input( + request_input, + agent_thread=agent_thread, + checkpoint=selected_checkpoint) + logger.debug(f"Transformed input message type: {type(message)}") + + # Use split converters + if context.stream: + return self._run_streaming_updates( + context=context, + run_stream=lambda: agent.run_stream( + message, + thread=agent_thread, + checkpoint_storage=checkpoint_storage, + ), + agent_thread=agent_thread, + ) + + # Non-streaming path + logger.info("Running WorkflowAgent in non-streaming mode") + result = await agent.run( + message, + thread=agent_thread, + checkpoint_storage=checkpoint_storage) + logger.debug(f"WorkflowAgent run completed, result type: {type(result)}") + + await self._save_agent_thread(context, agent_thread) + + non_streaming_converter = AgentFrameworkOutputNonStreamingConverter(context, hitl_helper=self._hitl_helper) + transformed_result = non_streaming_converter.transform_output_for_response(result) + logger.info("Agent run and transformation completed successfully") + return transformed_result + except OAuthConsentRequiredError as e: + logger.info("OAuth consent required during agent run") + if context.stream: + # Yield OAuth consent response events + # Capture e in the closure by passing it as a default argument + async def oauth_consent_stream(error=e): + async for event in self.respond_with_oauth_consent_astream(context, error): + yield event + return oauth_consent_stream() + return await self.respond_with_oauth_consent(context, e) + finally: + pass + + def _build_agent(self) -> WorkflowAgent: + return self._workflow_factory().as_agent() + + async def 
_get_latest_checkpoint(self, + checkpoint_storage: CheckpointStorage) -> Optional[Any]: + """Load the latest checkpoint from the given storage. + + :param checkpoint_storage: The checkpoint storage to load from. + :type checkpoint_storage: CheckpointStorage + + :return: The latest checkpoint if available, None otherwise. + :rtype: Optional[Any] + """ + checkpoints = await checkpoint_storage.list_checkpoints() + if checkpoints: + latest_checkpoint = max(checkpoints, key=lambda cp: cp.timestamp) + return latest_checkpoint + return None + + async def _load_checkpoint(self, agent: WorkflowAgent, + checkpoint: WorkflowCheckpoint, + checkpoint_storage: CheckpointStorage) -> None: + """Load the checkpoint data from the given WorkflowCheckpoint. + + :param checkpoint: The WorkflowCheckpoint to load data from. + :type checkpoint: WorkflowCheckpoint + """ + await agent.run(checkpoint_id=checkpoint.checkpoint_id, + checkpoint_storage=checkpoint_storage) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py index 08db24adfae0..95c7bb7acc6b 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py @@ -36,7 +36,7 @@ class AgentFrameworkOutputNonStreamingConverter: # pylint: disable=name-too-long """Non-streaming converter: AgentRunResponse -> OpenAIResponse.""" - def __init__(self, context: AgentRunContext, *, hitl_helper: HumanInTheLoopHelper): + def __init__(self, context: AgentRunContext, *, hitl_helper: HumanInTheLoopHelper=None): self._context = context 
self._response_id = None self._response_created_at = None diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 23d8702e38ec..253b0fc7aa9e 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -186,7 +186,7 @@ async def convert_contents( for call_id, content in content_by_call_id.items(): item_id, output_index = ids_by_call_id[call_id] - args = content.arguments if isinstance(content.arguments, str) else json.dumps(content.arguments) + args = self._serialize_arguments(content.arguments) yield ResponseFunctionCallArgumentsDoneEvent( sequence_number=self._parent.next_sequence(), item_id=item_id, @@ -255,6 +255,14 @@ async def convert_contents( ) self._parent.add_completed_output_item(item) + def _serialize_arguments(self, arguments: Any) -> str: + if isinstance(arguments, str): + return arguments + try: + return json.dumps(arguments) + except Exception as e: + return str(arguments) + class _FunctionCallOutputStreamingState(_BaseStreamingState): """Handles function_call_output items streaming (non-chunked simple output).""" @@ -355,8 +363,7 @@ async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> Async is_changed = ( lambda a, b: a is not None \ and b is not None \ - and (a.message_id != b.message_id \ - or type(a.content[0]) != type(b.content[0])) # pylint: disable=unnecessary-lambda-assignment + and a.message_id != b.message_id # pylint: disable=unnecessary-lambda-assignment ) async for group in chunk_on_change(updates, is_changed): diff --git 
a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py index ea3de29385e1..294d7d0948fc 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py @@ -1,20 +1,22 @@ from abc import ABC, abstractmethod import json import os -from typing import Any, Optional +from typing import Any, Optional, Union -from agent_framework import AgentThread, AgentProtocol +from agent_framework import AgentThread, AgentProtocol, WorkflowAgent class AgentThreadRepository(ABC): """AgentThread repository to manage saved thread messages of agent threads and workflows.""" @abstractmethod - async def get(self, conversation_id: str) -> Optional[AgentThread]: + async def get(self, conversation_id: str, agent: Optional[Union[AgentProtocol, WorkflowAgent]]=None) -> Optional[AgentThread]: """Retrieve the savedt thread for a given conversation ID. :param conversation_id: The conversation ID. :type conversation_id: str + :param agent: The agent instance. If provided, it can be used to deserialize the thread. + :type agent: Optional[Union[AgentProtocol, WorkflowAgent]] :return: The saved AgentThread if available, None otherwise. :rtype: Optional[AgentThread] @@ -36,12 +38,13 @@ class InMemoryAgentThreadRepository(AgentThreadRepository): def __init__(self) -> None: self._inventory: dict[str, AgentThread] = {} - async def get(self, conversation_id: str) -> Optional[AgentThread]: + async def get(self, conversation_id: str, agent: Optional[Union[AgentProtocol, WorkflowAgent]]=None) -> Optional[AgentThread]: """Retrieve the saved thread for a given conversation ID. 
:param conversation_id: The conversation ID. :type conversation_id: str - + :param agent: The agent instance. It will be used for in-memory repository for interface consistency. + :type agent: Optional[Union[AgentProtocol, WorkflowAgent]] :return: The saved AgentThread if available, None otherwise. :rtype: Optional[AgentThread] """ @@ -72,18 +75,22 @@ def __init__(self, agent: AgentProtocol) -> None: """ self._agent = agent - async def get(self, conversation_id: str) -> Optional[AgentThread]: + async def get(self, conversation_id: str, agent: Optional[Union[AgentProtocol, WorkflowAgent]]=None) -> Optional[AgentThread]: """Retrieve the saved thread for a given conversation ID. :param conversation_id: The conversation ID. :type conversation_id: str + :param agent: The agent instance. If provided, it can be used to deserialize the thread. + Otherwise, the repository's agent will be used. + :type agent: Optional[Union[AgentProtocol, WorkflowAgent]] :return: The saved AgentThread if available, None otherwise. 
:rtype: Optional[AgentThread] """ serialized_thread = await self.read_from_storage(conversation_id) if serialized_thread: - thread = await self._agent.deserialize_thread(serialized_thread) + agent_to_use = agent or self._agent + thread = await agent_to_use.deserialize_thread(serialized_thread) return thread return None diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/basic_simple/minimal_example.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/basic_simple/minimal_example.py index 15afa52f42b8..1d5aab07ae8a 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/basic_simple/minimal_example.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/basic_simple/minimal_example.py @@ -6,10 +6,10 @@ from agent_framework.azure import AzureOpenAIChatClient from azure.identity import DefaultAzureCredential from dotenv import load_dotenv +load_dotenv() from azure.ai.agentserver.agentframework import from_agent_framework -load_dotenv() def get_weather( @@ -26,7 +26,7 @@ def main() -> None: tools=get_weather, ) - from_agent_framework(agent).run() + from_agent_framework(agent=agent).run() if __name__ == "__main__": diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/chat_client_with_foundry_tool.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/chat_client_with_foundry_tool.py index cb9c3cd2c9c6..08e7e8bdffc7 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/chat_client_with_foundry_tool.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/chat_client_with_foundry_tool.py @@ -28,7 +28,7 @@ def main(): instructions="You are a helpful assistant with access to various tools.", ) - from_agent_framework(agent).run() + from_agent_framework(agent=agent).run() if __name__ == "__main__": main() diff --git 
a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/main.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/main.py index 56dc5fca8860..db81c1091336 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/main.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/main.py @@ -78,7 +78,7 @@ def build_agent(): async def main() -> None: agent = build_agent() thread_repository = JsonLocalFileAgentThreadRepository(agent=agent, storage_path="./thread_storage") - await from_agent_framework(agent, thread_repository=thread_repository).run_async() + await from_agent_framework(agent=agent, thread_repository=thread_repository).run_async() if __name__ == "__main__": asyncio.run(main()) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/.gitignore b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/.gitignore new file mode 100644 index 000000000000..9a5b0b4f8d68 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/.gitignore @@ -0,0 +1 @@ +checkpoints \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/README.md b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/README.md index aed6deee122a..172422f87c7f 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/README.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/README.md @@ -132,7 +132,7 @@ Respond by sending a `CreateResponse` request with `function_call_output` messag "input": [ { "call_id": "", - "output": "{\"request_id\":\"\",\"approved\":true}", + "output": 
"{\"request_id\":\"\",\"approved\":true,\"feedback\":\"approve\"}", "type": "function_call_output", } ] diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/main.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/main.py index b5deef145920..e749a4a62fc6 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/main.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/main.py @@ -23,7 +23,7 @@ ) from azure.ai.agentserver.agentframework import from_agent_framework -from azure.ai.agentserver.agentframework.persistence import InMemoryCheckpointRepository +from azure.ai.agentserver.agentframework.persistence import FileCheckpointRepository load_dotenv() @@ -84,10 +84,10 @@ async def accept_human_review( print("Reviewer: Forwarding human review back to worker...") await ctx.send_message(response, target_id=self._worker_id) -def build_agent(tools): +def create_builder(): # Build a workflow with bidirectional communication between Worker and Reviewer, # and escalation paths for human review. 
- agent = ( + builder = ( WorkflowBuilder() .register_executor( lambda: Worker( @@ -103,17 +103,16 @@ def build_agent(tools): .add_edge("worker", "reviewer") # Worker sends requests to Reviewer .add_edge("reviewer", "worker") # Reviewer sends feedback to Worker .set_start_executor("worker") - .build() - .as_agent() # Convert workflow into an agent interface ) - return agent + return builder async def run_agent() -> None: """Run the workflow inside the agent server adapter.""" + builder = create_builder() await from_agent_framework( - build_agent, # pass WorkflowAgent factory to adapter, build a new instance per request - checkpoint_repository=InMemoryCheckpointRepository(), # for checkpoint storage + workflow=builder, # pass workflow builder to adapter + checkpoint_repository=FileCheckpointRepository(storage_path="./checkpoints"), # for checkpoint storage ).run_async() if __name__ == "__main__": diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/mcp_apikey/mcp_apikey.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/mcp_apikey/mcp_apikey.py index 985d7fd01e0c..2a1058e7f468 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/mcp_apikey/mcp_apikey.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/mcp_apikey/mcp_apikey.py @@ -35,7 +35,7 @@ async def main() -> None: ) async with agent: - await from_agent_framework(agent).run_async() + await from_agent_framework(agent=agent).run_async() if __name__ == "__main__": diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/mcp_simple/mcp_simple.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/mcp_simple/mcp_simple.py index 6b59771fe0da..ce5bb37eea4f 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/mcp_simple/mcp_simple.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/mcp_simple/mcp_simple.py @@ -22,7 +22,7 @@ async def main() -> None: ) async with agent: - await 
from_agent_framework(agent).run_async() + await from_agent_framework(agent=agent).run_async() if __name__ == "__main__": diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/simple_async/minimal_async_example.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/simple_async/minimal_async_example.py index 4c69c8afa84d..ac781d4d39ab 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/simple_async/minimal_async_example.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/simple_async/minimal_async_example.py @@ -28,7 +28,7 @@ async def main() -> None: ) async with agent: - await from_agent_framework(agent).run_async() + await from_agent_framework(agent=agent).run_async() if __name__ == "__main__": diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_agent_simple/README.md b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_agent_simple/README.md index 59bb6b9f19ec..cc82d1f19171 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_agent_simple/README.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_agent_simple/README.md @@ -1,287 +1,63 @@ -## Workflow Agent Reflection Sample (Python) +## Workflow Agent Simple Sample (Python) -This sample demonstrates how to wrap an Agent Framework workflow (with iterative review + improvement) as an agent using the Container Agents Adapter. It implements a "reflection" pattern consisting of two executors: +This sample hosts a two-step Agent Framework workflow—`Writer` followed by `Reviewer`—through the Azure AI Agent Server Adapter. The writer creates content, the reviewer provides the final response, and the adapter exposes the workflow through the same HTTP surface as any hosted agent. 
-- Worker: Produces an initial answer (and revised answers after feedback) -- Reviewer: Evaluates the answer against quality criteria and either approves or returns constructive feedback +### What `workflow_agent_simple.py` does +- Builds a workflow with `WorkflowBuilder` +- Passes the builder to `from_agent_framework(...).run_async()` so the adapter spins up an HTTP server (defaults to `0.0.0.0:8088`). +- An example of passing with a factory of `Workflow` is shown in comments. -The workflow cycles until the Reviewer approves the response. Only approved content is emitted externally (streamed the same way as a normal agent response). This pattern is useful for quality‑controlled assistance, gated tool use, evaluative chains, or iterative refinement. - -### Key Concepts Shown -- `WorkflowBuilder` + `.as_agent()` to expose a workflow as a standard agent -- Bidirectional edges enabling cyclical review (Worker ↔ Reviewer) -- Structured output parsing (Pydantic model) for review feedback -- Emitting `AgentRunUpdateEvent` to stream only approved messages -- Managing pending requests and re‑submission with incorporated feedback - -File: `workflow_agent_simple.py` +Please note that the `WorkflowBuilder` or `Workflow` factory will be called for each incoming request. The `Workflow` will be converted to `WorkflowAgent` by `.as_agent()`. The workflow definition needs to be valid for `WorkflowAgent`. --- ## Prerequisites - -> **Azure sign-in:** Run `az login` before starting the sample so `DefaultAzureCredential` can acquire a CLI token. 
- -Dependencies used by `workflow_agent_simple.py`: -- agent-framework-azure-ai (published package with workflow abstractions) -- agents_adapter -- azure-identity (for `DefaultAzureCredential`) -- python-dotenv (loads `.env` for local credentials) -- pydantic (pulled transitively; listed for clarity) - -Install from PyPI (from the repo root: `container_agents/`): -```bash -pip install agent-framework-azure-ai azure-identity python-dotenv - -pip install -e src/adapter/python -``` +- Python 3.10+ +- Azure CLI authenticated with `az login` (required for `AzureCliCredential`). +- An Azure AI project that already hosts a chat model deployment supported by the Agent Framework Azure client. --- -## Additional Requirements - -1. Azure AI project with a model deployment (supports Microsoft hosted, Azure OpenAI, or custom models exposed via Azure AI Foundry). - ---- - -## Configuration - -Copy `.envtemplate` to `.env` and fill in real values: -``` -AZURE_AI_PROJECT_ENDPOINT= -AZURE_AI_MODEL_DEPLOYMENT_NAME= -AGENT_PROJECT_NAME= -``` -`AGENT_PROJECT_NAME` lets you override the default Azure AI agent project for this workflow; omit it to fall back to the SDK default. +## Setup +1. Copy `.envtemplate` to `.env` and fill in your project details: + ``` + AZURE_AI_PROJECT_ENDPOINT= + AZURE_AI_MODEL_DEPLOYMENT_NAME= + ``` +2. Install the sample dependencies: + ```bash + pip install -r requirements.txt + ``` --- ## Run the Workflow Agent - From this folder: ```bash python workflow_agent_simple.py ``` -The server (via the adapter) will start on `0.0.0.0:8088` by default. 
- ---- - -## Send a Non‑Streaming Request - -```bash -curl -sS \ - -H "Content-Type: application/json" \ - -X POST http://localhost:8088/runs \ - -d '{"input":"Explain the concept of reflection in this workflow sample.","stream":false}' -``` - -Sample output (non‑streaming): - -``` -Processing 1 million files in parallel and writing their contents into a sorted output file can be a computationally and resource-intensive task. To handle it effectively, you can use Python with libraries like `concurrent.futures` for parallelism and `heapq` for the sorting and merging. - -Below is an example implementation: - -import os -from concurrent.futures import ThreadPoolExecutor -import heapq - -def read_file(file_path): - """Read the content of a single file and return it as a list of lines.""" - with open(file_path, 'r') as file: - return file.readlines() - -def parallel_read_files(file_paths, max_workers=8): - """ - Read files in parallel and return all the lines in memory. - :param file_paths: List of file paths to read. - :param max_workers: Number of worker threads to use for parallelism. - """ - all_lines = [] - with ThreadPoolExecutor(max_workers=max_workers) as executor: - # Submit tasks to read each file in parallel - results = executor.map(read_file, file_paths) - # Collect the results - for lines in results: - all_lines.extend(lines) - return all_lines - -def write_sorted_output(lines, output_file_path): - """ - Write sorted lines to the output file. - :param lines: List of strings to be sorted and written. - :param output_file_path: File path to write the sorted result. - """ - sorted_lines = sorted(lines) - with open(output_file_path, 'w') as output_file: - output_file.writelines(sorted_lines) - -def main(directory_path, output_file_path): - """ - Main function to read files in parallel and write sorted output. - :param directory_path: Path to the directory containing input files. - :param output_file_path: File path to write the sorted output. 
- """ - # Get a list of all the file paths in the given directory - file_paths = [os.path.join(directory_path, f) for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))] - - print(f"Found {len(file_paths)} files. Reading files in parallel...") - - # Read all lines from the files in parallel - all_lines = parallel_read_files(file_paths) - - print(f"Total lines read: {len(all_lines)}. Sorting and writing to output file...") - - # Write the sorted lines to the output file - write_sorted_output(all_lines, output_file_path) - - print(f"Sorted output written to: {output_file_path}") - -if __name__ == "__main__": - # Replace these paths with the appropriate input directory and output file path - input_directory = "path/to/input/directory" # Directory containing 1 million files - output_file = "path/to/output/sorted_output.txt" # Output file path - - main(input_directory, output_file) - -### Key Features and Steps: - -1. **Parallel Reading with `ThreadPoolExecutor`**: - - Files are read in parallel using threads to improve I/O performance since reading many files is mostly I/O-bound. - -2. **Sorting and Writing**: - - Once all lines are aggregated into memory, they are sorted using Python's `sorted()` function and written to the output file in one go. - -3. **Handles Large Number of Files**: - - The program uses threads to manage the potentially massive number of files in parallel, saving time instead of processing them serially. - -### Considerations: -- **Memory Usage**: This script reads all file contents into memory. If the total size of the files is too large, you may encounter memory issues. In such cases, consider processing the files in smaller chunks. -- **Sorting**: For extremely large data, consider using an external/merge sort technique to handle sorting in smaller chunks. -- **I/O Performance**: Ensure that your I/O subsystem and disk can handle the load. 
- -Let me know if you'd like an optimized version to handle larger datasets with limited memory! - -Usage (if provided): None -``` - ---- - -## Send a Streaming Request (Server-Sent Events) - -```bash -curl -N \ - -H "Content-Type: application/json" \ - -X POST http://localhost:8088/runs \ - -d '{"input":"How does the reviewer decide to approve?","stream":true}' -``` - -Sample output (streaming): - -``` -Here is a Python script that demonstrates parallel reading of 1 million files using `concurrent.futures` for parallelism and `heapq` to write the outputs to a sorted file. This approach ensures efficiency when dealing with such a large number of files. - - -import os -import heapq -from concurrent.futures import ThreadPoolExecutor - -def read_file(file_path): - """ - Read the content of a single file and return it as a list of lines. - """ - with open(file_path, 'r') as file: - return file.readlines() - -def parallel_read_files(file_paths, max_workers=4): - """ - Read multiple files in parallel. - """ - all_lines = [] - with ThreadPoolExecutor(max_workers=max_workers) as executor: - # Submit reading tasks to the thread pool - futures = [executor.submit(read_file, file_path) for file_path in file_paths] - - # Gather results as they are completed - for future in futures: - all_lines.extend(future.result()) - - return all_lines - -def write_sorted_output(lines, output_file): - """ - Write sorted lines to an output file. 
- """ - sorted_lines = sorted(lines) - with open(output_file, 'w') as file: - file.writelines(sorted_lines) - -if __name__ == "__main__": - # Set the directory containing your input files - input_directory = 'path_to_your_folder_with_files' - - # Get the list of all input files - file_paths = [os.path.join(input_directory, f) for f in os.listdir(input_directory) if os.path.isfile(os.path.join(input_directory, f))] - - # Specify the number of threads for parallel processing - max_threads = 8 # Adjust according to your system's capabilities - - # Step 1: Read all files in parallel - print("Reading files in parallel...") - all_lines = parallel_read_files(file_paths, max_workers=max_threads) - - # Step 2: Write the sorted data to the output file - output_file = 'sorted_output.txt' - print(f"Writing sorted output to {output_file}...") - write_sorted_output(all_lines, output_file) - - print("Operation complete.") - -[comment]: # ( cspell:ignore pysort ) - -### Key Points: -1. **Parallel Read**: The reading of files is handled using `concurrent.futures.ThreadPoolExecutor`, allowing multiple files to be processed simultaneously. - -2. **Sorted Output**: After collecting all lines from the files, the `sorted()` function is used to sort the content in memory. This ensures that the final output file will have all data in sorted order. - -3. **Adjustable Parallelism**: The `max_threads` parameter can be modified to control the number of threads used for file reading. The value should match your system's capabilities for optimal performance. - -4. **Large Data Handling**: If the data from 1 million files is too large to fit into memory, consider using an external merge sort algorithm or a library like `pysort` for efficient external sorting. - -Let me know if you'd like improvements or adjustments for more specific scenarios! 
-Final usage (if provided): None -``` - -> Only the final approved assistant content is emitted as normal output deltas; intermediate review feedback stays internal. - ---- -## How the Reflection Loop Works -1. User query enters the workflow (Worker start executor) -2. Worker produces an answer with model call -3. Reviewer evaluates using a structured schema (`feedback`, `approved`) -4. If not approved: Worker augments context with feedback + regeneration instruction, then re‑answers -5. Loop continues until `approved=True` -6. Approved content is emitted as `AgentRunResponseUpdate` (streamed externally) +The adapter starts the server on `http://0.0.0.0:8088` by default. --- -## Troubleshooting -| Issue | Resolution | -|-------|------------| -| `DefaultAzureCredential` errors | Run `az login` or configure a service principal. | -| Empty / no streaming | Confirm `stream` flag in request JSON and that the event loop is healthy. | -| Model 404 / deployment error | Verify `AZURE_AI_MODEL_DEPLOYMENT_NAME` exists in the Azure AI project configured by `AZURE_AI_PROJECT_ENDPOINT`. | -| `.env` not loading | Ensure `.env` sits beside the script (or set `dotenv_path`) and that `python-dotenv` is installed. | +## Send Requests +- **Non-streaming:** + ```bash + curl -sS \ + -H "Content-Type: application/json" \ + -X POST http://localhost:8088/runs \ + -d '{"input":"Create a slogan for a new electric SUV that is affordable and fun to drive","stream":false}' + ``` --- ## Related Resources - Agent Framework repo: https://github.com/microsoft/agent-framework -- Basic simple sample README (same folder structure) for installation reference +- Adapter package docs: `azure.ai.agentserver.agentframework` in this SDK --- ## License & Support -This sample follows the repository's LICENSE. For questions about unreleased Agent Framework features, contact the Agent Framework team via its GitHub repository. +This sample follows the repository LICENSE. 
For questions about the Agent Framework itself, open an issue in the Agent Framework GitHub repository. diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_agent_simple/workflow_agent_simple.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_agent_simple/workflow_agent_simple.py index 4d2569c38932..a79e24f1a3fb 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_agent_simple/workflow_agent_simple.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_agent_simple/workflow_agent_simple.py @@ -1,291 +1,51 @@ -# Copyright (c) Microsoft. All rights reserved. - import asyncio -from dataclasses import dataclass -from uuid import uuid4 - -from agent_framework import ( - AgentRunResponseUpdate, - AgentRunUpdateEvent, - BaseChatClient, - ChatMessage, - Contents, - Executor, - Role as ChatRole, - WorkflowBuilder, - WorkflowContext, - handler, -) -from agent_framework_azure_ai import AzureAIAgentClient -from azure.identity.aio import DefaultAzureCredential from dotenv import load_dotenv -from pydantic import BaseModel - -from azure.ai.agentserver.agentframework import from_agent_framework - -""" -The following sample demonstrates how to wrap a workflow as an agent using WorkflowAgent. - -This sample shows how to: -1. Create a workflow with a reflection pattern (Worker + Reviewer executors) -2. Wrap the workflow as an agent using the .as_agent() method -3. Stream responses from the workflow agent like a regular agent -4. 
Implement a review-retry mechanism where responses are iteratively improved - -The example implements a quality-controlled AI assistant where: -- Worker executor generates responses to user queries -- Reviewer executor evaluates the responses and provides feedback -- If not approved, the Worker incorporates feedback and regenerates the response -- The cycle continues until the response is approved -- Only approved responses are emitted to the external consumer - -Key concepts demonstrated: -- WorkflowAgent: Wraps a workflow to make it behave as an agent -- Bidirectional workflow with cycles (Worker ↔ Reviewer) -- AgentRunUpdateEvent: How workflows communicate with external consumers -- Structured output parsing for review feedback -- State management with pending requests tracking -""" - - -@dataclass -class ReviewRequest: - request_id: str - user_messages: list[ChatMessage] - agent_messages: list[ChatMessage] - - -@dataclass -class ReviewResponse: - request_id: str - feedback: str - approved: bool - load_dotenv() +from agent_framework import ChatAgent, Workflow, WorkflowBuilder +from agent_framework.azure import AzureAIAgentClient +from azure.identity.aio import AzureCliCredential -class Reviewer(Executor): - """An executor that reviews messages and provides feedback.""" - - def __init__(self, chat_client: BaseChatClient) -> None: - super().__init__(id="reviewer") - self._chat_client = chat_client - - @handler - async def review( - self, request: ReviewRequest, ctx: WorkflowContext[ReviewResponse] - ) -> None: - print( - f"🔍 Reviewer: Evaluating response for request {request.request_id[:8]}..." - ) - - # Use the chat client to review the message and use structured output. - # NOTE: this can be modified to use an evaluation framework. - - class _Response(BaseModel): - feedback: str - approved: bool - - # Define the system prompt. 
- messages = [ - ChatMessage( - role=ChatRole.SYSTEM, - text="You are a reviewer for an AI agent, please provide feedback on the " - "following exchange between a user and the AI agent, " - "and indicate if the agent's responses are approved or not.\n" - "Use the following criteria for your evaluation:\n" - "- Relevance: Does the response address the user's query?\n" - "- Accuracy: Is the information provided correct?\n" - "- Clarity: Is the response easy to understand?\n" - "- Completeness: Does the response cover all aspects of the query?\n" - "Be critical in your evaluation and provide constructive feedback.\n" - "Do not approve until all criteria are met.", - ) - ] - - # Add user and agent messages to the chat history. - messages.extend(request.user_messages) - - # Add agent messages to the chat history. - messages.extend(request.agent_messages) - - # Add add one more instruction for the assistant to follow. - messages.append( - ChatMessage( - role=ChatRole.USER, - text="Please provide a review of the agent's responses to the user.", - ) - ) - - print("🔍 Reviewer: Sending review request to LLM...") - # Get the response from the chat client. - response = await self._chat_client.get_response( - messages=messages, response_format=_Response - ) - - # Parse the response. - parsed = _Response.model_validate_json(response.messages[-1].text) - - print(f"🔍 Reviewer: Review complete - Approved: {parsed.approved}") - print(f"🔍 Reviewer: Feedback: {parsed.feedback}") - - # Send the review response. 
- await ctx.send_message( - ReviewResponse( - request_id=request.request_id, - feedback=parsed.feedback, - approved=parsed.approved, - ) - ) - - -class Worker(Executor): - """An executor that performs tasks for the user.""" - - def __init__(self, chat_client: BaseChatClient) -> None: - super().__init__(id="worker") - self._chat_client = chat_client - self._pending_requests: dict[str, tuple[ReviewRequest, list[ChatMessage]]] = {} - - @handler - async def handle_user_messages( - self, user_messages: list[ChatMessage], ctx: WorkflowContext[ReviewRequest] - ) -> None: - print("🔧 Worker: Received user messages, generating response...") - - # Handle user messages and prepare a review request for the reviewer. - # Define the system prompt. - messages = [ - ChatMessage(role=ChatRole.SYSTEM, text="You are a helpful assistant.") - ] - - # Add user messages. - messages.extend(user_messages) - - print("🔧 Worker: Calling LLM to generate response...") - # Get the response from the chat client. - response = await self._chat_client.get_response(messages=messages) - print(f"🔧 Worker: Response generated: {response.messages[-1].text}") - - # Add agent messages. - messages.extend(response.messages) - - # Create the review request. - request = ReviewRequest( - request_id=str(uuid4()), - user_messages=user_messages, - agent_messages=response.messages, - ) - - print( - f"🔧 Worker: Generated response, sending to reviewer (ID: {request.request_id[:8]})" - ) - # Send the review request. - await ctx.send_message(request) - - # Add to pending requests. - self._pending_requests[request.request_id] = (request, messages) - - @handler - async def handle_review_response( - self, review: ReviewResponse, ctx: WorkflowContext[ReviewRequest] - ) -> None: - print( - f"🔧 Worker: Received review for request {review.request_id[:8]} - Approved: {review.approved}" - ) - - # Handle the review response. 
Depending on the approval status, - # either emit the approved response as AgentRunUpdateEvent, or - # retry given the feedback. - if review.request_id not in self._pending_requests: - raise ValueError( - f"Received review response for unknown request ID: {review.request_id}" - ) - # Remove the request from pending requests. - request, messages = self._pending_requests.pop(review.request_id) - - if review.approved: - print("✅ Worker: Response approved! Emitting to external consumer...") - # If approved, emit the agent run response update to the workflow's - # external consumer. - contents: list[Contents] = [] - for message in request.agent_messages: - contents.extend(message.contents) - # Emitting an AgentRunUpdateEvent in a workflow wrapped by a WorkflowAgent - # will send the AgentRunResponseUpdate to the WorkflowAgent's - # event stream. - await ctx.add_event( - AgentRunUpdateEvent( - self.id, - data=AgentRunResponseUpdate( - contents=contents, role=ChatRole.ASSISTANT, author_name=self.id - ), - ) - ) - return - - print(f"❌ Worker: Response not approved. Feedback: {review.feedback}") - print("🔧 Worker: Incorporating feedback and regenerating response...") - - # Construct new messages with feedback. - messages.append(ChatMessage(role=ChatRole.SYSTEM, text=review.feedback)) - - # Add additional instruction to address the feedback. - messages.append( - ChatMessage( - role=ChatRole.SYSTEM, - text="Please incorporate the feedback above, and provide a response to user's next message.", - ) - ) - messages.extend(request.user_messages) - - # Get the new response from the chat client. - response = await self._chat_client.get_response(messages=messages) - print( - f"🔧 Worker: New response generated after feedback: {response.messages[-1].text}" - ) - - # Process the response. - messages.extend(response.messages) - - print( - f"🔧 Worker: Generated improved response, sending for re-review (ID: {review.request_id[:8]})" - ) - # Send an updated review request. 
- new_request = ReviewRequest( - request_id=review.request_id, - user_messages=request.user_messages, - agent_messages=response.messages, - ) - await ctx.send_message(new_request) +from azure.ai.agentserver.agentframework import from_agent_framework - # Add to pending requests. - self._pending_requests[new_request.request_id] = (new_request, messages) +def create_writer_agent(client: AzureAIAgentClient) -> ChatAgent: + return client.create_agent( + name="Writer", + instructions=( + "You are an excellent content writer. You create new content and edit contents based on the feedback." + ), + ) -def build_agent(chat_client: BaseChatClient): - reviewer = Reviewer(chat_client=chat_client) - worker = Worker(chat_client=chat_client) - return ( - WorkflowBuilder() - .add_edge( - worker, reviewer - ) # <--- This edge allows the worker to send requests to the reviewer - .add_edge( - reviewer, worker - ) # <--- This edge allows the reviewer to send feedback back to the worker - .set_start_executor(worker) - .build() - .as_agent() # Convert the workflow to an agent. +def create_reviewer_agent(client: AzureAIAgentClient) -> ChatAgent: + return client.create_agent( + name="Reviewer", + instructions=( + "You are an excellent content reviewer. " + "Provide actionable feedback to the writer about the provided content. " + "Provide the feedback in the most concise manner possible." 
+ ), ) async def main() -> None: - async with DefaultAzureCredential() as credential: - async with AzureAIAgentClient(async_credential=credential) as chat_client: - agent = build_agent(chat_client) - await from_agent_framework(agent).run_async() + async with AzureCliCredential() as cred, AzureAIAgentClient(credential=cred) as client: + builder = ( + WorkflowBuilder() + .register_agent(lambda: create_writer_agent(client), name="writer") + .register_agent(lambda: create_reviewer_agent(client), name="reviewer", output_response=True) + .set_start_executor("writer") + .add_edge("writer", "reviewer") + ) + + # Pass the WorkflowBuilder to the adapter and run it + # await from_agent_framework(workflow=builder).run_async() + + # Or create a factory function for the workflow pass the workflow factory to the adapter + def workflow_factory() -> Workflow: + return builder.build() + await from_agent_framework(workflow=workflow_factory).run_async() if __name__ == "__main__": From a28417322326b2615022340da4513ddf181003ee Mon Sep 17 00:00:00 2001 From: junanchen Date: Wed, 21 Jan 2026 16:30:05 -0800 Subject: [PATCH 54/94] fix async issue in non stream converter --- .../azure/ai/agentserver/langgraph/langgraph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index efa0a1a84959..6827cced1902 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -128,7 +128,7 @@ async def agent_run_non_stream(self, input_arguments: GraphInputArguments): try: result = await self._graph.ainvoke(**input_arguments) - output = self.converter.convert_response_non_stream(result, input_arguments["context"]) + output = await 
self.converter.convert_response_non_stream(result, input_arguments["context"]) return output except Exception as e: logger.error(f"Error during agent run: {e}", exc_info=True) From 12bb68a66d2531e01a1ad61e7e707e596dc8d444 Mon Sep 17 00:00:00 2001 From: lusu-msft <68949729+lusu-msft@users.noreply.github.com> Date: Thu, 22 Jan 2026 20:01:34 -0800 Subject: [PATCH 55/94] [AgentServer] fix build (#44796) * remove unused code in af * created subclasses for agent-framework AIAgent and WorkflowAgent * remove unused code * validate core with tox * refining agent framework adapters * refining adapters * updated minors * update from_agent_framework * resolve trailing whitespace * fix samples * resolve unused import and long lines * fixed pylint * fix unittest in langgraph * fix sphinx for core * fix minors * fix pylint * add cachetools min version * fix bugs for non stream converter * fix pylint * add output type check * fix minors * updated version and changelog * update required core package * fix langgraph sphinx --------- Co-authored-by: junanchen --- .../CHANGELOG.md | 19 +++++ .../ai/agentserver/agentframework/__init__.py | 45 +++++----- .../agentframework/_agent_framework.py | 59 +++++++++---- .../agentframework/_ai_agent_adapter.py | 8 +- .../agentframework/_foundry_tools.py | 6 +- .../ai/agentserver/agentframework/_version.py | 2 +- .../agentframework/_workflow_agent_adapter.py | 36 ++++++-- .../agent_framework_input_converters.py | 22 ++--- ...ramework_output_non_streaming_converter.py | 36 +++++--- ...nt_framework_output_streaming_converter.py | 51 ++++++++---- .../models/human_in_the_loop_helper.py | 21 +++-- .../agentframework/persistence/__init__.py | 2 +- .../persistence/agent_thread_repository.py | 33 ++++++-- .../persistence/checkpoint_repository.py | 9 +- .../pyproject.toml | 2 +- .../azure-ai-agentserver-core/CHANGELOG.md | 12 +++ .../azure/ai/agentserver/core/_version.py | 2 +- .../core/server/common/agent_run_context.py | 3 + 
.../agentserver/core/tools/client/_client.py | 26 ++++-- .../core/tools/runtime/_starlette.py | 7 +- .../azure.ai.agentserver.core.application.rst | 7 ++ ...zure.ai.agentserver.core.models.openai.rst | 8 ++ ...re.ai.agentserver.core.models.projects.rst | 8 ++ .../doc/azure.ai.agentserver.core.models.rst | 17 ++++ .../doc/azure.ai.agentserver.core.rst | 4 + ...zure.ai.agentserver.core.server.common.rst | 9 +- ...azure.ai.agentserver.core.tools.client.rst | 7 ++ .../doc/azure.ai.agentserver.core.tools.rst | 18 ++++ ...zure.ai.agentserver.core.tools.runtime.rst | 7 ++ .../azure.ai.agentserver.core.tools.utils.rst | 7 ++ .../doc/azure.ai.agentserver.core.utils.rst | 7 ++ .../azure-ai-agentserver-core/pyproject.toml | 2 +- .../custom_mock_agent_test.py | 2 +- .../CHANGELOG.md | 16 ++++ .../ai/agentserver/langgraph/__init__.py | 1 + .../ai/agentserver/langgraph/_version.py | 2 +- .../ai/agentserver/langgraph/langgraph.py | 12 ++- .../models/response_api_converter.py | 6 +- .../models/response_api_default_converter.py | 3 +- ...ponse_api_non_stream_response_converter.py | 40 +++++---- .../langgraph/tools/_chat_model.py | 7 +- .../agentserver/langgraph/tools/_tool_node.py | 6 +- ...graph.models.response_event_generators.rst | 74 +++++++++++++++++ .../azure.ai.agentserver.langgraph.models.rst | 82 +++++++++++++++++++ .../doc/azure.ai.agentserver.langgraph.rst | 27 ++++++ .../azure.ai.agentserver.langgraph.tools.rst | 6 ++ .../pyproject.toml | 2 +- .../test_langgraph_request_converter.py | 10 +-- 48 files changed, 620 insertions(+), 178 deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.application.rst create mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.openai.rst create mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.projects.rst create mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.rst 
create mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.client.rst create mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.rst create mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.runtime.rst create mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.utils.rst create mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.utils.rst create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.response_event_generators.rst create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.rst create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.rst create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.tools.rst diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index 84c4a76a27e5..29bae6795995 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -1,6 +1,25 @@ # Release History +## 1.0.0b9 (2026-01-23) + +- Integrated with Foundry Tools +- Add persistence for agent thread and checkpoint +- Fixed WorkflowAgent concurrency issue +- Support Human-in-the-Loop + + +## 1.0.0b8 (2026-01-21) + +### Features Added + +- Support keep alive for long-running streaming responses. 
+ +### Bugs Fixed + +- Fixed AgentFramework breaking change and pin version to >=1.0.0b251112,<=1.0.0b260107 + + ## 1.0.0b7 (2025-12-05) ### Features Added diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py index 1a276a14ff9e..32cf57200a49 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py @@ -7,12 +7,13 @@ from agent_framework import AgentProtocol, BaseAgent, Workflow, WorkflowBuilder -from azure.ai.agentserver.agentframework._version import VERSION -from azure.ai.agentserver.agentframework._agent_framework import AgentFrameworkAgent -from azure.ai.agentserver.agentframework._ai_agent_adapter import AgentFrameworkAIAgentAdapter -from azure.ai.agentserver.agentframework._workflow_agent_adapter import AgentFrameworkWorkflowAdapter -from azure.ai.agentserver.agentframework._foundry_tools import FoundryToolsChatMiddleware -from azure.ai.agentserver.core.application import PackageMetadata, set_current_app +from azure.ai.agentserver.core.application import PackageMetadata, set_current_app # pylint: disable=import-error,no-name-in-module + +from ._version import VERSION +from ._agent_framework import AgentFrameworkAgent +from ._ai_agent_adapter import AgentFrameworkAIAgentAdapter +from ._workflow_agent_adapter import AgentFrameworkWorkflowAdapter +from ._foundry_tools import FoundryToolsChatMiddleware if TYPE_CHECKING: # pragma: no cover from azure.core.credentials_async import AsyncTokenCredential @@ -27,13 +28,11 @@ def from_agent_framework( ) -> "AgentFrameworkAIAgentAdapter": """ Create an Agent Framework AI Agent Adapter from an AgentProtocol or BaseAgent. - - :param agent: The agent to adapt. + + :keyword agent: The agent to adapt. 
:type agent: Union[BaseAgent, AgentProtocol] - :param credentials: Optional asynchronous token credential for authentication. + :keyword credentials: Optional asynchronous token credential for authentication. :type credentials: Optional[AsyncTokenCredential] - :param kwargs: Additional keyword arguments to pass to the adapter. - :type kwargs: Any :return: An instance of AgentFrameworkAIAgentAdapter. :rtype: AgentFrameworkAIAgentAdapter @@ -49,19 +48,17 @@ def from_agent_framework( ) -> "AgentFrameworkWorkflowAdapter": """ Create an Agent Framework Workflow Adapter. - The arugument `workflow` can be either a WorkflowBuilder or a factory function + The arugument `workflow` can be either a WorkflowBuilder or a factory function that returns a Workflow. - It will be called to create a new Workflow instance and `.as_agent()` will be + It will be called to create a new Workflow instance and `.as_agent()` will be called as well for each incoming CreateResponse request. Please ensure that the workflow definition can be converted to a WorkflowAgent. For more information, see the agent-framework samples and documentation. - :param workflow: The workflow builder or factory function to adapt. + :keyword workflow: The workflow builder or factory function to adapt. :type workflow: Union[WorkflowBuilder, Callable[[], Workflow]] - :param credentials: Optional asynchronous token credential for authentication. + :keyword credentials: Optional asynchronous token credential for authentication. :type credentials: Optional[AsyncTokenCredential] - :param kwargs: Additional keyword arguments to pass to the adapter. - :type kwargs: Any :return: An instance of AgentFrameworkWorkflowAdapter. 
:rtype: AgentFrameworkWorkflowAdapter """ @@ -75,18 +72,16 @@ def from_agent_framework( **kwargs: Any, ) -> "AgentFrameworkAgent": """ - Create an Agent Framework Adapter from either an AgentProtocol/BaseAgent or a + Create an Agent Framework Adapter from either an AgentProtocol/BaseAgent or a WorkflowAgent. One of agent or workflow must be provided. - :param agent: The agent to adapt. + :keyword agent: The agent to adapt. :type agent: Optional[Union[BaseAgent, AgentProtocol]] - :param workflow: The workflow builder or factory function to adapt. + :keyword workflow: The workflow builder or factory function to adapt. :type workflow: Optional[Union[WorkflowBuilder, Callable[[], Workflow]]] - :param credentials: Optional asynchronous token credential for authentication. + :keyword credentials: Optional asynchronous token credential for authentication. :type credentials: Optional[AsyncTokenCredential] - :param kwargs: Additional keyword arguments to pass to the adapter. - :type kwargs: Any :return: An instance of AgentFrameworkAgent. 
:rtype: AgentFrameworkAgent :raises TypeError: If neither or both of agent and workflow are provided, or if @@ -107,7 +102,7 @@ def workflow_factory() -> Workflow: return AgentFrameworkWorkflowAdapter(workflow_factory=workflow, credentials=credentials, **kwargs) raise TypeError("workflow must be a WorkflowBuilder or callable returning a Workflow") - if isinstance(agent, AgentProtocol) or isinstance(agent, BaseAgent): + if isinstance(agent, (AgentProtocol, BaseAgent)): return AgentFrameworkAIAgentAdapter(agent, credentials=credentials, **kwargs) raise TypeError("agent must be an instance of AgentProtocol or BaseAgent") @@ -118,4 +113,4 @@ def workflow_factory() -> Workflow: ] __version__ = VERSION -set_current_app(PackageMetadata.from_dist("azure-ai-agentserver-agentframework")) \ No newline at end of file +set_current_app(PackageMetadata.from_dist("azure-ai-agentserver-agentframework")) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py index ece0198285bf..732b70095028 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py @@ -19,7 +19,7 @@ ResponseStreamEvent, ) from azure.ai.agentserver.core.models.projects import ResponseErrorEvent, ResponseFailedEvent -from azure.ai.agentserver.core.tools import OAuthConsentRequiredError +from azure.ai.agentserver.core.tools import OAuthConsentRequiredError # pylint: disable=import-error from .models.agent_framework_output_streaming_converter import AgentFrameworkOutputStreamingConverter from .models.human_in_the_loop_helper import HumanInTheLoopHelper @@ -98,7 +98,7 @@ def init_tracing(self): logger.info("Observability setup completed with provided exporters.") elif 
project_endpoint: self._setup_tracing_with_azure_ai_client(project_endpoint) - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught logger.warning(f"Failed to initialize tracing: {e}", exc_info=True) self.tracer = trace.get_tracer(__name__) @@ -107,21 +107,21 @@ def _create_application_insights_exporter(self, connection_string): from azure.monitor.opentelemetry.exporter import AzureMonitorTraceExporter return AzureMonitorTraceExporter.from_connection_string(connection_string) - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught logger.error(f"Failed to create Application Insights exporter: {e}", exc_info=True) return None def _create_otlp_exporter(self, endpoint, protocol=None): try: if protocol and protocol.lower() in ("http", "http/protobuf", "http/json"): - from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter - - return OTLPSpanExporter(endpoint=endpoint) - else: - from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter + from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter return OTLPSpanExporter(endpoint=endpoint) - except Exception as e: + + from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter + + return OTLPSpanExporter(endpoint=endpoint) + except Exception as e: # pylint: disable=broad-exception-caught logger.error(f"Failed to create OTLP exporter: {e}", exc_info=True) return None @@ -182,8 +182,12 @@ async def agent_run( # pylint: disable=too-many-statements AsyncGenerator[ResponseStreamEvent, Any], ]: raise NotImplementedError("This method is implemented in the base class.") - - async def _load_agent_thread(self, context: AgentRunContext, agent: Union[AgentProtocol, WorkflowAgent]) -> Optional[AgentThread]: + + async def _load_agent_thread( + self, + context: AgentRunContext, + agent: Union[AgentProtocol, WorkflowAgent], + ) -> Optional[AgentThread]: """Load the agent 
thread for a given conversation ID. :param context: The agent run context. @@ -209,6 +213,9 @@ async def _save_agent_thread(self, context: AgentRunContext, agent_thread: Agent :type context: AgentRunContext :param agent_thread: The agent thread to save. :type agent_thread: AgentThread + + :return: None + :rtype: None """ if agent_thread and self._thread_repository: await self._thread_repository.set(context.conversation_id, agent_thread) @@ -216,14 +223,28 @@ async def _save_agent_thread(self, context: AgentRunContext, agent_thread: Agent def _run_streaming_updates( self, - *, context: AgentRunContext, run_stream: Callable[[], AsyncGenerator[Any, None]], agent_thread: Optional[AgentThread] = None, ) -> AsyncGenerator[ResponseStreamEvent, Any]: - """Execute a streaming run with shared OAuth/error handling.""" + """ + Execute a streaming run with shared OAuth/error handling. + + :param context: The agent run context. + :type context: AgentRunContext + :param run_stream: A callable that invokes the agent in stream mode + :type run_stream: Callable[[], AsyncGenerator[Any, None]] + :param agent_thread: The agent thread to use during streaming updates. + :type agent_thread: Optional[AgentThread] + + :return: An async generator yielding streaming events. 
+ :rtype: AsyncGenerator[ResponseStreamEvent, Any] + """ logger.info("Running agent in streaming mode") - streaming_converter = AgentFrameworkOutputStreamingConverter(context, hitl_helper=self._hitl_helper) + streaming_converter = AgentFrameworkOutputStreamingConverter( + context, + hitl_helper=self._hitl_helper, + ) async def stream_updates(): try: @@ -250,7 +271,9 @@ async def stream_updates(): ) yield ResponseFailedEvent( sequence_number=streaming_converter.next_sequence(), - response=streaming_converter._build_response(status="failed"), # pylint: disable=protected-access + response=streaming_converter._build_response( # pylint: disable=protected-access + status="failed" + ), ) except Exception as e: # pylint: disable=broad-exception-caught logger.error("Unhandled exception during streaming updates: %s", e, exc_info=True) @@ -262,10 +285,12 @@ async def stream_updates(): ) yield ResponseFailedEvent( sequence_number=streaming_converter.next_sequence(), - response=streaming_converter._build_response(status="failed"), # pylint: disable=protected-access + response=streaming_converter._build_response( # pylint: disable=protected-access + status="failed" + ), ) finally: # No request-scoped resources to clean up today, but keep hook for future use. pass - return stream_updates() \ No newline at end of file + return stream_updates() diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py index 6105470dbdc9..622fb2762e7b 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py @@ -1,6 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- +# pylint: disable=no-name-in-module,import-error from __future__ import annotations from typing import Any, AsyncGenerator, Optional, Union @@ -40,7 +41,7 @@ async def agent_run( # pylint: disable=too-many-statements AsyncGenerator[ResponseStreamEvent, Any], ]: try: - logger.info(f"Starting AIAgent agent_run with stream={context.stream}") + logger.info("Starting AIAgent agent_run with stream=%s", context.stream) request_input = context.request.get("input") agent_thread = await self._load_agent_thread(context, self._agent) @@ -49,8 +50,7 @@ async def agent_run( # pylint: disable=too-many-statements message = await input_converter.transform_input( request_input, agent_thread=agent_thread) - logger.debug(f"Transformed input message type: {type(message)}") - + logger.debug("Transformed input message type: %s", type(message)) # Use split converters if context.stream: return self._run_streaming_updates( @@ -67,7 +67,7 @@ async def agent_run( # pylint: disable=too-many-statements result = await self.agent.run( message, thread=agent_thread) - logger.debug(f"Agent run completed, result type: {type(result)}") + logger.debug("Agent run completed, result type: %s", type(result)) await self._save_agent_thread(context, agent_thread) non_streaming_converter = AgentFrameworkOutputNonStreamingConverter(context, hitl_helper=self._hitl_helper) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py index 875c1de24e8c..78d8108ed96c 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py @@ -1,6 +1,8 @@ # --------------------------------------------------------- # Copyright (c) 
Microsoft Corporation. All rights reserved. # --------------------------------------------------------- +# pylint: disable=client-accepts-api-version-keyword,missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs +# pylint: disable=no-name-in-module,import-error from __future__ import annotations import inspect @@ -24,7 +26,7 @@ def _attach_signature_from_pydantic_model(func, input_model) -> None: ann = field.annotation or Any annotations[name] = ann - default = inspect._empty if field.is_required() else field.default + default = inspect._empty if field.is_required() else field.default # pylint: disable=protected-access params.append( inspect.Parameter( name=name, @@ -50,7 +52,7 @@ async def list_tools(self) -> List[AIFunction]: foundry_tool_catalog = server_context.tools.catalog resolved_tools = await foundry_tool_catalog.list(self._allowed_tools) return [self._to_aifunction(tool) for tool in resolved_tools] - + def _to_aifunction(self, foundry_tool: "ResolvedFoundryTool") -> AIFunction: """Convert an FoundryTool to an Agent Framework AI Function diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py index 84058978c521..b1c2836b6921 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b7" +VERSION = "1.0.0b9" diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py index 92667eb7cbcc..fb40cb453124 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py @@ -1,18 +1,25 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -from typing import TYPE_CHECKING, Any, AsyncGenerator, Callable, Optional, Protocol, Union, List +# pylint: disable=no-name-in-module,import-error +from typing import ( + Any, + AsyncGenerator, + Callable, + Optional, + Union, +) from agent_framework import Workflow, CheckpointStorage, WorkflowAgent, WorkflowCheckpoint from agent_framework._workflows import get_checkpoint_summary -from azure.ai.agentserver.core.tools import OAuthConsentRequiredError from azure.ai.agentserver.core import AgentRunContext from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.models import ( Response as OpenAIResponse, ResponseStreamEvent, ) +from azure.ai.agentserver.core.tools import OAuthConsentRequiredError from ._agent_framework import AgentFrameworkAgent from .models.agent_framework_input_converters import AgentFrameworkInputConverter @@ -46,7 +53,7 @@ async def agent_run( # pylint: disable=too-many-statements try: agent = self._build_agent() - logger.info(f"Starting WorkflowAgent agent_run with stream={context.stream}") + logger.info("Starting WorkflowAgent agent_run with stream=%s", context.stream) request_input = 
context.request.get("input") agent_thread = await self._load_agent_thread(context, agent) @@ -59,18 +66,21 @@ async def agent_run( # pylint: disable=too-many-statements if selected_checkpoint: summary = get_checkpoint_summary(selected_checkpoint) if summary.status == "completed": - logger.warning(f"Selected checkpoint {selected_checkpoint.checkpoint_id} is completed. Will not resume from it.") + logger.warning( + "Selected checkpoint %s is completed. Will not resume from it.", + selected_checkpoint.checkpoint_id, + ) selected_checkpoint = None # Do not resume from completed checkpoints else: await self._load_checkpoint(agent, selected_checkpoint, checkpoint_storage) - logger.info(f"Loaded checkpoint with ID: {selected_checkpoint.checkpoint_id}") + logger.info("Loaded checkpoint with ID: %s", selected_checkpoint.checkpoint_id) input_converter = AgentFrameworkInputConverter(hitl_helper=self._hitl_helper) message = await input_converter.transform_input( request_input, agent_thread=agent_thread, checkpoint=selected_checkpoint) - logger.debug(f"Transformed input message type: {type(message)}") + logger.debug("Transformed input message type: %s", type(message)) # Use split converters if context.stream: @@ -90,7 +100,7 @@ async def agent_run( # pylint: disable=too-many-statements message, thread=agent_thread, checkpoint_storage=checkpoint_storage) - logger.debug(f"WorkflowAgent run completed, result type: {type(result)}") + logger.debug("WorkflowAgent run completed, result type: %s", type(result)) await self._save_agent_thread(context, agent_thread) @@ -130,13 +140,21 @@ async def _get_latest_checkpoint(self, return latest_checkpoint return None - async def _load_checkpoint(self, agent: WorkflowAgent, + async def _load_checkpoint(self, + agent: WorkflowAgent, checkpoint: WorkflowCheckpoint, checkpoint_storage: CheckpointStorage) -> None: """Load the checkpoint data from the given WorkflowCheckpoint. + :param agent: The WorkflowAgent to load the checkpoint into. 
+ :type agent: WorkflowAgent :param checkpoint: The WorkflowCheckpoint to load data from. :type checkpoint: WorkflowCheckpoint + :param checkpoint_storage: The storage to load the checkpoint from. + :type checkpoint_storage: CheckpointStorage + + :return: None + :rtype: None """ await agent.run(checkpoint_id=checkpoint.checkpoint_id, - checkpoint_storage=checkpoint_storage) \ No newline at end of file + checkpoint_storage=checkpoint_storage) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py index a6eefb2b1740..9ba678e7bebf 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py @@ -43,21 +43,21 @@ async def transform_input( if isinstance(input, str): return input - + if self._hitl_helper: # load pending requests from checkpoint and thread messages if available thread_messages = [] if agent_thread and agent_thread.message_store: thread_messages = await agent_thread.message_store.list_messages() pending_hitl_requests = self._hitl_helper.get_pending_hitl_request(thread_messages, checkpoint) - logger.info(f"Pending HitL requests: {list(pending_hitl_requests.keys())}") - hitl_response = self._hitl_helper.validate_and_convert_hitl_response( - input, - pending_requests=pending_hitl_requests) - logger.info(f"HitL response validation result: {[m.to_dict() for m in hitl_response]}") - if hitl_response: - return hitl_response - + if pending_hitl_requests: + logger.info("Pending HitL requests: %s", list(pending_hitl_requests.keys())) + hitl_response = self._hitl_helper.validate_and_convert_hitl_response( + input, + 
pending_requests=pending_hitl_requests) + if hitl_response: + return hitl_response + return self._transform_input_internal(input) def _transform_input_internal( @@ -163,7 +163,7 @@ def _validate_and_convert_hitl_response( if not isinstance(input, list) or len(input) != 1: logger.warning("Expected single-item list input for HitL response validation.") return None - + item = input[0] if item.get("type") != "function_call_output": logger.warning("Expected function_call_output type for HitL response validation.") @@ -178,5 +178,5 @@ def _validate_and_convert_hitl_response( if not isinstance(request_info, RequestInfoEvent): logger.warning("No valid pending request info found for call_id: %s", call_id) return None - + return self._hitl_helper.convert_response(request_info, item) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py index 95c7bb7acc6b..4984b2fc0423 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py @@ -19,12 +19,7 @@ from azure.ai.agentserver.core import AgentRunContext from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.models import Response as OpenAIResponse -from azure.ai.agentserver.core.models.projects import ( - AgentId, - CreatedBy, - ItemContentOutputText, - ResponsesAssistantMessageItemResource, -) +from azure.ai.agentserver.core.models.projects import ItemContentOutputText from .agent_id_generator import AgentIdGenerator from .constants import Constants @@ -53,13 +48,13 @@ def 
_build_item_content_output_text(self, text: str) -> ItemContentOutputText: def _build_created_by(self, author_name: str) -> dict: self._ensure_response_started() - + agent_dict = { "type": "agent_id", "name": author_name or "", "version": "", # Default to empty string } - + return { "agent": agent_dict, "response_id": self._response_id, @@ -189,7 +184,12 @@ def _append_function_call_content(self, content: FunctionCallContent, sink: List len(arguments or ""), ) - def _append_function_result_content(self, content: FunctionResultContent, sink: List[dict], author_name: str) -> None: + def _append_function_result_content( + self, + content: FunctionResultContent, + sink: List[dict], + author_name: str, + ) -> None: # Coerce the function result into a simple display string. result = [] raw = getattr(content, "result", None) @@ -211,13 +211,19 @@ def _append_function_result_content(self, content: FunctionResultContent, sink: } ) logger.debug( - "added function_call_output item id=%s call_id=%s output_len=%d", + "added function_call_output item id=%s call_id=%s " + "output_len=%d", func_out_id, call_id, len(result), ) - - def _append_user_input_request_contents(self, content: UserInputRequestContents, sink: List[dict], author_name: str) -> None: + + def _append_user_input_request_contents( + self, + content: UserInputRequestContents, + sink: List[dict], + author_name: str, + ) -> None: item_id = self._context.id_generator.generate_function_call_id() content = self._hitl_helper.convert_user_input_request_content(content) sink.append( @@ -231,7 +237,11 @@ def _append_user_input_request_contents(self, content: UserInputRequestContents, "created_by": self._build_created_by(author_name), } ) - logger.debug(" added user_input_request item id=%s call_id=%s", item_id, content["call_id"]) + logger.debug( + " added user_input_request item id=%s call_id=%s", + item_id, + content["call_id"], + ) # ------------- simple normalization helper ------------------------- def 
_coerce_result_text(self, value: Any) -> str | dict: diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 253b0fc7aa9e..805f3fc79ead 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -1,16 +1,19 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -# pylint: disable=attribute-defined-outside-init,protected-access +# pylint: disable=attribute-defined-outside-init,protected-access,unnecessary-lambda-assignment # mypy: disable-error-code="call-overload,assignment,arg-type,override" from __future__ import annotations -from ast import arguments import datetime import json from typing import Any, AsyncIterable, List, Union -from agent_framework import AgentRunResponseUpdate, BaseContent, FunctionApprovalRequestContent, FunctionResultContent +from agent_framework import ( + AgentRunResponseUpdate, + BaseContent, + FunctionResultContent, +) from agent_framework._types import ( ErrorContent, FunctionCallContent, @@ -24,8 +27,6 @@ ResponseStreamEvent, ) from azure.ai.agentserver.core.models.projects import ( - AgentId, - CreatedBy, FunctionToolCallItemResource, FunctionToolCallOutputItemResource, ItemContentOutputText, @@ -52,7 +53,12 @@ class _BaseStreamingState: """Base interface for streaming state handlers.""" - async def convert_contents(self, contents: AsyncIterable[BaseContent], author_name: str) -> AsyncIterable[ResponseStreamEvent]: # pylint: 
disable=unused-argument + async def convert_contents( + self, + contents: AsyncIterable[BaseContent], + author_name: str, + ) -> AsyncIterable[ResponseStreamEvent]: + # pylint: disable=unused-argument raise NotImplementedError @@ -62,7 +68,11 @@ class _TextContentStreamingState(_BaseStreamingState): def __init__(self, parent: AgentFrameworkOutputStreamingConverter): self._parent = parent - async def convert_contents(self, contents: AsyncIterable[TextContent], author_name: str) -> AsyncIterable[ResponseStreamEvent]: + async def convert_contents( + self, + contents: AsyncIterable[TextContent], + author_name: str, + ) -> AsyncIterable[ResponseStreamEvent]: item_id = self._parent.context.id_generator.generate_message_id() output_index = self._parent.next_output_index() @@ -116,8 +126,8 @@ async def convert_contents(self, contents: AsyncIterable[TextContent], author_na ) item = ResponsesAssistantMessageItemResource( - id=item_id, - status="completed", + id=item_id, + status="completed", content=[content_part], created_by=self._parent._build_created_by(author_name), ) @@ -209,7 +219,7 @@ async def convert_contents( ) self._parent.add_completed_output_item(item) # pylint: disable=protected-access - + # process HITL contents after function calls for content in hitl_contents: item_id = self._parent.context.id_generator.generate_function_call_id() @@ -260,7 +270,7 @@ def _serialize_arguments(self, arguments: Any) -> str: return arguments try: return json.dumps(arguments) - except Exception as e: + except Exception: # pylint: disable=broad-exception-caught return str(arguments) @@ -363,7 +373,7 @@ async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> Async is_changed = ( lambda a, b: a is not None \ and b is not None \ - and a.message_id != b.message_id # pylint: disable=unnecessary-lambda-assignment + and a.message_id != b.message_id ) async for group in chunk_on_change(updates, is_changed): @@ -381,15 +391,19 @@ async def convert(self, updates: 
AsyncIterable[AgentRunResponseUpdate]) -> Async elif isinstance(first, FunctionResultContent): state = _FunctionCallOutputStreamingState(self) elif isinstance(first, ErrorContent): - raise ValueError(f"ErrorContent received: code={first.error_code}, message={first.message}") + error_msg = ( + f"ErrorContent received: code={first.error_code}, " + f"message={first.message}" + ) + raise ValueError(error_msg) if not state: continue # Extract just the content from (content, author_name) tuples using async generator async def extract_contents(): - async for content, _ in contents_with_author: + async for content, _ in contents_with_author: # pylint: disable=cell-var-from-loop yield content - + async for content in state.convert_contents(extract_contents(), author_name): yield content @@ -404,13 +418,16 @@ def _build_created_by(self, author_name: str) -> dict: "name": author_name or "", "version": "", } - + return { "agent": agent_dict, "response_id": self._response_id, } - async def _read_updates(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> AsyncIterable[tuple[BaseContent, str]]: + async def _read_updates( + self, + updates: AsyncIterable[AgentRunResponseUpdate], + ) -> AsyncIterable[tuple[BaseContent, str]]: async for update in updates: if not update.contents: continue diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py index 30bb3aa8d9c5..4b3dce2c1bdb 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py @@ -1,3 +1,6 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- from typing import Any, List, Dict, Optional, Union import json @@ -30,10 +33,10 @@ def get_pending_hitl_request(self, request_obj = RequestInfoEvent.from_dict(request) res[call_id] = request_obj return res - + if not thread_messages: return res - + # if no checkpoint (Agent), find user input request and pair the feedbacks for message in thread_messages: for content in message.contents: @@ -59,7 +62,7 @@ def get_pending_hitl_request(self, if call_id and call_id in res: res.pop(call_id) return res - + def convert_user_input_request_content(self, content: UserInputRequestContents) -> dict: function_call = content.function_call call_id = getattr(function_call, "call_id", "") @@ -69,7 +72,7 @@ def convert_user_input_request_content(self, content: UserInputRequestContents) "name": HUMAN_IN_THE_LOOP_FUNCTION_NAME, "arguments": arguments or "", } - + def convert_request_arguments(self, arguments: Any) -> str: # convert data to payload if possible if isinstance(arguments, dict): @@ -85,16 +88,16 @@ def convert_request_arguments(self, arguments: Any) -> str: except Exception: # pragma: no cover - fallback # pylint: disable=broad-exception-caught arguments = str(arguments) return arguments - + def validate_and_convert_hitl_response(self, - input: str | List[Dict] | None, + input: Union[str, List[Dict], None], pending_requests: Dict[str, RequestInfoEvent], - ) -> List[ChatMessage] | None: + ) -> Optional[List[ChatMessage]]: if input is None or isinstance(input, str): logger.warning("Expected list input for HitL response validation, got str.") return None - + res = [] for item in input: if item.get("type") != "function_call_output": @@ -116,4 +119,4 @@ def convert_response(self, hitl_request: RequestInfoEvent, input: Dict) -> ChatM call_id=hitl_request.request_id, result=response_result, ) - return ChatMessage(role="tool", contents=[response_content]) \ No newline at end of file + return ChatMessage(role="tool", 
contents=[response_content]) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/__init__.py index 40ce839556bd..cf07cb449d00 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/__init__.py @@ -18,4 +18,4 @@ "CheckpointRepository", "InMemoryCheckpointRepository", "FileCheckpointRepository", -] \ No newline at end of file +] diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py index 294d7d0948fc..66528ff96213 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py @@ -1,3 +1,6 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- from abc import ABC, abstractmethod import json import os @@ -10,7 +13,11 @@ class AgentThreadRepository(ABC): """AgentThread repository to manage saved thread messages of agent threads and workflows.""" @abstractmethod - async def get(self, conversation_id: str, agent: Optional[Union[AgentProtocol, WorkflowAgent]]=None) -> Optional[AgentThread]: + async def get( + self, + conversation_id: str, + agent: Optional[Union[AgentProtocol, WorkflowAgent]] = None, + ) -> Optional[AgentThread]: """Retrieve the savedt thread for a given conversation ID. :param conversation_id: The conversation ID. @@ -22,7 +29,7 @@ async def get(self, conversation_id: str, agent: Optional[Union[AgentProtocol, W :rtype: Optional[AgentThread] """ - @abstractmethod + @abstractmethod async def set(self, conversation_id: str, thread: AgentThread) -> None: """Save the thread for a given conversation ID. @@ -38,7 +45,11 @@ class InMemoryAgentThreadRepository(AgentThreadRepository): def __init__(self) -> None: self._inventory: dict[str, AgentThread] = {} - async def get(self, conversation_id: str, agent: Optional[Union[AgentProtocol, WorkflowAgent]]=None) -> Optional[AgentThread]: + async def get( + self, + conversation_id: str, + agent: Optional[Union[AgentProtocol, WorkflowAgent]] = None, + ) -> Optional[AgentThread]: """Retrieve the saved thread for a given conversation ID. :param conversation_id: The conversation ID. @@ -69,13 +80,17 @@ class SerializedAgentThreadRepository(AgentThreadRepository): def __init__(self, agent: AgentProtocol) -> None: """ Initialize the repository with the given agent. - + :param agent: The agent instance. 
:type agent: AgentProtocol """ self._agent = agent - async def get(self, conversation_id: str, agent: Optional[Union[AgentProtocol, WorkflowAgent]]=None) -> Optional[AgentThread]: + async def get( + self, + conversation_id: str, + agent: Optional[Union[AgentProtocol, WorkflowAgent]] = None, + ) -> Optional[AgentThread]: """Retrieve the saved thread for a given conversation ID. :param conversation_id: The conversation ID. @@ -115,7 +130,7 @@ async def read_from_storage(self, conversation_id: str) -> Optional[Any]: :rtype: Optional[Any] """ raise NotImplementedError("read_from_storage is not implemented.") - + async def write_to_storage(self, conversation_id: str, serialized_thread: Any) -> None: """Write the serialized thread to storage. @@ -123,9 +138,11 @@ async def write_to_storage(self, conversation_id: str, serialized_thread: Any) - :type conversation_id: str :param serialized_thread: The serialized thread to save. :type serialized_thread: Any + :return: None + :rtype: None """ raise NotImplementedError("write_to_storage is not implemented.") - + class JsonLocalFileAgentThreadRepository(SerializedAgentThreadRepository): """Json based implementation of AgentThreadRepository using local file storage.""" @@ -150,4 +167,4 @@ async def write_to_storage(self, conversation_id: str, serialized_thread: Any) - f.write(serialized_str) def _get_file_path(self, conversation_id: str) -> str: - return os.path.join(self._storage_path, f"{conversation_id}.json") \ No newline at end of file + return os.path.join(self._storage_path, f"{conversation_id}.json") diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/checkpoint_repository.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/checkpoint_repository.py index 0bc89a4b5377..471d3a2f7f84 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/checkpoint_repository.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/checkpoint_repository.py @@ -1,6 +1,9 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- from abc import ABC, abstractmethod import os -from typing import Any, Optional +from typing import Optional from agent_framework import ( CheckpointStorage, @@ -57,6 +60,6 @@ async def get_or_create(self, conversation_id: str) -> Optional[CheckpointStorag if conversation_id not in self._inventory: self._inventory[conversation_id] = FileCheckpointStorage(self._get_dir_path(conversation_id)) return self._inventory[conversation_id] - + def _get_dir_path(self, conversation_id: str) -> str: - return os.path.join(self._storage_path, conversation_id) \ No newline at end of file + return os.path.join(self._storage_path, conversation_id) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml index 47ffdce2c23e..7cbbfd0edf6a 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml @@ -20,7 +20,7 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-ai-agentserver-core>=1.0.0b7", + "azure-ai-agentserver-core==1.0.0b9", "agent-framework-azure-ai>=1.0.0b251112,<=1.0.0b260107", "agent-framework-core>=1.0.0b251112,<=1.0.0b260107", "opentelemetry-exporter-otlp-proto-grpc>=1.36.0", diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index 84c4a76a27e5..b05d70708716 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ 
b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -1,6 +1,18 @@ # Release History +## 1.0.0b9 (2026-01-23) + +- Integrated with Foundry Tools + + +## 1.0.0b8 (2026-01-21) + +### Features Added + +- Support keep alive for long-running streaming responses. + + ## 1.0.0b7 (2025-12-05) ### Features Added diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py index 84058978c521..b1c2836b6921 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b7" +VERSION = "1.0.0b9" diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py index 53eb15af3550..2464179a119f 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py @@ -13,6 +13,9 @@ class AgentRunContext: + """ + :meta private: + """ def __init__(self, payload: dict, **kwargs: Any) -> None: self._raw_payload = payload self._request = _deserialize_create_response(payload) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py index 030fbe26b5e7..a998de7f9597 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py +++ 
b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py @@ -43,12 +43,13 @@ class FoundryToolClient(AsyncContextManager["FoundryToolClient"]): # pylint: di This client provides access to tools from both MCP (Model Context Protocol) servers and Azure AI Tools API endpoints, enabling unified tool discovery and invocation. - :param str endpoint: - The fully qualified endpoint for the Azure AI Agents service. - Example: "https://.api.azureml.ms" + :param endpoint: + The fully qualified endpoint for the Azure AI Agents service. + Example: "https://.api.azureml.ms" + :type endpoint: str :param credential: - Credential for authenticating requests to the service. - Use credentials from azure-identity like DefaultAzureCredential. + Credential for authenticating requests to the service. + Use credentials from azure-identity like DefaultAzureCredential. :type credential: ~azure.core.credentials.TokenCredential :param api_version: The API version to use for this operation. :type api_version: str or None @@ -87,18 +88,21 @@ async def list_tools( Retrieves tools from both MCP servers and Azure AI Tools API endpoints, returning them as ResolvedFoundryTool instances ready for invocation. + :param tools: Collection of FoundryTool instances to resolve. :type tools: Collection[~FoundryTool] :param user: Information about the user requesting the tools. :type user: Optional[UserInfo] :param agent_name: Name of the agent requesting the tools. :type agent_name: str + :return: List of resolved Foundry tools. :rtype: List[ResolvedFoundryTool] :raises ~azure.ai.agentserver.core.tools._exceptions.OAuthConsentRequiredError: - Raised when the service requires user OAuth consent. + Raised when the service requires user OAuth consent. :raises ~azure.core.exceptions.HttpResponseError: - Raised for HTTP communication failures. + Raised for HTTP communication failures. 
+ """ _ = kwargs # Reserved for future use resolved_tools: List[ResolvedFoundryTool] = [] @@ -119,18 +123,21 @@ async def list_tools_details( Retrieves tools from both MCP servers and Azure AI Tools API endpoints, returning them as ResolvedFoundryTool instances ready for invocation. + :param tools: Collection of FoundryTool instances to resolve. :type tools: Collection[~FoundryTool] :param user: Information about the user requesting the tools. :type user: Optional[UserInfo] :param agent_name: Name of the agent requesting the tools. :type agent_name: str + :return: Mapping of tool IDs to lists of FoundryToolDetails. :rtype: Mapping[str, List[FoundryToolDetails]] :raises ~azure.ai.agentserver.core.tools._exceptions.OAuthConsentRequiredError: - Raised when the service requires user OAuth consent. + Raised when the service requires user OAuth consent. :raises ~azure.core.exceptions.HttpResponseError: - Raised for HTTP communication failures. + Raised for HTTP communication failures. + """ _ = kwargs # Reserved for future use resolved_tools: Dict[str, List[FoundryToolDetails]] = defaultdict(list) @@ -187,6 +194,7 @@ async def invoke_tool( Raised for HTTP communication failures. :raises ~ToolInvocationError: Raised when the tool invocation fails or source is not supported. + """ _ = kwargs # Reserved for future use if tool.source is FoundryToolSource.HOSTED_MCP: diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py index f60fb63f2cdc..80b25d78b20e 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py @@ -33,11 +33,12 @@ def install(cls, :param app: The Starlette application to install the middleware into. 
:type app: Starlette :param user_context: Optional context variable to use for storing user info. - If not provided, a default context variable will be used. - :type user_context: Optional[ContextVar[Optional[UserInfo]]] + If not provided, a default context variable will be used. + :type user_context: Optional[ContextVar[Optional[UserInfo]]] :param user_resolver: Optional function to resolve user info from the request. - If not provided, a default resolver will be used. + If not provided, a default resolver will be used. :type user_resolver: Optional[Callable[[Request], Awaitable[Optional[UserInfo]]]] + """ app.add_middleware(UserInfoContextMiddleware, # type: ignore[arg-type] user_info_var=user_context or ContextVarUserProvider.default_user_info_context, diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.application.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.application.rst new file mode 100644 index 000000000000..415b7d3b2538 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.application.rst @@ -0,0 +1,7 @@ +azure.ai.agentserver.core.application package +============================================= + +.. automodule:: azure.ai.agentserver.core.application + :inherited-members: + :members: + :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.openai.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.openai.rst new file mode 100644 index 000000000000..dd1cce6eecca --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.openai.rst @@ -0,0 +1,8 @@ +azure.ai.agentserver.core.models.openai package +=============================================== + +.. 
automodule:: azure.ai.agentserver.core.models.openai + :inherited-members: + :members: + :undoc-members: + :ignore-module-all: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.projects.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.projects.rst new file mode 100644 index 000000000000..38e0be4f331b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.projects.rst @@ -0,0 +1,8 @@ +azure.ai.agentserver.core.models.projects package +================================================= + +.. automodule:: azure.ai.agentserver.core.models.projects + :inherited-members: + :members: + :undoc-members: + :ignore-module-all: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.rst new file mode 100644 index 000000000000..008b280c64de --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.rst @@ -0,0 +1,17 @@ +azure.ai.agentserver.core.models package +======================================== + +.. automodule:: azure.ai.agentserver.core.models + :inherited-members: + :members: + :undoc-members: + :ignore-module-all: + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + azure.ai.agentserver.core.models.openai + azure.ai.agentserver.core.models.projects diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.rst index da01b083b0b3..b8f1dadf3a73 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.rst @@ -12,7 +12,11 @@ Subpackages .. 
toctree:: :maxdepth: 4 + azure.ai.agentserver.core.application + azure.ai.agentserver.core.models azure.ai.agentserver.core.server + azure.ai.agentserver.core.tools + azure.ai.agentserver.core.utils Submodules ---------- diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst index 01e54afab103..8fb5b52e4465 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst @@ -24,4 +24,11 @@ azure.ai.agentserver.core.server.common.agent\_run\_context module :inherited-members: :members: :undoc-members: - :no-index: + +azure.ai.agentserver.core.server.common.constants module +-------------------------------------------------------- + +.. automodule:: azure.ai.agentserver.core.server.common.constants + :inherited-members: + :members: + :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.client.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.client.rst new file mode 100644 index 000000000000..8182914f69f9 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.client.rst @@ -0,0 +1,7 @@ +azure.ai.agentserver.core.tools.client package +============================================== + +.. 
automodule:: azure.ai.agentserver.core.tools.client + :inherited-members: + :members: + :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.rst new file mode 100644 index 000000000000..c112ec2beabd --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.rst @@ -0,0 +1,18 @@ +azure.ai.agentserver.core.tools package +======================================= + +.. automodule:: azure.ai.agentserver.core.tools + :inherited-members: + :members: + :undoc-members: + :exclude-members: BaseModel,model_json_schema + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + azure.ai.agentserver.core.tools.client + azure.ai.agentserver.core.tools.runtime + azure.ai.agentserver.core.tools.utils diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.runtime.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.runtime.rst new file mode 100644 index 000000000000..c502d56b42f6 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.runtime.rst @@ -0,0 +1,7 @@ +azure.ai.agentserver.core.tools.runtime package +=============================================== + +.. automodule:: azure.ai.agentserver.core.tools.runtime + :inherited-members: + :members: + :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.utils.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.utils.rst new file mode 100644 index 000000000000..94d3f310e112 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.utils.rst @@ -0,0 +1,7 @@ +azure.ai.agentserver.core.tools.utils package +============================================= + +.. 
automodule:: azure.ai.agentserver.core.tools.utils + :inherited-members: + :members: + :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.utils.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.utils.rst new file mode 100644 index 000000000000..5250167cf7e6 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.utils.rst @@ -0,0 +1,7 @@ +azure.ai.agentserver.core.utils package +======================================= + +.. automodule:: azure.ai.agentserver.core.utils + :inherited-members: + :members: + :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index e53b8f5474b7..afb5e6797396 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -30,7 +30,7 @@ dependencies = [ "starlette>=0.45.0", "uvicorn>=0.31.0", "aiohttp>=3.13.0", # used by azure-identity aio - "cachetools" + "cachetools>=6.0.0" ] [build-system] diff --git a/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_test.py b/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_test.py index 3d4187a188f2..f6d2c08bb0b9 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_test.py +++ b/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_test.py @@ -97,7 +97,7 @@ async def agent_run(context: AgentRunContext): return response -my_agent = FoundryCBAgent() +my_agent = FoundryCBAgent(project_endpoint="mock-endpoint") my_agent.agent_run = agent_run if __name__ == "__main__": diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index abea93ee106a..43641a3de515 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -1,6 +1,22 @@ # Release History +## 1.0.0b9 (2026-01-23) + +- Integrated with Foundry Tools + +- Support Human-in-the-Loop + +- Added Response API converters for request conversion and orchestration. + + +## 1.0.0b8 (2026-01-21) + +### Features Added + +- Support keep alive for long-running streaming responses. + + ## 1.0.0b7 (2025-12-05) ### Features Added diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py index 7fe934ae81c6..7fefa1b486d5 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py @@ -6,6 +6,7 @@ from typing import Optional, TYPE_CHECKING from azure.ai.agentserver.core.application import PackageMetadata, set_current_app + from ._context import LanggraphRunContext from ._version import VERSION from .langgraph import LangGraphAdapter diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py index 84058978c521..b1c2836b6921 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b7" +VERSION = "1.0.0b9" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index 6827cced1902..e8e524764db2 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -12,8 +12,8 @@ from azure.ai.agentserver.core.constants import Constants from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.server.base import FoundryCBAgent -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext -from azure.ai.agentserver.core.tools import OAuthConsentRequiredError +from azure.ai.agentserver.core import AgentRunContext +from azure.ai.agentserver.core.tools import OAuthConsentRequiredError # pylint:disable=import-error,no-name-in-module from ._context import LanggraphRunContext from .models.response_api_converter import GraphInputArguments, ResponseAPIConverter from .models.response_api_default_converter import ResponseAPIDefaultConverter @@ -68,7 +68,7 @@ async def agent_run(self, context: AgentRunContext): try: lg_run_context = await self.setup_lg_run_context(context) input_arguments = await self.converter.convert_request(lg_run_context) - self.ensure_runnable_config(input_arguments, lg_run_context) + self.ensure_runnable_config(input_arguments) if not context.stream: response = await self.agent_run_non_stream(input_arguments) @@ -156,19 +156,17 @@ async def agent_run_astream(self, logger.error(f"Error during streaming agent run: {e}", exc_info=True) raise e - def ensure_runnable_config(self, input_arguments: GraphInputArguments, context: LanggraphRunContext): + def ensure_runnable_config(self, input_arguments: GraphInputArguments): """ 
Ensure the RunnableConfig is set in the input arguments. :param input_arguments: The input arguments for the agent run. :type input_arguments: GraphInputArguments - :param context: The Langgraph run context. - :type context: LanggraphRunContext """ config = input_arguments.get("config", {}) configurable = config.get("configurable", {}) - config["configurable"] = configurable configurable["thread_id"] = input_arguments["context"].agent_run.conversation_id + config["configurable"] = configurable callbacks = config.get("callbacks", []) if self.azure_ai_tracer and self.azure_ai_tracer not in callbacks: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py index d1c5531993a1..32cbf93a4bfb 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py @@ -61,14 +61,16 @@ async def convert_request(self, context: LanggraphRunContext) -> GraphInputArgum """ @abstractmethod - async def convert_response_non_stream(self, output: Any, context: LanggraphRunContext) -> Response: + async def convert_response_non_stream( + self, output: Union[dict[str, Any], Any], context: LanggraphRunContext + ) -> Response: """Convert the completed LangGraph state into a final non-streaming Response object. This is a convenience wrapper around state_to_response that retrieves the current state snapshot asynchronously. :param output: The LangGraph output to convert. - :type output: Any + :type output: Union[dict[str, Any], Any] :param context: The context for the agent run. 
:type context: LanggraphRunContext diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py index 9bc237c87cf1..cfe5229e3634 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py @@ -56,7 +56,8 @@ async def convert_request(self, context: LanggraphRunContext) -> GraphInputArgum context=context, ) - async def convert_response_non_stream(self, output: Any, context: LanggraphRunContext) -> Response: + async def convert_response_non_stream( + self, output: Union[dict[str, Any], Any], context: LanggraphRunContext) -> Response: agent_run_context = context.agent_run converter = self._create_non_stream_response_converter(context) converted_output = converter.convert(output) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py index c776fad3dcad..7ec8bdf14f1a 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py @@ -5,7 +5,7 @@ # mypy: disable-error-code="valid-type,call-overload,attr-defined" import copy from abc import ABC, abstractmethod -from typing import Any, Collection, Iterable, List +from typing import Any, Collection, Iterable, List, Union from langchain_core import messages from langchain_core.messages import AnyMessage @@ -51,11 
+51,15 @@ def __init__(self, self.context = context self.hitl_helper = hitl_helper - def convert(self, output: dict[str, Any]) -> list[project_models.ItemResource]: + def convert(self, output: Union[dict[str, Any], Any]) -> list[project_models.ItemResource]: res: list[project_models.ItemResource] = [] - for node_name, node_output in output.items(): - node_results = self._convert_node_output(node_name, node_output) - res.extend(node_results) + if not isinstance(output, list): + logger.error(f"Expected output to be a list, got {type(output)}: {output}") + raise ValueError(f"Invalid output format. Expected a list, got {type(output)}.") + for step in output: + for node_name, node_output in step.items(): + node_results = self._convert_node_output(node_name, node_output) + res.extend(node_results) return res def _convert_node_output( @@ -63,19 +67,19 @@ def _convert_node_output( ) -> Iterable[project_models.ItemResource]: if node_name == INTERRUPT_NODE_NAME: yield from self.hitl_helper.convert_interrupts(node_output) - - message_arr = node_output.get("messages") - if not message_arr or not isinstance(message_arr, Collection): - logger.warning(f"No messages found in node {node_name} output: {node_output}") - return - - for message in message_arr: - try: - converted = self.convert_output_message(message) - if converted: - yield converted - except Exception as e: - logger.error(f"Error converting message {message}: {e}") + else: + message_arr = node_output.get("messages") + if not message_arr or not isinstance(message_arr, Collection): + logger.warning(f"No messages found in node {node_name} output: {node_output}") + return + + for message in message_arr: + try: + converted = self.convert_output_message(message) + if converted: + yield converted + except Exception as e: + logger.error(f"Error converting message {message}: {e}") def convert_output_message(self, output_message: AnyMessage): # pylint: disable=inconsistent-return-statements # Implement the conversion logic for 
inner inputs diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py index e511f5bbf915..c221910218f4 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py @@ -52,12 +52,13 @@ def tool_node_wrapper(self) -> FoundryToolNodeWrappers: """Get the Foundry tool call wrappers for this chat model. Example:: - >>> from langgraph.prebuilt import ToolNode - >>> foundry_tool_bound_chat_model = FoundryToolLateBindingChatModel(...) - >>> ToolNode([...], **foundry_tool_bound_chat_model.as_wrappers()) + >>> from langgraph.prebuilt import ToolNode + >>> foundry_tool_bound_chat_model = FoundryToolLateBindingChatModel(...) + >>> ToolNode([...], **foundry_tool_bound_chat_model.as_wrappers()) :return: The Foundry tool call wrappers. :rtype: FoundryToolNodeWrappers + """ return FoundryToolCallWrapper(self._foundry_tools_to_bind).as_wrappers() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py index e66e1c554ba1..5f3c6326836b 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py @@ -18,9 +18,9 @@ class FoundryToolNodeWrappers(TypedDict): """A TypedDict for Foundry tool node wrappers. Example:: - >>> from langgraph.prebuilt import ToolNode - >>> call_wrapper = FoundryToolCallWrapper(...) - >>> ToolNode([...], **call_wrapper.as_wrappers()) + >>> from langgraph.prebuilt import ToolNode + >>> call_wrapper = FoundryToolCallWrapper(...) 
+ >>> ToolNode([...], **call_wrapper.as_wrappers()) :param wrap_tool_call: The synchronous tool call wrapper. :type wrap_tool_call: ToolCallWrapper diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.response_event_generators.rst b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.response_event_generators.rst new file mode 100644 index 000000000000..af7cc69bd859 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.response_event_generators.rst @@ -0,0 +1,74 @@ +azure.ai.agentserver.langgraph.models.response\_event\_generators package +========================================================================= + +.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators + :inherited-members: + :members: + :undoc-members: + +Submodules +---------- + +azure.ai.agentserver.langgraph.models.response\_event\_generators.item\_content\_helpers module +----------------------------------------------------------------------------------------------- + +.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.item_content_helpers + :inherited-members: + :members: + :undoc-members: + +azure.ai.agentserver.langgraph.models.response\_event\_generators.item\_resource\_helpers module +------------------------------------------------------------------------------------------------ + +.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.item_resource_helpers + :inherited-members: + :members: + :undoc-members: + +azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_content\_part\_event\_generator module +------------------------------------------------------------------------------------------------------------------ + +.. 
automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_content_part_event_generator + :inherited-members: + :members: + :undoc-members: + +azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_event\_generator module +--------------------------------------------------------------------------------------------------- + +.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_event_generator + :inherited-members: + :members: + :undoc-members: + +azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_function\_call\_argument\_event\_generator module +----------------------------------------------------------------------------------------------------------------------------- + +.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_function_call_argument_event_generator + :inherited-members: + :members: + :undoc-members: + +azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_output\_item\_event\_generator module +----------------------------------------------------------------------------------------------------------------- + +.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_output_item_event_generator + :inherited-members: + :members: + :undoc-members: + +azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_output\_text\_event\_generator module +----------------------------------------------------------------------------------------------------------------- + +.. 
automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_output_text_event_generator + :inherited-members: + :members: + :undoc-members: + +azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_stream\_event\_generator module +----------------------------------------------------------------------------------------------------------- + +.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_stream_event_generator + :inherited-members: + :members: + :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.rst b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.rst new file mode 100644 index 000000000000..aba857c3b64a --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.rst @@ -0,0 +1,82 @@ +azure.ai.agentserver.langgraph.models package +============================================= + +.. automodule:: azure.ai.agentserver.langgraph.models + :inherited-members: + :members: + :undoc-members: + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + azure.ai.agentserver.langgraph.models.response_event_generators + +Submodules +---------- + +azure.ai.agentserver.langgraph.models.human\_in\_the\_loop\_helper module +------------------------------------------------------------------------- + +.. automodule:: azure.ai.agentserver.langgraph.models.human_in_the_loop_helper + :inherited-members: + :members: + :undoc-members: + +azure.ai.agentserver.langgraph.models.human\_in\_the\_loop\_json\_helper module +------------------------------------------------------------------------------- + +.. 
automodule:: azure.ai.agentserver.langgraph.models.human_in_the_loop_json_helper + :inherited-members: + :members: + :undoc-members: + +azure.ai.agentserver.langgraph.models.response\_api\_converter module +--------------------------------------------------------------------- + +.. automodule:: azure.ai.agentserver.langgraph.models.response_api_converter + :inherited-members: + :members: + :undoc-members: + +azure.ai.agentserver.langgraph.models.response\_api\_default\_converter module +------------------------------------------------------------------------------ + +.. automodule:: azure.ai.agentserver.langgraph.models.response_api_default_converter + :inherited-members: + :members: + :undoc-members: + +azure.ai.agentserver.langgraph.models.response\_api\_non\_stream\_response\_converter module +-------------------------------------------------------------------------------------------- + +.. automodule:: azure.ai.agentserver.langgraph.models.response_api_non_stream_response_converter + :inherited-members: + :members: + :undoc-members: + +azure.ai.agentserver.langgraph.models.response\_api\_request\_converter module +------------------------------------------------------------------------------ + +.. automodule:: azure.ai.agentserver.langgraph.models.response_api_request_converter + :inherited-members: + :members: + :undoc-members: + +azure.ai.agentserver.langgraph.models.response\_api\_stream\_response\_converter module +--------------------------------------------------------------------------------------- + +.. automodule:: azure.ai.agentserver.langgraph.models.response_api_stream_response_converter + :inherited-members: + :members: + :undoc-members: + +azure.ai.agentserver.langgraph.models.utils module +-------------------------------------------------- + +.. 
automodule:: azure.ai.agentserver.langgraph.models.utils + :inherited-members: + :members: + :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.rst b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.rst new file mode 100644 index 000000000000..deefeb67fa96 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.rst @@ -0,0 +1,27 @@ +azure.ai.agentserver.langgraph package +====================================== + +.. automodule:: azure.ai.agentserver.langgraph + :inherited-members: + :members: + :undoc-members: + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + azure.ai.agentserver.langgraph.models + azure.ai.agentserver.langgraph.tools + +Submodules +---------- + +azure.ai.agentserver.langgraph.langgraph module +----------------------------------------------- + +.. automodule:: azure.ai.agentserver.langgraph.langgraph + :inherited-members: + :members: + :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.tools.rst b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.tools.rst new file mode 100644 index 000000000000..17f7ef6d2ab7 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.tools.rst @@ -0,0 +1,6 @@ +azure.ai.agentserver.langgraph.tools package +============================================ + +.. 
automodule:: azure.ai.agentserver.langgraph.tools + :members: + :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml index b970062738ee..0e8a32a3afa2 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml @@ -19,7 +19,7 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-ai-agentserver-core>=1.0.0b7", + "azure-ai-agentserver-core==1.0.0b9", "langchain>0.3.20", "langchain-openai>0.3.10", "langchain-azure-ai[opentelemetry]>=0.1.8", diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py index 84a8c8784d8b..b1894f7350d5 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py @@ -3,7 +3,7 @@ from azure.ai.agentserver.core import models from azure.ai.agentserver.core.models import projects as project_models -from azure.ai.agentserver.langgraph import models as langgraph_models +from azure.ai.agentserver.langgraph.models.response_api_request_converter import ResponseAPIMessageRequestConverter @pytest.mark.unit @@ -16,7 +16,7 @@ def test_convert_implicit_user_message(): input=[implicit_user_message], ) - converter = langgraph_models.LangGraphRequestConverter(create_response) + converter = ResponseAPIMessageRequestConverter(create_response) res = converter.convert() assert "messages" in res @@ -34,7 +34,7 @@ def test_convert_implicit_user_message_with_contents(): ] create_response = models.CreateResponse(input=[{"content": input_data}]) - converter = langgraph_models.LangGraphRequestConverter(create_response) + converter = 
ResponseAPIMessageRequestConverter(create_response) res = converter.convert() assert "messages" in res @@ -61,7 +61,7 @@ def test_convert_item_param_message(): create_response = models.CreateResponse( input=input_data, ) - converter = langgraph_models.LangGraphRequestConverter(create_response) + converter = ResponseAPIMessageRequestConverter(create_response) res = converter.convert() assert "messages" in res @@ -103,7 +103,7 @@ def test_convert_item_param_function_call_and_function_call_output(): create_response = models.CreateResponse( input=input_data, ) - converter = langgraph_models.LangGraphRequestConverter(create_response) + converter = ResponseAPIMessageRequestConverter(create_response) res = converter.convert() assert "messages" in res assert len(res["messages"]) == len(input_data) From 351b677f93d8968a6d96e59fe5555a45a8d55229 Mon Sep 17 00:00:00 2001 From: lusu-msft <68949729+lusu-msft@users.noreply.github.com> Date: Thu, 22 Jan 2026 23:47:58 -0800 Subject: [PATCH 56/94] [AgentServer] update langgraph dependency and readme (#44819) * update langgraph dependency * update readme --- .../azure-ai-agentserver-agentframework/README.md | 2 +- sdk/agentserver/azure-ai-agentserver-langgraph/README.md | 6 ------ .../azure-ai-agentserver-langgraph/pyproject.toml | 6 +++--- 3 files changed, 4 insertions(+), 10 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/README.md b/sdk/agentserver/azure-ai-agentserver-agentframework/README.md index 54d80aed48e7..a8c525cdddaa 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/README.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/README.md @@ -25,7 +25,7 @@ from azure.ai.agentserver.agentframework import from_agent_framework if __name__ == "__main__": # with this simple line, your agent will be hosted on http://localhost:8088 - from_agent_framework(my_awesome_agent).run() + from_agent_framework(agent=my_awesome_agent).run() ``` diff --git 
a/sdk/agentserver/azure-ai-agentserver-langgraph/README.md b/sdk/agentserver/azure-ai-agentserver-langgraph/README.md index 1c1eaab6837e..970c14df9a85 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/README.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/README.md @@ -28,12 +28,6 @@ if __name__ == "__main__": ``` -**Note** -If your langgraph agent was not using langgraph's builtin [MessageState](https://langchain-ai.github.io/langgraph/concepts/low_level/?h=messagesstate#messagesstate), you should implement your own `LanggraphStateConverter` and provide to `from_langgraph`. - -Reference this [example](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py) for more details. - - ## Troubleshooting First run your agent with azure-ai-agentserver-langgraph locally. diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml index 0e8a32a3afa2..40a85fe802eb 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml @@ -20,9 +20,9 @@ keywords = ["azure", "azure sdk"] dependencies = [ "azure-ai-agentserver-core==1.0.0b9", - "langchain>0.3.20", - "langchain-openai>0.3.10", - "langchain-azure-ai[opentelemetry]>=0.1.8", + "langchain>=1.0.3", + "langchain-openai>=1.0.3", + "langchain-azure-ai[opentelemetry]~=1.0.0", ] [build-system] From cc8ba6e1d7119e793c88f1b19da33abe35094fc1 Mon Sep 17 00:00:00 2001 From: Jun'an Chen Date: Fri, 23 Jan 2026 09:49:42 -0800 Subject: [PATCH 57/94] [agentserver] Add unit tests for tools in core and langgraph packages (#44818) * remove unused code in af * created subclasses for agent-framework AIAgent and WorkflowAgent * remove unused code * validate core with tox * refining agent framework adapters * refining adapters * updated minors * update from_agent_framework * resolve trailing 
whitespace * fix samples * resolve unused import and long lines * fixed pylint * fix unittest in langgraph * fix sphinx for core * fix minors * fix pylint * tools ut in core package * add cachetools min version * fix bugs for non stream converter * fix pylint * add output type check * fix minors * updated version and changelog * update required core package * fix langgraph sphinx * Put LG run context to runtime & config * langgraph ut * optimize af code * add more uts. support resource id format of project connection id --------- Co-authored-by: Lu Sun --- .../agentframework/_foundry_tools.py | 24 +- .../tests/__init__.py | 1 - .../unit_tests/agent_framework/__init__.py | 5 + .../agent_framework}/conftest.py | 0 .../test_agent_framework_input_converter.py | 0 .../ai/agentserver/core/tools/__init__.py | 2 + .../agentserver/core/tools/runtime/_facade.py | 45 +- .../tests/unit_tests/core/__init__.py | 5 + .../tests/unit_tests/core/tools/__init__.py | 4 + .../unit_tests/core/tools/client/__init__.py | 5 + .../core/tools/client/operations/__init__.py | 4 + .../test_foundry_connected_tools.py | 479 +++++++++++++++++ .../test_foundry_hosted_mcp_tools.py | 309 +++++++++++ .../core/tools/client/test_client.py | 485 +++++++++++++++++ .../core/tools/client/test_configuration.py | 25 + .../tests/unit_tests/core/tools/conftest.py | 127 +++++ .../unit_tests/core/tools/runtime/__init__.py | 4 + .../unit_tests/core/tools/runtime/conftest.py | 39 ++ .../core/tools/runtime/test_catalog.py | 349 ++++++++++++ .../core/tools/runtime/test_facade.py | 180 +++++++ .../core/tools/runtime/test_invoker.py | 198 +++++++ .../core/tools/runtime/test_resolver.py | 202 +++++++ .../core/tools/runtime/test_runtime.py | 283 ++++++++++ .../core/tools/runtime/test_starlette.py | 261 +++++++++ .../core/tools/runtime/test_user.py | 210 ++++++++ .../unit_tests/core/tools/utils/__init__.py | 4 + .../unit_tests/core/tools/utils/conftest.py | 56 ++ .../core/tools/utils/test_name_resolver.py | 260 +++++++++ 
.../ai/agentserver/langgraph/_context.py | 40 +- .../ai/agentserver/langgraph/langgraph.py | 7 +- .../agentserver/langgraph/tools/_builder.py | 13 +- .../langgraph/tools/_chat_model.py | 19 +- .../agentserver/langgraph/tools/_tool_node.py | 2 +- .../tool_client_example/graph_agent_tool.py | 104 ++++ .../tests/__init__.py | 1 - .../tests/unit_tests/langgraph/__init__.py | 5 + .../{ => unit_tests/langgraph}/conftest.py | 0 .../test_langgraph_request_converter.py | 0 .../unit_tests/langgraph/tools/__init__.py | 5 + .../unit_tests/langgraph/tools/conftest.py | 271 ++++++++++ .../langgraph/tools/test_agent_integration.py | 404 ++++++++++++++ .../langgraph/tools/test_builder.py | 109 ++++ .../langgraph/tools/test_chat_model.py | 277 ++++++++++ .../langgraph/tools/test_context.py | 36 ++ .../langgraph/tools/test_middleware.py | 197 +++++++ .../langgraph/tools/test_resolver.py | 502 ++++++++++++++++++ .../langgraph/tools/test_tool_node.py | 179 +++++++ 47 files changed, 5697 insertions(+), 40 deletions(-) delete mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/tests/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/agent_framework/__init__.py rename sdk/agentserver/azure-ai-agentserver-agentframework/tests/{ => unit_tests/agent_framework}/conftest.py (100%) rename sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/{ => agent_framework}/test_agent_framework_input_converter.py (100%) create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/operations/__init__.py create mode 100644 
sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/operations/test_foundry_connected_tools.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/operations/test_foundry_hosted_mcp_tools.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/test_client.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/test_configuration.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/conftest.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/conftest.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_catalog.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_facade.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_invoker.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_resolver.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_runtime.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_starlette.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_user.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/utils/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/utils/conftest.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/utils/test_name_resolver.py create mode 100644 
sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/graph_agent_tool.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/__init__.py rename sdk/agentserver/azure-ai-agentserver-langgraph/tests/{ => unit_tests/langgraph}/conftest.py (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/{ => langgraph}/test_langgraph_request_converter.py (100%) create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/conftest.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_agent_integration.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_builder.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_chat_model.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_context.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_middleware.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_resolver.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_tool_node.py diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py index 78d8108ed96c..64120308a872 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py +++ 
b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py @@ -13,7 +13,7 @@ from azure.ai.agentserver.core import AgentServerContext from azure.ai.agentserver.core.logger import get_logger -from azure.ai.agentserver.core.tools import FoundryToolLike, ResolvedFoundryTool +from azure.ai.agentserver.core.tools import FoundryToolLike, ResolvedFoundryTool, ensure_foundry_tool logger = get_logger() @@ -45,7 +45,7 @@ def __init__( self, tools: Sequence[FoundryToolLike], ) -> None: - self._allowed_tools: List[FoundryToolLike] = list(tools) + self._allowed_tools: List[FoundryToolLike] = [ensure_foundry_tool(tool) for tool in tools] async def list_tools(self) -> List[AIFunction]: server_context = AgentServerContext.get() @@ -71,7 +71,7 @@ def _to_aifunction(self, foundry_tool: "ResolvedFoundryTool") -> AIFunction: # Build field definitions for the Pydantic model field_definitions: Dict[str, Any] = {} for field_name, field_info in properties.items(): - field_type = self._json_schema_type_to_python(field_info.type or "string") + field_type = field_info.type.py_type field_description = field_info.description or "" is_required = field_name in required_fields @@ -107,24 +107,6 @@ async def tool_func(**kwargs: Any) -> Any: input_model=input_model ) - def _json_schema_type_to_python(self, json_type: str) -> type: - """Convert JSON schema type to Python type. - - :param json_type: The JSON schema type string. - :type json_type: str - :return: The corresponding Python type. 
- :rtype: type - """ - type_map = { - "string": str, - "number": float, - "integer": int, - "boolean": bool, - "array": list, - "object": dict, - } - return type_map.get(json_type, str) - class FoundryToolsChatMiddleware(ChatMiddleware): """Chat middleware to inject Foundry tools into ChatOptions on each call.""" diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/__init__.py deleted file mode 100644 index 4a5d26360bce..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Unit tests package diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/agent_framework/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/agent_framework/__init__.py new file mode 100644 index 000000000000..28077537d94b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/agent_framework/__init__.py @@ -0,0 +1,5 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/conftest.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/agent_framework/conftest.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-agentframework/tests/conftest.py rename to sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/agent_framework/conftest.py diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_agent_framework_input_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/agent_framework/test_agent_framework_input_converter.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_agent_framework_input_converter.py rename to sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/agent_framework/test_agent_framework_input_converter.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/__init__.py index 5b356f38c825..34c58d65cfd6 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/__init__.py @@ -15,6 +15,7 @@ FoundryConnectedTool, FoundryHostedMcpTool, FoundryTool, + FoundryToolDetails, FoundryToolProtocol, FoundryToolSource, ResolvedFoundryTool, @@ -47,6 +48,7 @@ "FoundryConnectedTool", "FoundryHostedMcpTool", "FoundryTool", + "FoundryToolDetails", "FoundryToolProtocol", "FoundryToolSource", "ResolvedFoundryTool", diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_facade.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_facade.py index 
f12d3f0db7b5..bfc4a08d9a63 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_facade.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_facade.py @@ -1,6 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- +import re from typing import Any, Dict, Union from .. import FoundryConnectedTool, FoundryHostedMcpTool @@ -45,6 +46,48 @@ def ensure_foundry_tool(tool: FoundryToolLike) -> FoundryTool: if not isinstance(project_connection_id, str) or not project_connection_id: raise InvalidToolFacadeError(f"project_connection_id is required for tool protocol {protocol}.") - return FoundryConnectedTool(protocol=protocol, project_connection_id=project_connection_id) + # Parse the connection identifier to extract the connection name + connection_name = _parse_connection_id(project_connection_id) + return FoundryConnectedTool(protocol=protocol, project_connection_id=connection_name) except ValueError: return FoundryHostedMcpTool(name=tool_type, configuration=tool) + + +# Pattern for Azure resource ID format: +# /subscriptions//resourceGroups//providers/Microsoft.CognitiveServices/accounts//projects//connections/ +_RESOURCE_ID_PATTERN = re.compile( + r"^/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.CognitiveServices/" + r"accounts/[^/]+/projects/[^/]+/connections/(?P[^/]+)$", + re.IGNORECASE, +) + + +def _parse_connection_id(connection_id: str) -> str: + """Parse the connection identifier and extract the connection name. + + Supports two formats: + 1. Simple name: "my-connection-name" + 2. Resource ID: "/subscriptions//resourceGroups//providers/Microsoft.CognitiveServices/accounts//projects//connections/" + + :param connection_id: The connection identifier, either a simple name or a full resource ID. 
+ :type connection_id: str + :return: The connection name extracted from the identifier. + :rtype: str + :raises InvalidToolFacadeError: If the connection_id format is invalid. + """ + if not connection_id: + raise InvalidToolFacadeError("Connection identifier cannot be empty.") + + # Check if it's a resource ID format (starts with /) + if connection_id.startswith("/"): + match = _RESOURCE_ID_PATTERN.match(connection_id) + if not match: + raise InvalidToolFacadeError( + f"Invalid resource ID format for connection: '{connection_id}'. " + "Expected format: /subscriptions//resourceGroups//providers/" + "Microsoft.CognitiveServices/accounts//projects//connections/" + ) + return match.group("name") + + # Otherwise, treat it as a simple connection name + return connection_id diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/__init__.py new file mode 100644 index 000000000000..28077537d94b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/__init__.py @@ -0,0 +1,5 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/__init__.py new file mode 100644 index 000000000000..d02a9af6c5f6 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- + diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/__init__.py new file mode 100644 index 000000000000..28077537d94b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/__init__.py @@ -0,0 +1,5 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/operations/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/operations/__init__.py new file mode 100644 index 000000000000..d02a9af6c5f6 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/operations/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/operations/test_foundry_connected_tools.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/operations/test_foundry_connected_tools.py new file mode 100644 index 000000000000..e7273f37a7e7 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/operations/test_foundry_connected_tools.py @@ -0,0 +1,479 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Unit tests for FoundryConnectedToolsOperations - testing only public methods.""" +import pytest +from unittest.mock import AsyncMock, MagicMock + +from azure.ai.agentserver.core.tools.client._models import ( + FoundryConnectedTool, + FoundryToolDetails, +) +from azure.ai.agentserver.core.tools.client.operations._foundry_connected_tools import ( + FoundryConnectedToolsOperations, +) +from azure.ai.agentserver.core.tools._exceptions import OAuthConsentRequiredError, ToolInvocationError + +from ...conftest import create_mock_http_response + + +class TestFoundryConnectedToolsOperationsListTools: + """Tests for FoundryConnectedToolsOperations.list_tools public method.""" + + @pytest.mark.asyncio + async def test_list_tools_with_empty_list_returns_empty(self): + """Test list_tools returns empty when tools list is empty.""" + mock_client = AsyncMock() + ops = FoundryConnectedToolsOperations(mock_client) + + result = await ops.list_tools([], None, "test-agent") + + assert result == [] + # Should not make any HTTP request + mock_client.send_request.assert_not_called() + + @pytest.mark.asyncio + async def test_list_tools_returns_tools_from_server( + self, + sample_connected_tool, + sample_user_info + ): + """Test list_tools returns tools from server response.""" + mock_client = AsyncMock() + + response_data = { + "tools": [ + { + "remoteServer": { + "protocol": sample_connected_tool.protocol, + "projectConnectionId": sample_connected_tool.project_connection_id + }, + "manifest": [ + { + "name": "remote_tool", + "description": "A remote connected tool", + "parameters": { + "type": "object", + "properties": { + "input": {"type": "string"} + } + } + } + ] + } + ] + } + mock_response = create_mock_http_response(200, response_data) + mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryConnectedToolsOperations(mock_client) + result = list(await 
ops.list_tools([sample_connected_tool], sample_user_info, "test-agent")) + + assert len(result) == 1 + definition, details = result[0] + assert definition == sample_connected_tool + assert isinstance(details, FoundryToolDetails) + assert details.name == "remote_tool" + assert details.description == "A remote connected tool" + + @pytest.mark.asyncio + async def test_list_tools_without_user_info(self, sample_connected_tool): + """Test list_tools works without user info (local execution).""" + mock_client = AsyncMock() + + response_data = { + "tools": [ + { + "remoteServer": { + "protocol": sample_connected_tool.protocol, + "projectConnectionId": sample_connected_tool.project_connection_id + }, + "manifest": [ + { + "name": "tool_no_user", + "description": "Tool without user", + "parameters": {"type": "object", "properties": {}} + } + ] + } + ] + } + mock_response = create_mock_http_response(200, response_data) + mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryConnectedToolsOperations(mock_client) + result = list(await ops.list_tools([sample_connected_tool], None, "test-agent")) + + assert len(result) == 1 + assert result[0][1].name == "tool_no_user" + + @pytest.mark.asyncio + async def test_list_tools_with_multiple_connections(self, sample_user_info): + """Test list_tools with multiple connected tool definitions.""" + mock_client = AsyncMock() + + tool1 = FoundryConnectedTool(protocol="mcp", project_connection_id="conn-1") + tool2 = FoundryConnectedTool(protocol="a2a", project_connection_id="conn-2") + + response_data = { + "tools": [ + { + "remoteServer": { + "protocol": "mcp", + "projectConnectionId": "conn-1" + }, + "manifest": [ + { + "name": "tool_from_conn1", + "description": "From connection 1", + "parameters": {"type": "object", "properties": {}} + } + ] + }, + { + "remoteServer": { + "protocol": "a2a", + "projectConnectionId": "conn-2" + }, + "manifest": [ + { + "name": "tool_from_conn2", + 
"description": "From connection 2", + "parameters": {"type": "object", "properties": {}} + } + ] + } + ] + } + mock_response = create_mock_http_response(200, response_data) + mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryConnectedToolsOperations(mock_client) + result = list(await ops.list_tools([tool1, tool2], sample_user_info, "test-agent")) + + assert len(result) == 2 + names = {r[1].name for r in result} + assert names == {"tool_from_conn1", "tool_from_conn2"} + + @pytest.mark.asyncio + async def test_list_tools_filters_by_connection_id(self, sample_user_info): + """Test list_tools only returns tools from requested connections.""" + mock_client = AsyncMock() + + requested_tool = FoundryConnectedTool(protocol="mcp", project_connection_id="requested-conn") + + # Server returns tools from multiple connections, but we only requested one + response_data = { + "tools": [ + { + "remoteServer": { + "protocol": "mcp", + "projectConnectionId": "requested-conn" + }, + "manifest": [ + { + "name": "requested_tool", + "description": "Requested", + "parameters": {"type": "object", "properties": {}} + } + ] + }, + { + "remoteServer": { + "protocol": "mcp", + "projectConnectionId": "unrequested-conn" + }, + "manifest": [ + { + "name": "unrequested_tool", + "description": "Not requested", + "parameters": {"type": "object", "properties": {}} + } + ] + } + ] + } + mock_response = create_mock_http_response(200, response_data) + mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryConnectedToolsOperations(mock_client) + result = list(await ops.list_tools([requested_tool], sample_user_info, "test-agent")) + + # Should only return tools from requested connection + assert len(result) == 1 + assert result[0][1].name == "requested_tool" + + @pytest.mark.asyncio + async def test_list_tools_multiple_tools_per_connection( + self, + sample_connected_tool, + 
sample_user_info + ): + """Test list_tools returns multiple tools from same connection.""" + mock_client = AsyncMock() + + response_data = { + "tools": [ + { + "remoteServer": { + "protocol": sample_connected_tool.protocol, + "projectConnectionId": sample_connected_tool.project_connection_id + }, + "manifest": [ + { + "name": "tool_one", + "description": "First tool", + "parameters": {"type": "object", "properties": {}} + }, + { + "name": "tool_two", + "description": "Second tool", + "parameters": {"type": "object", "properties": {}} + }, + { + "name": "tool_three", + "description": "Third tool", + "parameters": {"type": "object", "properties": {}} + } + ] + } + ] + } + mock_response = create_mock_http_response(200, response_data) + mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryConnectedToolsOperations(mock_client) + result = list(await ops.list_tools([sample_connected_tool], sample_user_info, "test-agent")) + + assert len(result) == 3 + names = {r[1].name for r in result} + assert names == {"tool_one", "tool_two", "tool_three"} + + @pytest.mark.asyncio + async def test_list_tools_raises_oauth_consent_error( + self, + sample_connected_tool, + sample_user_info + ): + """Test list_tools raises OAuthConsentRequiredError when consent needed.""" + mock_client = AsyncMock() + + response_data = { + "type": "OAuthConsentRequired", + "toolResult": { + "consentUrl": "https://login.microsoftonline.com/consent", + "message": "User consent is required to access this resource", + "projectConnectionId": sample_connected_tool.project_connection_id + } + } + mock_response = create_mock_http_response(200, response_data) + mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryConnectedToolsOperations(mock_client) + + with pytest.raises(OAuthConsentRequiredError) as exc_info: + list(await ops.list_tools([sample_connected_tool], sample_user_info, 
"test-agent")) + + assert exc_info.value.consent_url == "https://login.microsoftonline.com/consent" + assert "consent" in exc_info.value.message.lower() + + +class TestFoundryConnectedToolsOperationsInvokeTool: + """Tests for FoundryConnectedToolsOperations.invoke_tool public method.""" + + @pytest.mark.asyncio + async def test_invoke_tool_returns_result_value( + self, + sample_resolved_connected_tool, + sample_user_info + ): + """Test invoke_tool returns the result value from server.""" + mock_client = AsyncMock() + + expected_result = {"data": "some output", "status": "success"} + response_data = {"toolResult": expected_result} + mock_response = create_mock_http_response(200, response_data) + mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryConnectedToolsOperations(mock_client) + result = await ops.invoke_tool( + sample_resolved_connected_tool, + {"input": "test"}, + sample_user_info, + "test-agent" + ) + + assert result == expected_result + + @pytest.mark.asyncio + async def test_invoke_tool_without_user_info(self, sample_resolved_connected_tool): + """Test invoke_tool works without user info (local execution).""" + mock_client = AsyncMock() + + response_data = {"toolResult": "local result"} + mock_response = create_mock_http_response(200, response_data) + mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryConnectedToolsOperations(mock_client) + result = await ops.invoke_tool( + sample_resolved_connected_tool, + {}, + None, # No user info + "test-agent" + ) + + assert result == "local result" + + @pytest.mark.asyncio + async def test_invoke_tool_with_complex_arguments( + self, + sample_resolved_connected_tool, + sample_user_info + ): + """Test invoke_tool handles complex nested arguments.""" + mock_client = AsyncMock() + + response_data = {"toolResult": "processed"} + mock_response = create_mock_http_response(200, response_data) + 
mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryConnectedToolsOperations(mock_client) + complex_args = { + "query": "search term", + "filters": { + "date_range": {"start": "2025-01-01", "end": "2025-12-31"}, + "categories": ["A", "B", "C"] + }, + "limit": 50 + } + + result = await ops.invoke_tool( + sample_resolved_connected_tool, + complex_args, + sample_user_info, + "test-agent" + ) + + assert result == "processed" + mock_client.send_request.assert_called_once() + + @pytest.mark.asyncio + async def test_invoke_tool_returns_none_for_empty_result( + self, + sample_resolved_connected_tool, + sample_user_info + ): + """Test invoke_tool returns None when server returns no result.""" + mock_client = AsyncMock() + + # Server returns empty response (no toolResult) + response_data = { + "toolResult": None + } + mock_response = create_mock_http_response(200, response_data) + mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryConnectedToolsOperations(mock_client) + result = await ops.invoke_tool( + sample_resolved_connected_tool, + {}, + sample_user_info, + "test-agent" + ) + + assert result is None + + @pytest.mark.asyncio + async def test_invoke_tool_with_mcp_tool_raises_error( + self, + sample_resolved_mcp_tool, + sample_user_info + ): + """Test invoke_tool raises ToolInvocationError for non-connected tool.""" + mock_client = AsyncMock() + ops = FoundryConnectedToolsOperations(mock_client) + + with pytest.raises(ToolInvocationError) as exc_info: + await ops.invoke_tool( + sample_resolved_mcp_tool, + {}, + sample_user_info, + "test-agent" + ) + + assert "not a Foundry connected tool" in str(exc_info.value) + # Should not make any HTTP request + mock_client.send_request.assert_not_called() + + @pytest.mark.asyncio + async def test_invoke_tool_raises_oauth_consent_error( + self, + sample_resolved_connected_tool, + sample_user_info + ): + 
"""Test invoke_tool raises OAuthConsentRequiredError when consent needed.""" + mock_client = AsyncMock() + + response_data = { + "type": "OAuthConsentRequired", + "toolResult": { + "consentUrl": "https://login.microsoftonline.com/oauth/consent", + "message": "Please provide consent to continue", + "projectConnectionId": "test-connection-id" + } + } + mock_response = create_mock_http_response(200, response_data) + mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryConnectedToolsOperations(mock_client) + + with pytest.raises(OAuthConsentRequiredError) as exc_info: + await ops.invoke_tool( + sample_resolved_connected_tool, + {"input": "test"}, + sample_user_info, + "test-agent" + ) + + assert "https://login.microsoftonline.com/oauth/consent" in exc_info.value.consent_url + + @pytest.mark.asyncio + async def test_invoke_tool_with_different_agent_names( + self, + sample_resolved_connected_tool, + sample_user_info + ): + """Test invoke_tool uses correct agent name in request.""" + mock_client = AsyncMock() + + response_data = {"toolResult": "result"} + mock_response = create_mock_http_response(200, response_data) + mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryConnectedToolsOperations(mock_client) + + # Invoke with different agent names + for agent_name in ["agent-1", "my-custom-agent", "production-agent"]: + await ops.invoke_tool( + sample_resolved_connected_tool, + {}, + sample_user_info, + agent_name + ) + + # Verify the correct path was used + call_args = mock_client.post.call_args + assert agent_name in call_args[0][0] + diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/operations/test_foundry_hosted_mcp_tools.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/operations/test_foundry_hosted_mcp_tools.py new file mode 100644 index 000000000000..473b27cc8768 --- 
/dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/operations/test_foundry_hosted_mcp_tools.py @@ -0,0 +1,309 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Unit tests for FoundryMcpToolsOperations - testing only public methods.""" +import pytest +from unittest.mock import AsyncMock, MagicMock + +from azure.ai.agentserver.core.tools.client._models import ( + FoundryHostedMcpTool, + FoundryToolDetails, + ResolvedFoundryTool, + SchemaDefinition, + SchemaProperty, + SchemaType, +) +from azure.ai.agentserver.core.tools.client.operations._foundry_hosted_mcp_tools import ( + FoundryMcpToolsOperations, +) +from azure.ai.agentserver.core.tools._exceptions import ToolInvocationError + +from ...conftest import create_mock_http_response + + +class TestFoundryMcpToolsOperationsListTools: + """Tests for FoundryMcpToolsOperations.list_tools public method.""" + + @pytest.mark.asyncio + async def test_list_tools_with_empty_list_returns_empty(self): + """Test list_tools returns empty when allowed_tools is empty.""" + mock_client = AsyncMock() + ops = FoundryMcpToolsOperations(mock_client) + + result = await ops.list_tools([]) + + assert result == [] + # Should not make any HTTP request + mock_client.send_request.assert_not_called() + + @pytest.mark.asyncio + async def test_list_tools_returns_matching_tools(self, sample_hosted_mcp_tool): + """Test list_tools returns tools that match the allowed list.""" + mock_client = AsyncMock() + + response_data = { + "jsonrpc": "2.0", + "id": 1, + "result": { + "tools": [ + { + "name": sample_hosted_mcp_tool.name, + "description": "Test MCP tool", + "inputSchema": { + "type": "object", + "properties": { + "query": {"type": "string", "description": "Search query"} + } + } + } + ] + } + } + mock_response = create_mock_http_response(200, response_data) + 
mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryMcpToolsOperations(mock_client) + result = list(await ops.list_tools([sample_hosted_mcp_tool])) + + assert len(result) == 1 + definition, details = result[0] + assert definition == sample_hosted_mcp_tool + assert isinstance(details, FoundryToolDetails) + assert details.name == sample_hosted_mcp_tool.name + assert details.description == "Test MCP tool" + + @pytest.mark.asyncio + async def test_list_tools_filters_out_non_allowed_tools(self, sample_hosted_mcp_tool): + """Test list_tools only returns tools in the allowed list.""" + mock_client = AsyncMock() + + # Server returns multiple tools but only one is allowed + response_data = { + "jsonrpc": "2.0", + "id": 1, + "result": { + "tools": [ + { + "name": sample_hosted_mcp_tool.name, + "description": "Allowed tool", + "inputSchema": {"type": "object", "properties": {}} + }, + { + "name": "other_tool_not_in_list", + "description": "Not allowed tool", + "inputSchema": {"type": "object", "properties": {}} + }, + { + "name": "another_unlisted_tool", + "description": "Also not allowed", + "inputSchema": {"type": "object", "properties": {}} + } + ] + } + } + mock_response = create_mock_http_response(200, response_data) + mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryMcpToolsOperations(mock_client) + result = list(await ops.list_tools([sample_hosted_mcp_tool])) + + assert len(result) == 1 + assert result[0][1].name == sample_hosted_mcp_tool.name + + @pytest.mark.asyncio + async def test_list_tools_with_multiple_allowed_tools(self): + """Test list_tools with multiple tools in allowed list.""" + mock_client = AsyncMock() + + tool1 = FoundryHostedMcpTool(name="tool_one") + tool2 = FoundryHostedMcpTool(name="tool_two") + + response_data = { + "jsonrpc": "2.0", + "id": 1, + "result": { + "tools": [ + { + "name": "tool_one", + "description": 
"First tool", + "inputSchema": {"type": "object", "properties": {}} + }, + { + "name": "tool_two", + "description": "Second tool", + "inputSchema": {"type": "object", "properties": {}} + } + ] + } + } + mock_response = create_mock_http_response(200, response_data) + mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryMcpToolsOperations(mock_client) + result = list(await ops.list_tools([tool1, tool2])) + + assert len(result) == 2 + names = {r[1].name for r in result} + assert names == {"tool_one", "tool_two"} + + @pytest.mark.asyncio + async def test_list_tools_preserves_tool_metadata(self): + """Test list_tools preserves metadata from server response.""" + mock_client = AsyncMock() + + tool = FoundryHostedMcpTool(name="tool_with_meta") + + response_data = { + "jsonrpc": "2.0", + "id": 1, + "result": { + "tools": [ + { + "name": "tool_with_meta", + "description": "Tool with metadata", + "inputSchema": { + "type": "object", + "properties": { + "param1": {"type": "string"} + }, + "required": ["param1"] + }, + "_meta": { + "type": "object", + "properties": { + "model": {"type": "string"} + } + } + } + ] + } + } + mock_response = create_mock_http_response(200, response_data) + mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryMcpToolsOperations(mock_client) + result = list(await ops.list_tools([tool])) + + assert len(result) == 1 + details = result[0][1] + assert details.metadata is not None + + +class TestFoundryMcpToolsOperationsInvokeTool: + """Tests for FoundryMcpToolsOperations.invoke_tool public method.""" + + @pytest.mark.asyncio + async def test_invoke_tool_returns_server_response(self, sample_resolved_mcp_tool): + """Test invoke_tool returns the response from server.""" + mock_client = AsyncMock() + + expected_response = { + "jsonrpc": "2.0", + "id": 2, + "result": { + "content": [{"type": "text", "text": "Hello World"}] + } + } + 
mock_response = create_mock_http_response(200, expected_response) + mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryMcpToolsOperations(mock_client) + result = await ops.invoke_tool(sample_resolved_mcp_tool, {"query": "test"}) + + assert result == expected_response + + @pytest.mark.asyncio + async def test_invoke_tool_with_empty_arguments(self, sample_resolved_mcp_tool): + """Test invoke_tool works with empty arguments.""" + mock_client = AsyncMock() + + expected_response = {"result": "success"} + mock_response = create_mock_http_response(200, expected_response) + mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryMcpToolsOperations(mock_client) + result = await ops.invoke_tool(sample_resolved_mcp_tool, {}) + + assert result == expected_response + + @pytest.mark.asyncio + async def test_invoke_tool_with_complex_arguments(self, sample_resolved_mcp_tool): + """Test invoke_tool handles complex nested arguments.""" + mock_client = AsyncMock() + + mock_response = create_mock_http_response(200, {"result": "ok"}) + mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryMcpToolsOperations(mock_client) + complex_args = { + "text": "sample text", + "options": { + "temperature": 0.7, + "max_tokens": 100 + }, + "tags": ["tag1", "tag2"] + } + + result = await ops.invoke_tool(sample_resolved_mcp_tool, complex_args) + + assert result == {"result": "ok"} + mock_client.send_request.assert_called_once() + + @pytest.mark.asyncio + async def test_invoke_tool_with_connected_tool_raises_error( + self, + sample_resolved_connected_tool + ): + """Test invoke_tool raises ToolInvocationError for non-MCP tool.""" + mock_client = AsyncMock() + ops = FoundryMcpToolsOperations(mock_client) + + with pytest.raises(ToolInvocationError) as exc_info: + await ops.invoke_tool(sample_resolved_connected_tool, 
{}) + + assert "not a Foundry-hosted MCP tool" in str(exc_info.value) + # Should not make any HTTP request + mock_client.send_request.assert_not_called() + + @pytest.mark.asyncio + async def test_invoke_tool_with_configuration_and_metadata(self): + """Test invoke_tool handles tool with configuration and metadata.""" + mock_client = AsyncMock() + + # Create tool with configuration + tool_def = FoundryHostedMcpTool( + name="image_generation", + configuration={"model_deployment_name": "dall-e-3"} + ) + + # Create tool details with metadata schema + meta_schema = SchemaDefinition( + type=SchemaType.OBJECT, + properties={ + "model": SchemaProperty(type=SchemaType.STRING) + } + ) + details = FoundryToolDetails( + name="image_generation", + description="Generate images", + input_schema=SchemaDefinition(type=SchemaType.OBJECT, properties={}), + metadata=meta_schema + ) + resolved_tool = ResolvedFoundryTool(definition=tool_def, details=details) + + mock_response = create_mock_http_response(200, {"result": "image_url"}) + mock_client.send_request.return_value = mock_response + mock_client.post.return_value = MagicMock() + + ops = FoundryMcpToolsOperations(mock_client) + result = await ops.invoke_tool(resolved_tool, {"prompt": "a cat"}) + + assert result == {"result": "image_url"} + diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/test_client.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/test_client.py new file mode 100644 index 000000000000..de60f545e089 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/test_client.py @@ -0,0 +1,485 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Unit tests for FoundryToolClient - testing only public methods.""" +import pytest +from unittest.mock import AsyncMock, MagicMock, patch + +from azure.ai.agentserver.core.tools.client._client import FoundryToolClient +from azure.ai.agentserver.core.tools.client._models import ( + FoundryToolDetails, + FoundryToolSource, + ResolvedFoundryTool, +) +from azure.ai.agentserver.core.tools._exceptions import ToolInvocationError + +from ..conftest import create_mock_http_response + + +class TestFoundryToolClientInit: + """Tests for FoundryToolClient.__init__ public method.""" + + @patch("azure.ai.agentserver.core.tools.client._client.AsyncPipelineClient") + def test_init_with_valid_endpoint_and_credential(self, mock_pipeline_client_class, mock_credential): + """Test client can be initialized with valid endpoint and credential.""" + endpoint = "https://fake-project-endpoint.site" + + client = FoundryToolClient(endpoint, mock_credential) + + # Verify client was created with correct base_url + call_kwargs = mock_pipeline_client_class.call_args + assert call_kwargs[1]["base_url"] == endpoint + assert client is not None + + +class TestFoundryToolClientListTools: + """Tests for FoundryToolClient.list_tools public method.""" + + @pytest.mark.asyncio + @patch("azure.ai.agentserver.core.tools.client._client.AsyncPipelineClient") + async def test_list_tools_empty_collection_returns_empty_list( + self, + mock_pipeline_client_class, + mock_credential + ): + """Test list_tools returns empty list when given empty collection.""" + client = FoundryToolClient("https://fake-project-endpoint.site", mock_credential) + + result = await client.list_tools([], agent_name="test-agent") + + assert result == [] + + @pytest.mark.asyncio + @patch("azure.ai.agentserver.core.tools.client._client.AsyncPipelineClient") + async def test_list_tools_with_single_mcp_tool_returns_resolved_tools( + self, + mock_pipeline_client_class, + 
mock_credential, + sample_hosted_mcp_tool + ): + """Test list_tools with a single MCP tool returns resolved tools.""" + mock_client_instance = AsyncMock() + mock_pipeline_client_class.return_value = mock_client_instance + + # Mock HTTP response for MCP tools listing + response_data = { + "jsonrpc": "2.0", + "id": 1, + "result": { + "tools": [ + { + "name": sample_hosted_mcp_tool.name, + "description": "Test MCP tool description", + "inputSchema": {"type": "object", "properties": {}} + } + ] + } + } + mock_response = create_mock_http_response(200, response_data) + mock_client_instance.send_request.return_value = mock_response + mock_client_instance.post.return_value = MagicMock() + + client = FoundryToolClient("https://fake-project-endpoint.site", mock_credential) + result = await client.list_tools([sample_hosted_mcp_tool], agent_name="test-agent") + + assert len(result) == 1 + assert isinstance(result[0], ResolvedFoundryTool) + assert result[0].name == sample_hosted_mcp_tool.name + assert result[0].source == FoundryToolSource.HOSTED_MCP + + @pytest.mark.asyncio + @patch("azure.ai.agentserver.core.tools.client._client.AsyncPipelineClient") + async def test_list_tools_with_single_connected_tool_returns_resolved_tools( + self, + mock_pipeline_client_class, + mock_credential, + sample_connected_tool, + sample_user_info + ): + """Test list_tools with a single connected tool returns resolved tools.""" + mock_client_instance = AsyncMock() + mock_pipeline_client_class.return_value = mock_client_instance + + # Mock HTTP response for connected tools listing + response_data = { + "tools": [ + { + "remoteServer": { + "protocol": sample_connected_tool.protocol, + "projectConnectionId": sample_connected_tool.project_connection_id + }, + "manifest": [ + { + "name": "connected_test_tool", + "description": "Test connected tool", + "parameters": {"type": "object", "properties": {}} + } + ] + } + ] + } + mock_response = create_mock_http_response(200, response_data) + 
mock_client_instance.send_request.return_value = mock_response + mock_client_instance.post.return_value = MagicMock() + + client = FoundryToolClient("https://fake-project-endpoint.site", mock_credential) + result = await client.list_tools( + [sample_connected_tool], + agent_name="test-agent", + user=sample_user_info + ) + + assert len(result) == 1 + assert isinstance(result[0], ResolvedFoundryTool) + assert result[0].name == "connected_test_tool" + assert result[0].source == FoundryToolSource.CONNECTED + + @pytest.mark.asyncio + @patch("azure.ai.agentserver.core.tools.client._client.AsyncPipelineClient") + async def test_list_tools_with_mixed_tool_types_returns_all_resolved( + self, + mock_pipeline_client_class, + mock_credential, + sample_hosted_mcp_tool, + sample_connected_tool, + sample_user_info + ): + """Test list_tools with both MCP and connected tools returns all resolved tools.""" + mock_client_instance = AsyncMock() + mock_pipeline_client_class.return_value = mock_client_instance + + # We need to return different responses based on the request + mcp_response_data = { + "jsonrpc": "2.0", + "id": 1, + "result": { + "tools": [ + { + "name": sample_hosted_mcp_tool.name, + "description": "MCP tool", + "inputSchema": {"type": "object", "properties": {}} + } + ] + } + } + connected_response_data = { + "tools": [ + { + "remoteServer": { + "protocol": sample_connected_tool.protocol, + "projectConnectionId": sample_connected_tool.project_connection_id + }, + "manifest": [ + { + "name": "connected_tool", + "description": "Connected tool", + "parameters": {"type": "object", "properties": {}} + } + ] + } + ] + } + + # Mock to return different responses for different requests + mock_client_instance.send_request.side_effect = [ + create_mock_http_response(200, mcp_response_data), + create_mock_http_response(200, connected_response_data) + ] + mock_client_instance.post.return_value = MagicMock() + + client = FoundryToolClient("https://fake-project-endpoint.site", 
mock_credential) + result = await client.list_tools( + [sample_hosted_mcp_tool, sample_connected_tool], + agent_name="test-agent", + user=sample_user_info + ) + + assert len(result) == 2 + sources = {tool.source for tool in result} + assert FoundryToolSource.HOSTED_MCP in sources + assert FoundryToolSource.CONNECTED in sources + + @pytest.mark.asyncio + @patch("azure.ai.agentserver.core.tools.client._client.AsyncPipelineClient") + async def test_list_tools_filters_unlisted_mcp_tools( + self, + mock_pipeline_client_class, + mock_credential, + sample_hosted_mcp_tool + ): + """Test list_tools only returns tools that are in the allowed list.""" + mock_client_instance = AsyncMock() + mock_pipeline_client_class.return_value = mock_client_instance + + # Server returns more tools than requested + response_data = { + "jsonrpc": "2.0", + "id": 1, + "result": { + "tools": [ + { + "name": sample_hosted_mcp_tool.name, + "description": "Requested tool", + "inputSchema": {"type": "object", "properties": {}} + }, + { + "name": "unrequested_tool", + "description": "This tool was not requested", + "inputSchema": {"type": "object", "properties": {}} + } + ] + } + } + mock_response = create_mock_http_response(200, response_data) + mock_client_instance.send_request.return_value = mock_response + mock_client_instance.post.return_value = MagicMock() + + client = FoundryToolClient("https://fake-project-endpoint.site", mock_credential) + result = await client.list_tools([sample_hosted_mcp_tool], agent_name="test-agent") + + # Should only return the requested tool + assert len(result) == 1 + assert result[0].name == sample_hosted_mcp_tool.name + + +class TestFoundryToolClientListToolsDetails: + """Tests for FoundryToolClient.list_tools_details public method.""" + + @pytest.mark.asyncio + @patch("azure.ai.agentserver.core.tools.client._client.AsyncPipelineClient") + async def test_list_tools_details_returns_mapping_structure( + self, + mock_pipeline_client_class, + mock_credential, + 
sample_hosted_mcp_tool + ): + """Test list_tools_details returns correct mapping structure.""" + mock_client_instance = AsyncMock() + mock_pipeline_client_class.return_value = mock_client_instance + + response_data = { + "jsonrpc": "2.0", + "id": 1, + "result": { + "tools": [ + { + "name": sample_hosted_mcp_tool.name, + "description": "Test tool", + "inputSchema": {"type": "object", "properties": {}} + } + ] + } + } + mock_response = create_mock_http_response(200, response_data) + mock_client_instance.send_request.return_value = mock_response + mock_client_instance.post.return_value = MagicMock() + + client = FoundryToolClient("https://fake-project-endpoint.site", mock_credential) + result = await client.list_tools_details([sample_hosted_mcp_tool], agent_name="test-agent") + + assert isinstance(result, dict) + assert sample_hosted_mcp_tool.id in result + assert len(result[sample_hosted_mcp_tool.id]) == 1 + assert isinstance(result[sample_hosted_mcp_tool.id][0], FoundryToolDetails) + + @pytest.mark.asyncio + @patch("azure.ai.agentserver.core.tools.client._client.AsyncPipelineClient") + async def test_list_tools_details_groups_multiple_tools_by_definition( + self, + mock_pipeline_client_class, + mock_credential, + sample_hosted_mcp_tool + ): + """Test list_tools_details groups multiple tools from same source by definition ID.""" + mock_client_instance = AsyncMock() + mock_pipeline_client_class.return_value = mock_client_instance + + # Server returns multiple tools for the same MCP source + response_data = { + "jsonrpc": "2.0", + "id": 1, + "result": { + "tools": [ + { + "name": sample_hosted_mcp_tool.name, + "description": "Tool variant 1", + "inputSchema": {"type": "object", "properties": {}} + } + ] + } + } + mock_response = create_mock_http_response(200, response_data) + mock_client_instance.send_request.return_value = mock_response + mock_client_instance.post.return_value = MagicMock() + + client = FoundryToolClient("https://fake-project-endpoint.site", 
mock_credential) + result = await client.list_tools_details([sample_hosted_mcp_tool], agent_name="test-agent") + + # All tools should be grouped under the same definition ID + assert sample_hosted_mcp_tool.id in result + + +class TestFoundryToolClientInvokeTool: + """Tests for FoundryToolClient.invoke_tool public method.""" + + @pytest.mark.asyncio + @patch("azure.ai.agentserver.core.tools.client._client.AsyncPipelineClient") + async def test_invoke_mcp_tool_returns_result( + self, + mock_pipeline_client_class, + mock_credential, + sample_resolved_mcp_tool + ): + """Test invoke_tool with MCP tool returns the invocation result.""" + mock_client_instance = AsyncMock() + mock_pipeline_client_class.return_value = mock_client_instance + + expected_result = {"result": {"content": [{"text": "Hello World"}]}} + mock_response = create_mock_http_response(200, expected_result) + mock_client_instance.send_request.return_value = mock_response + mock_client_instance.post.return_value = MagicMock() + + client = FoundryToolClient("https://fake-project-endpoint.site", mock_credential) + result = await client.invoke_tool( + sample_resolved_mcp_tool, + arguments={"input": "test"}, + agent_name="test-agent" + ) + + assert result == expected_result + + @pytest.mark.asyncio + @patch("azure.ai.agentserver.core.tools.client._client.AsyncPipelineClient") + async def test_invoke_connected_tool_returns_result( + self, + mock_pipeline_client_class, + mock_credential, + sample_resolved_connected_tool, + sample_user_info + ): + """Test invoke_tool with connected tool returns the invocation result.""" + mock_client_instance = AsyncMock() + mock_pipeline_client_class.return_value = mock_client_instance + + expected_value = {"output": "Connected tool result"} + response_data = {"toolResult": expected_value} + mock_response = create_mock_http_response(200, response_data) + mock_client_instance.send_request.return_value = mock_response + mock_client_instance.post.return_value = MagicMock() + + 
client = FoundryToolClient("https://fake-project-endpoint.site", mock_credential) + result = await client.invoke_tool( + sample_resolved_connected_tool, + arguments={"input": "test"}, + agent_name="test-agent", + user=sample_user_info + ) + + assert result == expected_value + + @pytest.mark.asyncio + @patch("azure.ai.agentserver.core.tools.client._client.AsyncPipelineClient") + async def test_invoke_tool_with_complex_arguments( + self, + mock_pipeline_client_class, + mock_credential, + sample_resolved_mcp_tool + ): + """Test invoke_tool correctly passes complex arguments.""" + mock_client_instance = AsyncMock() + mock_pipeline_client_class.return_value = mock_client_instance + + mock_response = create_mock_http_response(200, {"result": "success"}) + mock_client_instance.send_request.return_value = mock_response + mock_client_instance.post.return_value = MagicMock() + + client = FoundryToolClient("https://fake-project-endpoint.site", mock_credential) + complex_args = { + "string_param": "value", + "number_param": 42, + "bool_param": True, + "list_param": [1, 2, 3], + "nested_param": {"key": "value"} + } + + result = await client.invoke_tool( + sample_resolved_mcp_tool, + arguments=complex_args, + agent_name="test-agent" + ) + + # Verify request was made + mock_client_instance.send_request.assert_called_once() + + @pytest.mark.asyncio + @patch("azure.ai.agentserver.core.tools.client._client.AsyncPipelineClient") + async def test_invoke_tool_with_unsupported_source_raises_error( + self, + mock_pipeline_client_class, + mock_credential, + sample_tool_details + ): + """Test invoke_tool raises ToolInvocationError for unsupported tool source.""" + mock_client_instance = AsyncMock() + mock_pipeline_client_class.return_value = mock_client_instance + + # Create a mock tool with unsupported source + mock_definition = MagicMock() + mock_definition.source = "unsupported_source" + mock_tool = MagicMock(spec=ResolvedFoundryTool) + mock_tool.definition = mock_definition + 
mock_tool.source = "unsupported_source" + mock_tool.details = sample_tool_details + + client = FoundryToolClient("https://fake-project-endpoint.site", mock_credential) + + with pytest.raises(ToolInvocationError) as exc_info: + await client.invoke_tool( + mock_tool, + arguments={"input": "test"}, + agent_name="test-agent" + ) + + assert "Unsupported tool source" in str(exc_info.value) + + +class TestFoundryToolClientClose: + """Tests for FoundryToolClient.close public method.""" + + @pytest.mark.asyncio + @patch("azure.ai.agentserver.core.tools.client._client.AsyncPipelineClient") + async def test_close_closes_underlying_client( + self, + mock_pipeline_client_class, + mock_credential + ): + """Test close() properly closes the underlying HTTP client.""" + mock_client_instance = AsyncMock() + mock_pipeline_client_class.return_value = mock_client_instance + + client = FoundryToolClient("https://fake-project-endpoint.site", mock_credential) + await client.close() + + mock_client_instance.close.assert_called_once() + + +class TestFoundryToolClientContextManager: + """Tests for FoundryToolClient async context manager protocol.""" + + @pytest.mark.asyncio + @patch("azure.ai.agentserver.core.tools.client._client.AsyncPipelineClient") + async def test_async_context_manager_enters_and_exits( + self, + mock_pipeline_client_class, + mock_credential + ): + """Test client can be used as async context manager.""" + mock_client_instance = AsyncMock() + mock_pipeline_client_class.return_value = mock_client_instance + + async with FoundryToolClient("https://fake-project-endpoint.site", mock_credential) as client: + assert client is not None + mock_client_instance.__aenter__.assert_called_once() + + mock_client_instance.__aexit__.assert_called_once() + diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/test_configuration.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/test_configuration.py new file mode 100644 
index 000000000000..2f3c2710a3fc --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/test_configuration.py @@ -0,0 +1,25 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Unit tests for FoundryToolClientConfiguration.""" + +from azure.core.pipeline import policies + +from azure.ai.agentserver.core.tools.client._configuration import FoundryToolClientConfiguration + + +class TestFoundryToolClientConfiguration: + """Tests for FoundryToolClientConfiguration class.""" + + def test_init_creates_all_required_policies(self, mock_credential): + """Test that initialization creates all required pipeline policies.""" + config = FoundryToolClientConfiguration(mock_credential) + + assert isinstance(config.retry_policy, policies.AsyncRetryPolicy) + assert isinstance(config.logging_policy, policies.NetworkTraceLoggingPolicy) + assert isinstance(config.request_id_policy, policies.RequestIdPolicy) + assert isinstance(config.http_logging_policy, policies.HttpLoggingPolicy) + assert isinstance(config.user_agent_policy, policies.UserAgentPolicy) + assert isinstance(config.authentication_policy, policies.AsyncBearerTokenCredentialPolicy) + assert isinstance(config.redirect_policy, policies.AsyncRedirectPolicy) + diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/conftest.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/conftest.py new file mode 100644 index 000000000000..8849ce8aafbf --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/conftest.py @@ -0,0 +1,127 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Shared fixtures for tools unit tests.""" +import json +from typing import Any, Dict, Optional +from unittest.mock import AsyncMock, MagicMock + +import pytest + +from azure.ai.agentserver.core.tools.client._models import ( + FoundryConnectedTool, + FoundryHostedMcpTool, + FoundryToolDetails, + ResolvedFoundryTool, + SchemaDefinition, + SchemaProperty, + SchemaType, + UserInfo, +) + + +@pytest.fixture +def mock_credential(): + """Create a mock async token credential.""" + credential = AsyncMock() + credential.get_token = AsyncMock(return_value=MagicMock(token="test-token")) + return credential + + +@pytest.fixture +def sample_user_info(): + """Create a sample UserInfo instance.""" + return UserInfo(object_id="test-object-id", tenant_id="test-tenant-id") + + +@pytest.fixture +def sample_hosted_mcp_tool(): + """Create a sample FoundryHostedMcpTool.""" + return FoundryHostedMcpTool( + name="test_mcp_tool", + configuration={"model_deployment_name": "gpt-4"} + ) + + +@pytest.fixture +def sample_connected_tool(): + """Create a sample FoundryConnectedTool.""" + return FoundryConnectedTool( + protocol="mcp", + project_connection_id="test-connection-id" + ) + + +@pytest.fixture +def sample_schema_definition(): + """Create a sample SchemaDefinition.""" + return SchemaDefinition( + type=SchemaType.OBJECT, + properties={ + "input": SchemaProperty(type=SchemaType.STRING, description="Input parameter") + }, + required={"input"} + ) + + +@pytest.fixture +def sample_tool_details(sample_schema_definition): + """Create a sample FoundryToolDetails.""" + return FoundryToolDetails( + name="test_tool", + description="A test tool", + input_schema=sample_schema_definition + ) + + +@pytest.fixture +def sample_resolved_mcp_tool(sample_hosted_mcp_tool, sample_tool_details): + """Create a sample ResolvedFoundryTool for MCP.""" + return ResolvedFoundryTool( + definition=sample_hosted_mcp_tool, + details=sample_tool_details + ) + + 
+@pytest.fixture +def sample_resolved_connected_tool(sample_connected_tool, sample_tool_details): + """Create a sample ResolvedFoundryTool for connected tools.""" + return ResolvedFoundryTool( + definition=sample_connected_tool, + details=sample_tool_details + ) + + +def create_mock_http_response( + status_code: int = 200, + json_data: Optional[Dict[str, Any]] = None +) -> AsyncMock: + """Create a mock HTTP response that simulates real Azure SDK response behavior. + + This mock matches the behavior expected by BaseOperations._extract_response_json, + where response.text() and response.body() are synchronous methods that return + the actual string/bytes values directly. + + :param status_code: HTTP status code. + :param json_data: JSON data to return. + :return: Mock response object. + """ + response = AsyncMock() + response.status_code = status_code + + if json_data is not None: + json_str = json.dumps(json_data) + json_bytes = json_str.encode("utf-8") + # text() and body() are synchronous methods in AsyncHttpResponse + # They must be MagicMock (not AsyncMock) to return values directly when called + response.text = MagicMock(return_value=json_str) + response.body = MagicMock(return_value=json_bytes) + else: + response.text = MagicMock(return_value="") + response.body = MagicMock(return_value=b"") + + # Support async context manager + response.__aenter__ = AsyncMock(return_value=response) + response.__aexit__ = AsyncMock(return_value=None) + + return response diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/__init__.py new file mode 100644 index 000000000000..964fac9d8a55 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
# ---------------------------------------------------------
"""Runtime unit tests package."""
# diff --git .../tests/unit_tests/core/tools/runtime/conftest.py (new file, mode 100644, index 52a371bdc958, @@ -0,0 +1,39 @@)
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Shared fixtures for runtime unit tests.

Common fixtures are inherited from the parent conftest.py automatically by pytest.
"""
from unittest.mock import AsyncMock

import pytest


@pytest.fixture
def mock_foundry_tool_client():
    """``FoundryToolClient`` double with benign defaults on every async API."""
    stub = AsyncMock()
    stub.list_tools = AsyncMock(return_value=[])
    stub.list_tools_details = AsyncMock(return_value={})
    stub.invoke_tool = AsyncMock(return_value={"result": "success"})
    # Usable as an async context manager that yields itself.
    stub.__aenter__ = AsyncMock(return_value=stub)
    stub.__aexit__ = AsyncMock(return_value=None)
    return stub


@pytest.fixture
def mock_user_provider(sample_user_info):
    """``UserProvider`` double that resolves to the shared sample user."""
    stub = AsyncMock()
    stub.get_user = AsyncMock(return_value=sample_user_info)
    return stub


@pytest.fixture
def mock_user_provider_none():
    """``UserProvider`` double that resolves to no user (anonymous caller)."""
    stub = AsyncMock()
    stub.get_user = AsyncMock(return_value=None)
    return stub
# diff --git .../tests/unit_tests/core/tools/runtime/test_catalog.py (new file, mode 100644, index 45b03f0530a2)
# +++ b/sdk/.../tests/unit_tests/core/tools/runtime/test_catalog.py  (@@ -0,0 +1,349 @@)
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Unit tests for _catalog.py - testing public methods of DefaultFoundryToolCatalog."""
# Imports grouped stdlib / third-party / local (asyncio and unittest.mock are stdlib).
import asyncio
from unittest.mock import AsyncMock

import pytest

from azure.ai.agentserver.core.tools.runtime._catalog import (
    DefaultFoundryToolCatalog,
)
from azure.ai.agentserver.core.tools.client._models import (
    FoundryToolDetails,
    ResolvedFoundryTool,
    UserInfo,
)


class TestDefaultFoundryToolCatalogGet:
    """Tests for DefaultFoundryToolCatalog.get method."""

    @pytest.mark.asyncio
    async def test_get_returns_resolved_tool_when_found(
        self,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_hosted_mcp_tool,
        sample_tool_details,
        sample_user_info
    ):
        """Test get returns a resolved tool when the tool is found."""
        mock_foundry_tool_client.list_tools_details = AsyncMock(
            return_value={sample_hosted_mcp_tool.id: [sample_tool_details]}
        )

        catalog = DefaultFoundryToolCatalog(
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider,
            agent_name="test-agent"
        )

        result = await catalog.get(sample_hosted_mcp_tool)

        assert result is not None
        assert isinstance(result, ResolvedFoundryTool)
        assert result.details == sample_tool_details

    @pytest.mark.asyncio
    async def test_get_returns_none_when_not_found(
        self,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_hosted_mcp_tool
    ):
        """Test get returns None when the tool is not found."""
        mock_foundry_tool_client.list_tools_details = AsyncMock(return_value={})

        catalog = DefaultFoundryToolCatalog(
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider,
            agent_name="test-agent"
        )

        result = await catalog.get(sample_hosted_mcp_tool)

        assert result is None


class TestDefaultFoundryToolCatalogList:
    """Tests for DefaultFoundryToolCatalog.list method."""

    @pytest.mark.asyncio
    async def test_list_returns_empty_list_when_no_tools(
        self,
        mock_foundry_tool_client,
        mock_user_provider
    ):
        """Test list returns empty list when no tools are provided."""
        catalog = DefaultFoundryToolCatalog(
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider,
            agent_name="test-agent"
        )

        result = await catalog.list([])

        assert result == []

    @pytest.mark.asyncio
    async def test_list_returns_resolved_tools(
        self,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_hosted_mcp_tool,
        sample_tool_details
    ):
        """Test list returns resolved tools."""
        mock_foundry_tool_client.list_tools_details = AsyncMock(
            return_value={sample_hosted_mcp_tool.id: [sample_tool_details]}
        )

        catalog = DefaultFoundryToolCatalog(
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider,
            agent_name="test-agent"
        )

        result = await catalog.list([sample_hosted_mcp_tool])

        assert len(result) == 1
        assert isinstance(result[0], ResolvedFoundryTool)
        assert result[0].definition == sample_hosted_mcp_tool
        assert result[0].details == sample_tool_details

    @pytest.mark.asyncio
    async def test_list_multiple_tools_with_multiple_details(
        self,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_hosted_mcp_tool,
        sample_connected_tool,
        sample_schema_definition
    ):
        """Test list returns all resolved tools when tools have multiple details."""
        details1 = FoundryToolDetails(
            name="tool1",
            description="First tool",
            input_schema=sample_schema_definition
        )
        details2 = FoundryToolDetails(
            name="tool2",
            description="Second tool",
            input_schema=sample_schema_definition
        )

        mock_foundry_tool_client.list_tools_details = AsyncMock(
            return_value={
                sample_hosted_mcp_tool.id: [details1],
                sample_connected_tool.id: [details2]
            }
        )

        catalog = DefaultFoundryToolCatalog(
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider,
            agent_name="test-agent"
        )

        result = await catalog.list([sample_hosted_mcp_tool, sample_connected_tool])

        assert len(result) == 2
        names = {r.details.name for r in result}
        assert names == {"tool1", "tool2"}

    @pytest.mark.asyncio
    async def test_list_caches_results_for_hosted_mcp_tools(
        self,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_hosted_mcp_tool,
        sample_tool_details
    ):
        """Test that list caches results for hosted MCP tools."""
        mock_foundry_tool_client.list_tools_details = AsyncMock(
            return_value={sample_hosted_mcp_tool.id: [sample_tool_details]}
        )

        catalog = DefaultFoundryToolCatalog(
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider,
            agent_name="test-agent"
        )

        # First call
        result1 = await catalog.list([sample_hosted_mcp_tool])
        # Second call should use cache
        result2 = await catalog.list([sample_hosted_mcp_tool])

        # Client should only be called once
        assert mock_foundry_tool_client.list_tools_details.call_count == 1
        assert len(result1) == len(result2) == 1

    @pytest.mark.asyncio
    async def test_list_with_facade_dict(
        self,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_tool_details
    ):
        """Test list works with facade dictionaries."""
        facade = {"type": "custom_tool", "config": "value"}
        expected_id = "hosted_mcp:custom_tool"

        mock_foundry_tool_client.list_tools_details = AsyncMock(
            return_value={expected_id: [sample_tool_details]}
        )

        catalog = DefaultFoundryToolCatalog(
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider,
            agent_name="test-agent"
        )

        result = await catalog.list([facade])

        assert len(result) == 1
        assert result[0].details == sample_tool_details

    @pytest.mark.asyncio
    async def test_list_returns_multiple_details_per_tool(
        self,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_hosted_mcp_tool,
        sample_schema_definition
    ):
        """Test list returns multiple resolved tools when a tool has multiple details."""
        details1 = FoundryToolDetails(
            name="function1",
            description="First function",
            input_schema=sample_schema_definition
        )
        details2 = FoundryToolDetails(
            name="function2",
            description="Second function",
            input_schema=sample_schema_definition
        )

        mock_foundry_tool_client.list_tools_details = AsyncMock(
            return_value={sample_hosted_mcp_tool.id: [details1, details2]}
        )

        catalog = DefaultFoundryToolCatalog(
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider,
            agent_name="test-agent"
        )

        result = await catalog.list([sample_hosted_mcp_tool])

        assert len(result) == 2
        names = {r.details.name for r in result}
        assert names == {"function1", "function2"}

    @pytest.mark.asyncio
    async def test_list_handles_exception_from_client(
        self,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_hosted_mcp_tool
    ):
        """Test list propagates exceptions raised by the client."""
        mock_foundry_tool_client.list_tools_details = AsyncMock(
            side_effect=RuntimeError("Network error")
        )

        catalog = DefaultFoundryToolCatalog(
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider,
            agent_name="test-agent"
        )

        with pytest.raises(RuntimeError, match="Network error"):
            await catalog.list([sample_hosted_mcp_tool])

    @pytest.mark.asyncio
    async def test_list_connected_tool_cache_key_includes_user(
        self,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_connected_tool,
        sample_tool_details,
        sample_user_info
    ):
        """Test that connected tool cache key includes user info."""
        mock_foundry_tool_client.list_tools_details = AsyncMock(
            return_value={sample_connected_tool.id: [sample_tool_details]}
        )

        # Create a new user provider returning a different user
        other_user = UserInfo(object_id="other-oid", tenant_id="other-tid")
        mock_user_provider2 = AsyncMock()
        mock_user_provider2.get_user = AsyncMock(return_value=other_user)

        catalog1 = DefaultFoundryToolCatalog(
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider,
            agent_name="test-agent"
        )

        catalog2 = DefaultFoundryToolCatalog(
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider2,
            agent_name="test-agent"
        )

        # Both catalogs should be able to list tools
        result1 = await catalog1.list([sample_connected_tool])
        result2 = await catalog2.list([sample_connected_tool])

        assert len(result1) == 1
        assert len(result2) == 1
        # Different users must map to different cache keys, so the shared client
        # is consulted once per user rather than served from a common cache entry.
        assert mock_foundry_tool_client.list_tools_details.call_count == 2


class TestDefaultFoundryToolCatalogConcurrency:
    """Tests for DefaultFoundryToolCatalog concurrency handling."""

    @pytest.mark.asyncio
    async def test_concurrent_requests_share_single_fetch(
        self,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_hosted_mcp_tool,
        sample_tool_details
    ):
        """Test that concurrent requests for the same tool share a single fetch."""
        call_count = 0
        fetch_event = asyncio.Event()

        async def slow_fetch(*args, **kwargs):
            # Blocks until the test releases it, so both tasks are in flight at once.
            nonlocal call_count
            call_count += 1
            await fetch_event.wait()
            return {sample_hosted_mcp_tool.id: [sample_tool_details]}

        mock_foundry_tool_client.list_tools_details = slow_fetch

        catalog = DefaultFoundryToolCatalog(
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider,
            agent_name="test-agent"
        )

        # Start two concurrent requests
        task1 = asyncio.create_task(catalog.list([sample_hosted_mcp_tool]))
        task2 = asyncio.create_task(catalog.list([sample_hosted_mcp_tool]))

        # Allow tasks to start
        await asyncio.sleep(0.01)

        # Release the fetch
        fetch_event.set()

        results = await asyncio.gather(task1, task2)

        # Both should get results, but fetch should only be called once
        assert len(results[0]) == 1
        assert len(results[1]) == 1
        assert call_count == 1
# diff --git a/sdk/.../tests/unit_tests/core/tools/runtime/test_facade.py
# +++ b/sdk/.../tests/unit_tests/core/tools/runtime/test_facade.py  (new file, mode 100644, index c5377dc339a4, @@ -0,0 +1,180 @@)
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Unit tests for _facade.py - testing public function ensure_foundry_tool."""
import pytest

from azure.ai.agentserver.core.tools.runtime._facade import ensure_foundry_tool
from azure.ai.agentserver.core.tools.client._models import (
    FoundryConnectedTool,
    FoundryHostedMcpTool,
    FoundryToolProtocol,
    FoundryToolSource,
)
from azure.ai.agentserver.core.tools._exceptions import InvalidToolFacadeError


class TestEnsureFoundryTool:
    """Tests for the public ensure_foundry_tool function."""

    def test_returns_same_instance_when_given_foundry_tool(self, sample_hosted_mcp_tool):
        """A FoundryTool instance passes through untouched."""
        assert ensure_foundry_tool(sample_hosted_mcp_tool) is sample_hosted_mcp_tool

    def test_returns_same_instance_for_connected_tool(self, sample_connected_tool):
        """A FoundryConnectedTool instance passes through untouched."""
        assert ensure_foundry_tool(sample_connected_tool) is sample_connected_tool

    def test_converts_facade_with_mcp_protocol_to_connected_tool(self):
        """A facade whose type is 'mcp' becomes a FoundryConnectedTool."""
        tool = ensure_foundry_tool(
            {"type": "mcp", "project_connection_id": "my-connection"}
        )

        assert isinstance(tool, FoundryConnectedTool)
        assert tool.protocol == FoundryToolProtocol.MCP
        assert tool.project_connection_id == "my-connection"
        assert tool.source == FoundryToolSource.CONNECTED

    def test_converts_facade_with_a2a_protocol_to_connected_tool(self):
        """A facade whose type is 'a2a' becomes a FoundryConnectedTool."""
        tool = ensure_foundry_tool(
            {"type": "a2a", "project_connection_id": "my-a2a-connection"}
        )

        assert isinstance(tool, FoundryConnectedTool)
        assert tool.protocol == FoundryToolProtocol.A2A
        assert tool.project_connection_id == "my-a2a-connection"

    def test_converts_facade_with_unknown_type_to_hosted_mcp_tool(self):
        """A facade with any other type becomes a FoundryHostedMcpTool."""
        payload = {
            "type": "my_custom_tool",
            "some_config": "value123",
            "another_config": True,
        }

        tool = ensure_foundry_tool(payload)

        assert isinstance(tool, FoundryHostedMcpTool)
        assert tool.name == "my_custom_tool"
        assert tool.configuration == {"some_config": "value123", "another_config": True}
        assert tool.source == FoundryToolSource.HOSTED_MCP

    def test_raises_error_when_type_is_missing(self):
        """InvalidToolFacadeError is raised when 'type' is absent."""
        with pytest.raises(InvalidToolFacadeError) as excinfo:
            ensure_foundry_tool({"project_connection_id": "my-connection"})

        assert "type" in str(excinfo.value).lower()

    def test_raises_error_when_type_is_empty_string(self):
        """InvalidToolFacadeError is raised when 'type' is an empty string."""
        with pytest.raises(InvalidToolFacadeError) as excinfo:
            ensure_foundry_tool({"type": "", "project_connection_id": "my-connection"})

        assert "type" in str(excinfo.value).lower()

    def test_raises_error_when_type_is_not_string(self):
        """InvalidToolFacadeError is raised when 'type' is not a string."""
        with pytest.raises(InvalidToolFacadeError) as excinfo:
            ensure_foundry_tool({"type": 123, "project_connection_id": "my-connection"})

        assert "type" in str(excinfo.value).lower()

    def test_raises_error_when_mcp_protocol_missing_connection_id(self):
        """An 'mcp' facade without project_connection_id is rejected."""
        with pytest.raises(InvalidToolFacadeError) as excinfo:
            ensure_foundry_tool({"type": "mcp"})

        assert "project_connection_id" in str(excinfo.value)

    def test_raises_error_when_a2a_protocol_has_empty_connection_id(self):
        """An 'a2a' facade with an empty project_connection_id is rejected."""
        with pytest.raises(InvalidToolFacadeError) as excinfo:
            ensure_foundry_tool({"type": "a2a", "project_connection_id": ""})

        assert "project_connection_id" in str(excinfo.value)

    def test_parses_resource_id_format_connection_id(self):
        """A full ARM resource ID is reduced to the trailing connection name."""
        resource_id = (
            "/subscriptions/sub-123/resourceGroups/rg-test/providers/"
            "Microsoft.CognitiveServices/accounts/acc-test/projects/proj-test/connections/my-conn-name"
        )

        tool = ensure_foundry_tool(
            {"type": "mcp", "project_connection_id": resource_id}
        )

        assert isinstance(tool, FoundryConnectedTool)
        assert tool.project_connection_id == "my-conn-name"

    def test_raises_error_for_invalid_resource_id_format(self):
        """A malformed resource ID raises InvalidToolFacadeError."""
        with pytest.raises(InvalidToolFacadeError) as excinfo:
            ensure_foundry_tool(
                {
                    "type": "mcp",
                    "project_connection_id": "/subscriptions/sub-123/invalid/path",
                }
            )

        assert "Invalid resource ID format" in str(excinfo.value)

    def test_uses_simple_connection_name_as_is(self):
        """A plain connection name is kept verbatim (no resource-ID parsing)."""
        tool = ensure_foundry_tool(
            {"type": "mcp", "project_connection_id": "simple-connection-name"}
        )

        assert isinstance(tool, FoundryConnectedTool)
        assert tool.project_connection_id == "simple-connection-name"

    def test_original_facade_not_modified(self):
        """Conversion must not mutate the caller's dictionary."""
        payload = {"type": "my_tool", "config_key": "config_value"}
        snapshot = payload.copy()

        ensure_foundry_tool(payload)

        assert payload == snapshot

    def test_hosted_mcp_tool_with_no_extra_configuration(self):
        """A type-only facade yields a hosted MCP tool with empty configuration."""
        tool = ensure_foundry_tool({"type": "simple_tool"})

        assert isinstance(tool, FoundryHostedMcpTool)
        assert tool.name == "simple_tool"
        assert tool.configuration == {}
# diff --git .../tests/unit_tests/core/tools/runtime/test_invoker.py (new file, mode 100644, index b2a222c09d6e, @@ -0,0 +1,198 @@)
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Unit tests for _invoker.py - testing public methods of DefaultFoundryToolInvoker."""
import pytest
from unittest.mock import AsyncMock

from azure.ai.agentserver.core.tools.runtime._invoker import DefaultFoundryToolInvoker


def _make_invoker(resolved_tool, client, user_provider, agent_name="test-agent"):
    """Build a DefaultFoundryToolInvoker with the standard test wiring."""
    return DefaultFoundryToolInvoker(
        resolved_tool=resolved_tool,
        client=client,
        user_provider=user_provider,
        agent_name=agent_name,
    )


class TestDefaultFoundryToolInvokerResolvedTool:
    """Tests for the DefaultFoundryToolInvoker.resolved_tool property."""

    def test_resolved_tool_returns_tool_passed_at_init(
        self,
        sample_resolved_mcp_tool,
        mock_foundry_tool_client,
        mock_user_provider
    ):
        """The property hands back the exact tool supplied at construction."""
        sut = _make_invoker(
            sample_resolved_mcp_tool, mock_foundry_tool_client, mock_user_provider
        )

        assert sut.resolved_tool is sample_resolved_mcp_tool

    def test_resolved_tool_returns_connected_tool(
        self,
        sample_resolved_connected_tool,
        mock_foundry_tool_client,
        mock_user_provider
    ):
        """The property works the same for a connected tool."""
        sut = _make_invoker(
            sample_resolved_connected_tool, mock_foundry_tool_client, mock_user_provider
        )

        assert sut.resolved_tool is sample_resolved_connected_tool


class TestDefaultFoundryToolInvokerInvoke:
    """Tests for the DefaultFoundryToolInvoker.invoke method."""

    @pytest.mark.asyncio
    async def test_invoke_calls_client_with_correct_arguments(
        self,
        sample_resolved_mcp_tool,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_user_info
    ):
        """invoke forwards tool, arguments, agent name and user to the client."""
        sut = _make_invoker(
            sample_resolved_mcp_tool, mock_foundry_tool_client, mock_user_provider
        )
        arguments = {"input": "test value", "count": 5}

        await sut.invoke(arguments)

        mock_foundry_tool_client.invoke_tool.assert_called_once_with(
            sample_resolved_mcp_tool,
            arguments,
            "test-agent",
            sample_user_info
        )

    @pytest.mark.asyncio
    async def test_invoke_returns_result_from_client(
        self,
        sample_resolved_mcp_tool,
        mock_foundry_tool_client,
        mock_user_provider
    ):
        """invoke returns whatever client.invoke_tool produced, unmodified."""
        expected = {"output": "test result", "status": "completed"}
        mock_foundry_tool_client.invoke_tool = AsyncMock(return_value=expected)

        sut = _make_invoker(
            sample_resolved_mcp_tool, mock_foundry_tool_client, mock_user_provider
        )

        assert await sut.invoke({"input": "test"}) == expected

    @pytest.mark.asyncio
    async def test_invoke_with_empty_arguments(
        self,
        sample_resolved_mcp_tool,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_user_info
    ):
        """An empty arguments dict is passed through as-is."""
        sut = _make_invoker(
            sample_resolved_mcp_tool, mock_foundry_tool_client, mock_user_provider
        )

        await sut.invoke({})

        mock_foundry_tool_client.invoke_tool.assert_called_once_with(
            sample_resolved_mcp_tool,
            {},
            "test-agent",
            sample_user_info
        )

    @pytest.mark.asyncio
    async def test_invoke_with_none_user(
        self,
        sample_resolved_mcp_tool,
        mock_foundry_tool_client,
        mock_user_provider_none
    ):
        """invoke still works when the user provider resolves to None."""
        sut = _make_invoker(
            sample_resolved_mcp_tool, mock_foundry_tool_client, mock_user_provider_none
        )

        await sut.invoke({"input": "test"})

        mock_foundry_tool_client.invoke_tool.assert_called_once_with(
            sample_resolved_mcp_tool,
            {"input": "test"},
            "test-agent",
            None
        )

    @pytest.mark.asyncio
    async def test_invoke_propagates_client_exception(
        self,
        sample_resolved_mcp_tool,
        mock_foundry_tool_client,
        mock_user_provider
    ):
        """Errors raised by client.invoke_tool surface to the caller."""
        mock_foundry_tool_client.invoke_tool = AsyncMock(
            side_effect=RuntimeError("Client error")
        )

        sut = _make_invoker(
            sample_resolved_mcp_tool, mock_foundry_tool_client, mock_user_provider
        )

        with pytest.raises(RuntimeError, match="Client error"):
            await sut.invoke({"input": "test"})

    @pytest.mark.asyncio
    async def test_invoke_with_complex_nested_arguments(
        self,
        sample_resolved_mcp_tool,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_user_info
    ):
        """Deeply nested argument structures are forwarded untouched."""
        complex_args = {
            "nested": {"key1": "value1", "key2": 123},
            "list": [1, 2, 3],
            "mixed": [{"a": 1}, {"b": 2}]
        }

        sut = _make_invoker(
            sample_resolved_mcp_tool, mock_foundry_tool_client, mock_user_provider
        )

        await sut.invoke(complex_args)

        mock_foundry_tool_client.invoke_tool.assert_called_once_with(
            sample_resolved_mcp_tool,
            complex_args,
            "test-agent",
            sample_user_info
        )
# diff --git .../tests/unit_tests/core/tools/runtime/test_resolver.py (new file, mode 100644, index 7bdaa8f957a9, @@ -0,0 +1,202 @@)
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Unit tests for _resolver.py - testing public methods of DefaultFoundryToolInvocationResolver."""
import pytest
# NOTE(review): MagicMock appears unused in this module — confirm before removing.
from unittest.mock import AsyncMock, MagicMock

from azure.ai.agentserver.core.tools.runtime._resolver import DefaultFoundryToolInvocationResolver
from azure.ai.agentserver.core.tools.runtime._invoker import DefaultFoundryToolInvoker
from azure.ai.agentserver.core.tools._exceptions import UnableToResolveToolInvocationError
from azure.ai.agentserver.core.tools.client._models import (
    FoundryConnectedTool,
    FoundryHostedMcpTool,
)


class TestDefaultFoundryToolInvocationResolverResolve:
    """Tests for DefaultFoundryToolInvocationResolver.resolve method."""

    @pytest.fixture
    def mock_catalog(self, sample_resolved_mcp_tool):
        """FoundryToolCatalog double defaulting to the sample resolved tool."""
        stub = AsyncMock()
        stub.get = AsyncMock(return_value=sample_resolved_mcp_tool)
        stub.list = AsyncMock(return_value=[sample_resolved_mcp_tool])
        return stub

    @pytest.mark.asyncio
    async def test_resolve_with_resolved_tool_returns_invoker_directly(
        self,
        mock_catalog,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_resolved_mcp_tool
    ):
        """An already-resolved tool yields an invoker without touching the catalog."""
        subject = DefaultFoundryToolInvocationResolver(
            catalog=mock_catalog,
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider,
            agent_name="test-agent"
        )

        invoker = await subject.resolve(sample_resolved_mcp_tool)

        assert isinstance(invoker, DefaultFoundryToolInvoker)
        assert invoker.resolved_tool is sample_resolved_mcp_tool
        # No catalog lookup is needed when the tool is pre-resolved.
        mock_catalog.get.assert_not_called()

    @pytest.mark.asyncio
    async def test_resolve_with_foundry_tool_uses_catalog(
        self,
        mock_catalog,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_hosted_mcp_tool,
        sample_resolved_mcp_tool
    ):
        """A bare FoundryTool is resolved through the catalog."""
        mock_catalog.get = AsyncMock(return_value=sample_resolved_mcp_tool)

        subject = DefaultFoundryToolInvocationResolver(
            catalog=mock_catalog,
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider,
            agent_name="test-agent"
        )

        invoker = await subject.resolve(sample_hosted_mcp_tool)

        assert isinstance(invoker, DefaultFoundryToolInvoker)
        mock_catalog.get.assert_called_once_with(sample_hosted_mcp_tool)

    @pytest.mark.asyncio
    async def test_resolve_with_facade_dict_uses_catalog(
        self,
        mock_catalog,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_resolved_connected_tool
    ):
        """A facade dict is first converted, then resolved via the catalog."""
        mock_catalog.get = AsyncMock(return_value=sample_resolved_connected_tool)

        subject = DefaultFoundryToolInvocationResolver(
            catalog=mock_catalog,
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider,
            agent_name="test-agent"
        )

        invoker = await subject.resolve(
            {"type": "mcp", "project_connection_id": "test-connection"}
        )

        assert isinstance(invoker, DefaultFoundryToolInvoker)
        mock_catalog.get.assert_called_once()
        # The facade must have been converted to a FoundryConnectedTool first.
        converted = mock_catalog.get.call_args[0][0]
        assert isinstance(converted, FoundryConnectedTool)

    @pytest.mark.asyncio
    async def test_resolve_raises_error_when_tool_not_found_in_catalog(
        self,
        mock_catalog,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_hosted_mcp_tool
    ):
        """A catalog miss raises UnableToResolveToolInvocationError."""
        mock_catalog.get = AsyncMock(return_value=None)

        subject = DefaultFoundryToolInvocationResolver(
            catalog=mock_catalog,
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider,
            agent_name="test-agent"
        )

        with pytest.raises(UnableToResolveToolInvocationError) as excinfo:
            await subject.resolve(sample_hosted_mcp_tool)

        assert excinfo.value.tool is sample_hosted_mcp_tool
        assert "Unable to resolve tool" in str(excinfo.value)

    @pytest.mark.asyncio
    async def test_resolve_with_hosted_mcp_facade(
        self,
        mock_catalog,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_resolved_mcp_tool
    ):
        """A facade of unknown type converts to FoundryHostedMcpTool before lookup."""
        mock_catalog.get = AsyncMock(return_value=sample_resolved_mcp_tool)

        subject = DefaultFoundryToolInvocationResolver(
            catalog=mock_catalog,
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider,
            agent_name="test-agent"
        )

        invoker = await subject.resolve(
            {"type": "custom_mcp_tool", "config_key": "config_value"}
        )

        assert isinstance(invoker, DefaultFoundryToolInvoker)
        converted = mock_catalog.get.call_args[0][0]
        assert isinstance(converted, FoundryHostedMcpTool)
        assert converted.name == "custom_mcp_tool"

    @pytest.mark.asyncio
    async def test_resolve_returns_invoker_with_correct_agent_name(
        self,
        mock_catalog,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_resolved_mcp_tool
    ):
        """The resolver threads its agent name through to the invoker."""
        subject = DefaultFoundryToolInvocationResolver(
            catalog=mock_catalog,
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider,
            agent_name="custom-agent-name"
        )

        invoker = await subject.resolve(sample_resolved_mcp_tool)

        # White-box check of the invoker's internal state (no public accessor).
        assert invoker._agent_name == "custom-agent-name"

    @pytest.mark.asyncio
    async def test_resolve_with_connected_tool_directly(
        self,
        mock_catalog,
        mock_foundry_tool_client,
        mock_user_provider,
        sample_connected_tool,
        sample_resolved_connected_tool
    ):
        """A FoundryConnectedTool passed directly is resolved via the catalog."""
        mock_catalog.get = AsyncMock(return_value=sample_resolved_connected_tool)

        subject = DefaultFoundryToolInvocationResolver(
            catalog=mock_catalog,
            client=mock_foundry_tool_client,
            user_provider=mock_user_provider,
            agent_name="test-agent"
        )

        invoker = await subject.resolve(sample_connected_tool)

        assert isinstance(invoker, DefaultFoundryToolInvoker)
        mock_catalog.get.assert_called_once_with(sample_connected_tool)
# diff --git .../tests/unit_tests/core/tools/runtime/test_runtime.py (new file, mode 100644, index e42fc29a76cd, @@ -0,0 +1,283 @@)
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Unit tests for _runtime.py - testing public methods of DefaultFoundryToolRuntime."""
import os
import pytest
from unittest.mock import AsyncMock, MagicMock, patch

from azure.ai.agentserver.core.tools.runtime._runtime import DefaultFoundryToolRuntime
from azure.ai.agentserver.core.tools.runtime._catalog import DefaultFoundryToolCatalog
from azure.ai.agentserver.core.tools.runtime._resolver import DefaultFoundryToolInvocationResolver
from azure.ai.agentserver.core.tools.runtime._user import ContextVarUserProvider


class TestDefaultFoundryToolRuntimeInit:
    """Tests for DefaultFoundryToolRuntime initialization."""

    @patch("azure.ai.agentserver.core.tools.runtime._runtime.FoundryToolClient")
    def test_init_creates_client_with_endpoint_and_credential(
        self,
        mock_client_class,
        mock_credential
    ):
        """Test initialization creates client with correct endpoint and credential."""
        endpoint = "https://test-project.azure.com"
        mock_client_class.return_value = MagicMock()
runtime = DefaultFoundryToolRuntime( + project_endpoint=endpoint, + credential=mock_credential + ) + + mock_client_class.assert_called_once_with( + endpoint=endpoint, + credential=mock_credential + ) + assert runtime is not None + + @patch("azure.ai.agentserver.core.tools.runtime._runtime.FoundryToolClient") + def test_init_uses_default_user_provider_when_none_provided( + self, + mock_client_class, + mock_credential + ): + """Test initialization uses ContextVarUserProvider when user_provider is None.""" + mock_client_class.return_value = MagicMock() + + runtime = DefaultFoundryToolRuntime( + project_endpoint="https://test.azure.com", + credential=mock_credential + ) + + assert isinstance(runtime._user_provider, ContextVarUserProvider) + + @patch("azure.ai.agentserver.core.tools.runtime._runtime.FoundryToolClient") + def test_init_uses_custom_user_provider( + self, + mock_client_class, + mock_credential, + mock_user_provider + ): + """Test initialization uses custom user provider when provided.""" + mock_client_class.return_value = MagicMock() + + runtime = DefaultFoundryToolRuntime( + project_endpoint="https://test.azure.com", + credential=mock_credential, + user_provider=mock_user_provider + ) + + assert runtime._user_provider is mock_user_provider + + @patch.dict(os.environ, {"AGENT_NAME": "custom-agent"}) + @patch("azure.ai.agentserver.core.tools.runtime._runtime.FoundryToolClient") + def test_init_reads_agent_name_from_environment( + self, + mock_client_class, + mock_credential + ): + """Test initialization reads agent name from environment variable.""" + mock_client_class.return_value = MagicMock() + + runtime = DefaultFoundryToolRuntime( + project_endpoint="https://test.azure.com", + credential=mock_credential + ) + + assert runtime._agent_name == "custom-agent" + + @patch("azure.ai.agentserver.core.tools.runtime._runtime.FoundryToolClient") + def test_init_uses_default_agent_name_when_env_not_set( + self, + mock_client_class, + mock_credential + ): + """Test 
initialization uses default agent name when env var is not set.""" + mock_client_class.return_value = MagicMock() + + # Ensure AGENT_NAME is not set + env_copy = os.environ.copy() + if "AGENT_NAME" in env_copy: + del env_copy["AGENT_NAME"] + + with patch.dict(os.environ, env_copy, clear=True): + runtime = DefaultFoundryToolRuntime( + project_endpoint="https://test.azure.com", + credential=mock_credential + ) + + assert runtime._agent_name == "$default" + + +class TestDefaultFoundryToolRuntimeCatalog: + """Tests for DefaultFoundryToolRuntime.catalog property.""" + + @patch("azure.ai.agentserver.core.tools.runtime._runtime.FoundryToolClient") + def test_catalog_returns_default_catalog( + self, + mock_client_class, + mock_credential + ): + """Test catalog property returns DefaultFoundryToolCatalog.""" + mock_client_class.return_value = MagicMock() + + runtime = DefaultFoundryToolRuntime( + project_endpoint="https://test.azure.com", + credential=mock_credential + ) + + assert isinstance(runtime.catalog, DefaultFoundryToolCatalog) + + +class TestDefaultFoundryToolRuntimeInvocation: + """Tests for DefaultFoundryToolRuntime.invocation property.""" + + @patch("azure.ai.agentserver.core.tools.runtime._runtime.FoundryToolClient") + def test_invocation_returns_default_resolver( + self, + mock_client_class, + mock_credential + ): + """Test invocation property returns DefaultFoundryToolInvocationResolver.""" + mock_client_class.return_value = MagicMock() + + runtime = DefaultFoundryToolRuntime( + project_endpoint="https://test.azure.com", + credential=mock_credential + ) + + assert isinstance(runtime.invocation, DefaultFoundryToolInvocationResolver) + + +class TestDefaultFoundryToolRuntimeInvoke: + """Tests for DefaultFoundryToolRuntime.invoke method.""" + + @pytest.mark.asyncio + @patch("azure.ai.agentserver.core.tools.runtime._runtime.FoundryToolClient") + async def test_invoke_resolves_and_invokes_tool( + self, + mock_client_class, + mock_credential, + 
sample_resolved_mcp_tool + ): + """Test invoke resolves the tool and calls the invoker.""" + mock_client_instance = MagicMock() + mock_client_class.return_value = mock_client_instance + + runtime = DefaultFoundryToolRuntime( + project_endpoint="https://test.azure.com", + credential=mock_credential + ) + + # Mock the invocation resolver + mock_invoker = AsyncMock() + mock_invoker.invoke = AsyncMock(return_value={"result": "success"}) + runtime._invocation.resolve = AsyncMock(return_value=mock_invoker) + + result = await runtime.invoke(sample_resolved_mcp_tool, {"input": "test"}) + + assert result == {"result": "success"} + runtime._invocation.resolve.assert_called_once_with(sample_resolved_mcp_tool) + mock_invoker.invoke.assert_called_once_with({"input": "test"}) + + @pytest.mark.asyncio + @patch("azure.ai.agentserver.core.tools.runtime._runtime.FoundryToolClient") + async def test_invoke_with_facade_dict( + self, + mock_client_class, + mock_credential + ): + """Test invoke works with facade dictionary.""" + mock_client_instance = MagicMock() + mock_client_class.return_value = mock_client_instance + + runtime = DefaultFoundryToolRuntime( + project_endpoint="https://test.azure.com", + credential=mock_credential + ) + + facade = {"type": "custom_tool", "config": "value"} + + # Mock the invocation resolver + mock_invoker = AsyncMock() + mock_invoker.invoke = AsyncMock(return_value={"output": "done"}) + runtime._invocation.resolve = AsyncMock(return_value=mock_invoker) + + result = await runtime.invoke(facade, {"param": "value"}) + + assert result == {"output": "done"} + runtime._invocation.resolve.assert_called_once_with(facade) + + +class TestDefaultFoundryToolRuntimeContextManager: + """Tests for DefaultFoundryToolRuntime async context manager.""" + + @pytest.mark.asyncio + @patch("azure.ai.agentserver.core.tools.runtime._runtime.FoundryToolClient") + async def test_aenter_returns_runtime_and_enters_client( + self, + mock_client_class, + mock_credential + ): + 
"""Test __aenter__ enters client and returns runtime.""" + mock_client_instance = AsyncMock() + mock_client_instance.__aenter__ = AsyncMock(return_value=mock_client_instance) + mock_client_instance.__aexit__ = AsyncMock(return_value=None) + mock_client_class.return_value = mock_client_instance + + runtime = DefaultFoundryToolRuntime( + project_endpoint="https://test.azure.com", + credential=mock_credential + ) + + async with runtime as r: + assert r is runtime + mock_client_instance.__aenter__.assert_called_once() + + @pytest.mark.asyncio + @patch("azure.ai.agentserver.core.tools.runtime._runtime.FoundryToolClient") + async def test_aexit_exits_client( + self, + mock_client_class, + mock_credential + ): + """Test __aexit__ exits client properly.""" + mock_client_instance = AsyncMock() + mock_client_instance.__aenter__ = AsyncMock(return_value=mock_client_instance) + mock_client_instance.__aexit__ = AsyncMock(return_value=None) + mock_client_class.return_value = mock_client_instance + + runtime = DefaultFoundryToolRuntime( + project_endpoint="https://test.azure.com", + credential=mock_credential + ) + + async with runtime: + pass + + mock_client_instance.__aexit__.assert_called_once() + + @pytest.mark.asyncio + @patch("azure.ai.agentserver.core.tools.runtime._runtime.FoundryToolClient") + async def test_aexit_called_on_exception( + self, + mock_client_class, + mock_credential + ): + """Test __aexit__ is called even when exception occurs.""" + mock_client_instance = AsyncMock() + mock_client_instance.__aenter__ = AsyncMock(return_value=mock_client_instance) + mock_client_instance.__aexit__ = AsyncMock(return_value=None) + mock_client_class.return_value = mock_client_instance + + runtime = DefaultFoundryToolRuntime( + project_endpoint="https://test.azure.com", + credential=mock_credential + ) + + with pytest.raises(ValueError): + async with runtime: + raise ValueError("Test error") + + mock_client_instance.__aexit__.assert_called_once() diff --git 
a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_starlette.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_starlette.py new file mode 100644 index 000000000000..d1d72004d011 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_starlette.py @@ -0,0 +1,261 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Unit tests for _starlette.py - testing public methods of UserInfoContextMiddleware.""" +import pytest +from contextvars import ContextVar +from unittest.mock import AsyncMock, MagicMock + +from azure.ai.agentserver.core.tools.client._models import UserInfo + + +class TestUserInfoContextMiddlewareInstall: + """Tests for UserInfoContextMiddleware.install class method.""" + + def test_install_adds_middleware_to_starlette_app(self): + """Test install adds middleware to Starlette application.""" + # Import here to avoid requiring starlette when not needed + from azure.ai.agentserver.core.tools.runtime._starlette import UserInfoContextMiddleware + + mock_app = MagicMock() + + UserInfoContextMiddleware.install(mock_app) + + mock_app.add_middleware.assert_called_once() + call_args = mock_app.add_middleware.call_args + assert call_args[0][0] == UserInfoContextMiddleware + + def test_install_uses_default_context_when_none_provided(self): + """Test install uses default user context when none is provided.""" + from azure.ai.agentserver.core.tools.runtime._starlette import UserInfoContextMiddleware + from azure.ai.agentserver.core.tools.runtime._user import ContextVarUserProvider + + mock_app = MagicMock() + + UserInfoContextMiddleware.install(mock_app) + + call_kwargs = mock_app.add_middleware.call_args[1] + assert call_kwargs["user_info_var"] is ContextVarUserProvider.default_user_info_context + + def 
test_install_uses_custom_context(self): + """Test install uses custom user context when provided.""" + from azure.ai.agentserver.core.tools.runtime._starlette import UserInfoContextMiddleware + + mock_app = MagicMock() + custom_context = ContextVar("custom_context") + + UserInfoContextMiddleware.install(mock_app, user_context=custom_context) + + call_kwargs = mock_app.add_middleware.call_args[1] + assert call_kwargs["user_info_var"] is custom_context + + def test_install_uses_custom_resolver(self): + """Test install uses custom user resolver when provided.""" + from azure.ai.agentserver.core.tools.runtime._starlette import UserInfoContextMiddleware + + mock_app = MagicMock() + + async def custom_resolver(request): + return UserInfo(object_id="custom-oid", tenant_id="custom-tid") + + UserInfoContextMiddleware.install(mock_app, user_resolver=custom_resolver) + + call_kwargs = mock_app.add_middleware.call_args[1] + assert call_kwargs["user_resolver"] is custom_resolver + + +class TestUserInfoContextMiddlewareDispatch: + """Tests for UserInfoContextMiddleware.dispatch method.""" + + @pytest.mark.asyncio + async def test_dispatch_sets_user_in_context(self): + """Test dispatch sets user info in context variable.""" + from azure.ai.agentserver.core.tools.runtime._starlette import UserInfoContextMiddleware + + user_context = ContextVar("test_context") + user_info = UserInfo(object_id="test-oid", tenant_id="test-tid") + + async def mock_resolver(request): + return user_info + + # Create a simple mock app + mock_app = AsyncMock() + + middleware = UserInfoContextMiddleware( + app=mock_app, + user_info_var=user_context, + user_resolver=mock_resolver + ) + + mock_request = MagicMock() + captured_user = None + + async def call_next(request): + nonlocal captured_user + captured_user = user_context.get(None) + return MagicMock() + + await middleware.dispatch(mock_request, call_next) + + assert captured_user is user_info + + @pytest.mark.asyncio + async def 
test_dispatch_resets_context_after_request(self): + """Test dispatch resets context variable after request completes.""" + from azure.ai.agentserver.core.tools.runtime._starlette import UserInfoContextMiddleware + + user_context = ContextVar("test_context") + original_user = UserInfo(object_id="original-oid", tenant_id="original-tid") + user_context.set(original_user) + + new_user = UserInfo(object_id="new-oid", tenant_id="new-tid") + + async def mock_resolver(request): + return new_user + + mock_app = AsyncMock() + + middleware = UserInfoContextMiddleware( + app=mock_app, + user_info_var=user_context, + user_resolver=mock_resolver + ) + + mock_request = MagicMock() + + async def call_next(request): + # During request, should have new_user + assert user_context.get(None) is new_user + return MagicMock() + + await middleware.dispatch(mock_request, call_next) + + # After request, context should be reset to original value + assert user_context.get(None) is original_user + + @pytest.mark.asyncio + async def test_dispatch_resets_context_on_exception(self): + """Test dispatch resets context even when call_next raises exception.""" + from azure.ai.agentserver.core.tools.runtime._starlette import UserInfoContextMiddleware + + user_context = ContextVar("test_context") + original_user = UserInfo(object_id="original-oid", tenant_id="original-tid") + user_context.set(original_user) + + new_user = UserInfo(object_id="new-oid", tenant_id="new-tid") + + async def mock_resolver(request): + return new_user + + mock_app = AsyncMock() + + middleware = UserInfoContextMiddleware( + app=mock_app, + user_info_var=user_context, + user_resolver=mock_resolver + ) + + mock_request = MagicMock() + + async def call_next(request): + raise RuntimeError("Request failed") + + with pytest.raises(RuntimeError, match="Request failed"): + await middleware.dispatch(mock_request, call_next) + + # Context should still be reset to original + assert user_context.get(None) is original_user + + 
@pytest.mark.asyncio + async def test_dispatch_handles_none_user(self): + """Test dispatch handles None user from resolver.""" + from azure.ai.agentserver.core.tools.runtime._starlette import UserInfoContextMiddleware + + user_context = ContextVar("test_context") + + async def mock_resolver(request): + return None + + mock_app = AsyncMock() + + middleware = UserInfoContextMiddleware( + app=mock_app, + user_info_var=user_context, + user_resolver=mock_resolver + ) + + mock_request = MagicMock() + captured_user = "not_set" + + async def call_next(request): + nonlocal captured_user + captured_user = user_context.get("default") + return MagicMock() + + await middleware.dispatch(mock_request, call_next) + + assert captured_user is None + + @pytest.mark.asyncio + async def test_dispatch_calls_resolver_with_request(self): + """Test dispatch calls user resolver with the request object.""" + from azure.ai.agentserver.core.tools.runtime._starlette import UserInfoContextMiddleware + + user_context = ContextVar("test_context") + captured_request = None + + async def mock_resolver(request): + nonlocal captured_request + captured_request = request + return UserInfo(object_id="oid", tenant_id="tid") + + mock_app = AsyncMock() + + middleware = UserInfoContextMiddleware( + app=mock_app, + user_info_var=user_context, + user_resolver=mock_resolver + ) + + mock_request = MagicMock() + mock_request.url = "https://test.com/api" + + async def call_next(request): + return MagicMock() + + await middleware.dispatch(mock_request, call_next) + + assert captured_request is mock_request + + +class TestUserInfoContextMiddlewareDefaultResolver: + """Tests for UserInfoContextMiddleware default resolver.""" + + @pytest.mark.asyncio + async def test_default_resolver_extracts_user_from_headers(self): + """Test default resolver extracts user info from request headers.""" + from azure.ai.agentserver.core.tools.runtime._starlette import UserInfoContextMiddleware + + mock_request = MagicMock() + 
mock_request.headers = { + "x-aml-oid": "header-object-id", + "x-aml-tid": "header-tenant-id" + } + + result = await UserInfoContextMiddleware._default_user_resolver(mock_request) + + assert result is not None + assert result.object_id == "header-object-id" + assert result.tenant_id == "header-tenant-id" + + @pytest.mark.asyncio + async def test_default_resolver_returns_none_when_headers_missing(self): + """Test default resolver returns None when required headers are missing.""" + from azure.ai.agentserver.core.tools.runtime._starlette import UserInfoContextMiddleware + + mock_request = MagicMock() + mock_request.headers = {} + + result = await UserInfoContextMiddleware._default_user_resolver(mock_request) + + assert result is None diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_user.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_user.py new file mode 100644 index 000000000000..a909d9e5948a --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_user.py @@ -0,0 +1,210 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Unit tests for _user.py - testing public methods of ContextVarUserProvider and resolve_user_from_headers.""" +import pytest +from contextvars import ContextVar + +from azure.ai.agentserver.core.tools.runtime._user import ( + ContextVarUserProvider, + resolve_user_from_headers, +) +from azure.ai.agentserver.core.tools.client._models import UserInfo + + +class TestContextVarUserProvider: + """Tests for ContextVarUserProvider public methods.""" + + @pytest.mark.asyncio + async def test_get_user_returns_none_when_context_not_set(self): + """Test get_user returns None when context variable is not set.""" + custom_context = ContextVar("test_user_context") + provider = ContextVarUserProvider(context=custom_context) + + result = await provider.get_user() + + assert result is None + + @pytest.mark.asyncio + async def test_get_user_returns_user_when_context_is_set(self, sample_user_info): + """Test get_user returns UserInfo when context variable is set.""" + custom_context = ContextVar("test_user_context") + custom_context.set(sample_user_info) + provider = ContextVarUserProvider(context=custom_context) + + result = await provider.get_user() + + assert result is sample_user_info + assert result.object_id == "test-object-id" + assert result.tenant_id == "test-tenant-id" + + @pytest.mark.asyncio + async def test_uses_default_context_when_none_provided(self, sample_user_info): + """Test that default context is used when no context is provided.""" + # Set value in default context + ContextVarUserProvider.default_user_info_context.set(sample_user_info) + provider = ContextVarUserProvider() + + result = await provider.get_user() + + assert result is sample_user_info + + @pytest.mark.asyncio + async def test_different_providers_share_same_default_context(self, sample_user_info): + """Test that different providers using default context share the same value.""" + 
ContextVarUserProvider.default_user_info_context.set(sample_user_info) + provider1 = ContextVarUserProvider() + provider2 = ContextVarUserProvider() + + result1 = await provider1.get_user() + result2 = await provider2.get_user() + + assert result1 is result2 is sample_user_info + + @pytest.mark.asyncio + async def test_custom_context_isolation(self, sample_user_info): + """Test that custom contexts are isolated from each other.""" + context1 = ContextVar("context1") + context2 = ContextVar("context2") + user2 = UserInfo(object_id="other-oid", tenant_id="other-tid") + + context1.set(sample_user_info) + context2.set(user2) + + provider1 = ContextVarUserProvider(context=context1) + provider2 = ContextVarUserProvider(context=context2) + + result1 = await provider1.get_user() + result2 = await provider2.get_user() + + assert result1 is sample_user_info + assert result2 is user2 + assert result1 is not result2 + + +class TestResolveUserFromHeaders: + """Tests for resolve_user_from_headers public function.""" + + def test_returns_user_info_when_both_headers_present(self): + """Test returns UserInfo when both object_id and tenant_id headers are present.""" + headers = { + "x-aml-oid": "user-object-id", + "x-aml-tid": "user-tenant-id" + } + + result = resolve_user_from_headers(headers) + + assert result is not None + assert isinstance(result, UserInfo) + assert result.object_id == "user-object-id" + assert result.tenant_id == "user-tenant-id" + + def test_returns_none_when_object_id_missing(self): + """Test returns None when object_id header is missing.""" + headers = {"x-aml-tid": "user-tenant-id"} + + result = resolve_user_from_headers(headers) + + assert result is None + + def test_returns_none_when_tenant_id_missing(self): + """Test returns None when tenant_id header is missing.""" + headers = {"x-aml-oid": "user-object-id"} + + result = resolve_user_from_headers(headers) + + assert result is None + + def test_returns_none_when_both_headers_missing(self): + """Test 
returns None when both headers are missing.""" + headers = {} + + result = resolve_user_from_headers(headers) + + assert result is None + + def test_returns_none_when_object_id_is_empty(self): + """Test returns None when object_id is empty string.""" + headers = { + "x-aml-oid": "", + "x-aml-tid": "user-tenant-id" + } + + result = resolve_user_from_headers(headers) + + assert result is None + + def test_returns_none_when_tenant_id_is_empty(self): + """Test returns None when tenant_id is empty string.""" + headers = { + "x-aml-oid": "user-object-id", + "x-aml-tid": "" + } + + result = resolve_user_from_headers(headers) + + assert result is None + + def test_custom_header_names(self): + """Test using custom header names for object_id and tenant_id.""" + headers = { + "custom-oid-header": "custom-object-id", + "custom-tid-header": "custom-tenant-id" + } + + result = resolve_user_from_headers( + headers, + object_id_header="custom-oid-header", + tenant_id_header="custom-tid-header" + ) + + assert result is not None + assert result.object_id == "custom-object-id" + assert result.tenant_id == "custom-tenant-id" + + def test_default_headers_not_matched_with_custom_headers(self): + """Test that default headers are not matched when custom headers are specified.""" + headers = { + "x-aml-oid": "default-object-id", + "x-aml-tid": "default-tenant-id" + } + + result = resolve_user_from_headers( + headers, + object_id_header="custom-oid", + tenant_id_header="custom-tid" + ) + + assert result is None + + def test_case_sensitive_header_matching(self): + """Test that header matching is case-sensitive.""" + headers = { + "X-AML-OID": "user-object-id", + "X-AML-TID": "user-tenant-id" + } + + # Default headers are lowercase, so these should not match + result = resolve_user_from_headers(headers) + + assert result is None + + def test_with_mapping_like_object(self): + """Test with a mapping-like object that supports .get().""" + class HeadersMapping: + def __init__(self, data): + 
self._data = data + + def get(self, key, default=""): + return self._data.get(key, default) + + headers = HeadersMapping({ + "x-aml-oid": "mapping-object-id", + "x-aml-tid": "mapping-tenant-id" + }) + + result = resolve_user_from_headers(headers) + + assert result is not None + assert result.object_id == "mapping-object-id" + assert result.tenant_id == "mapping-tenant-id" diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/utils/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/utils/__init__.py new file mode 100644 index 000000000000..2d7503de198d --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/utils/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Utils unit tests package.""" diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/utils/conftest.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/utils/conftest.py new file mode 100644 index 000000000000..abd2f5145c29 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/utils/conftest.py @@ -0,0 +1,56 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Shared fixtures for utils unit tests. + +Common fixtures are inherited from the parent conftest.py automatically by pytest. 
+""" +from typing import Optional + +from azure.ai.agentserver.core.tools.client._models import ( + FoundryConnectedTool, + FoundryHostedMcpTool, + FoundryToolDetails, + ResolvedFoundryTool, + SchemaDefinition, + SchemaType, +) + + +def create_resolved_tool_with_name( + name: str, + tool_type: str = "mcp", + connection_id: Optional[str] = None +) -> ResolvedFoundryTool: + """Helper to create a ResolvedFoundryTool with a specific name. + + :param name: The name for the tool details. + :param tool_type: Either "mcp" or "connected". + :param connection_id: Connection ID for connected tools. If provided with tool_type="mcp", + will automatically use "connected" type to ensure unique tool IDs. + :return: A ResolvedFoundryTool instance. + """ + schema = SchemaDefinition( + type=SchemaType.OBJECT, + properties={}, + required=set() + ) + details = FoundryToolDetails( + name=name, + description=f"Tool named {name}", + input_schema=schema + ) + + # If connection_id is provided, use connected tool to ensure unique IDs + if connection_id is not None or tool_type == "connected": + definition = FoundryConnectedTool( + protocol="mcp", + project_connection_id=connection_id or f"conn-{name}" + ) + else: + definition = FoundryHostedMcpTool( + name=f"mcp-{name}", + configuration={} + ) + + return ResolvedFoundryTool(definition=definition, details=details) diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/utils/test_name_resolver.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/utils/test_name_resolver.py new file mode 100644 index 000000000000..14340799253b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/utils/test_name_resolver.py @@ -0,0 +1,260 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Unit tests for _name_resolver.py - testing public methods of ToolNameResolver.""" +from azure.ai.agentserver.core.tools.utils import ToolNameResolver +from azure.ai.agentserver.core.tools.client._models import ( + FoundryConnectedTool, + FoundryHostedMcpTool, + FoundryToolDetails, + ResolvedFoundryTool, +) + +from .conftest import create_resolved_tool_with_name + + +class TestToolNameResolverResolve: + """Tests for ToolNameResolver.resolve method.""" + + def test_resolve_returns_tool_name_for_first_occurrence( + self, + sample_resolved_mcp_tool + ): + """Test resolve returns the original tool name for first occurrence.""" + resolver = ToolNameResolver() + + result = resolver.resolve(sample_resolved_mcp_tool) + + assert result == sample_resolved_mcp_tool.details.name + + def test_resolve_returns_same_name_for_same_tool( + self, + sample_resolved_mcp_tool + ): + """Test resolve returns the same name when called multiple times for same tool.""" + resolver = ToolNameResolver() + + result1 = resolver.resolve(sample_resolved_mcp_tool) + result2 = resolver.resolve(sample_resolved_mcp_tool) + result3 = resolver.resolve(sample_resolved_mcp_tool) + + assert result1 == result2 == result3 + assert result1 == sample_resolved_mcp_tool.details.name + + def test_resolve_appends_count_for_duplicate_names(self): + """Test resolve appends count for tools with duplicate names.""" + resolver = ToolNameResolver() + + tool1 = create_resolved_tool_with_name("my_tool", connection_id="conn-1") + tool2 = create_resolved_tool_with_name("my_tool", connection_id="conn-2") + tool3 = create_resolved_tool_with_name("my_tool", connection_id="conn-3") + + result1 = resolver.resolve(tool1) + result2 = resolver.resolve(tool2) + result3 = resolver.resolve(tool3) + + assert result1 == "my_tool" + assert result2 == "my_tool_1" + assert result3 == "my_tool_2" + + def test_resolve_handles_multiple_unique_names(self): + """Test resolve handles 
multiple tools with unique names.""" + resolver = ToolNameResolver() + + tool1 = create_resolved_tool_with_name("tool_alpha") + tool2 = create_resolved_tool_with_name("tool_beta") + tool3 = create_resolved_tool_with_name("tool_gamma") + + result1 = resolver.resolve(tool1) + result2 = resolver.resolve(tool2) + result3 = resolver.resolve(tool3) + + assert result1 == "tool_alpha" + assert result2 == "tool_beta" + assert result3 == "tool_gamma" + + def test_resolve_mixed_unique_and_duplicate_names(self): + """Test resolve handles a mix of unique and duplicate names.""" + resolver = ToolNameResolver() + + tool1 = create_resolved_tool_with_name("shared_name", connection_id="conn-1") + tool2 = create_resolved_tool_with_name("unique_name") + tool3 = create_resolved_tool_with_name("shared_name", connection_id="conn-2") + tool4 = create_resolved_tool_with_name("another_unique") + tool5 = create_resolved_tool_with_name("shared_name", connection_id="conn-3") + + assert resolver.resolve(tool1) == "shared_name" + assert resolver.resolve(tool2) == "unique_name" + assert resolver.resolve(tool3) == "shared_name_1" + assert resolver.resolve(tool4) == "another_unique" + assert resolver.resolve(tool5) == "shared_name_2" + + def test_resolve_returns_cached_name_after_duplicate_added(self): + """Test that resolving a tool again returns cached name even after duplicates are added.""" + resolver = ToolNameResolver() + + tool1 = create_resolved_tool_with_name("my_tool", connection_id="conn-1") + tool2 = create_resolved_tool_with_name("my_tool", connection_id="conn-2") + + # First resolution + first_result = resolver.resolve(tool1) + assert first_result == "my_tool" + + # Add duplicate + dup_result = resolver.resolve(tool2) + assert dup_result == "my_tool_1" + + # Resolve original again - should return cached value + second_result = resolver.resolve(tool1) + assert second_result == "my_tool" + + def test_resolve_with_connected_tool( + self, + sample_resolved_connected_tool + ): + """Test 
resolve works with connected tools.""" + resolver = ToolNameResolver() + + result = resolver.resolve(sample_resolved_connected_tool) + + assert result == sample_resolved_connected_tool.details.name + + def test_resolve_different_tools_same_details_name(self, sample_schema_definition): + """Test resolve handles different tool definitions with same details name.""" + resolver = ToolNameResolver() + + details = FoundryToolDetails( + name="shared_function", + description="A shared function", + input_schema=sample_schema_definition + ) + + mcp_def = FoundryHostedMcpTool(name="mcp_server", configuration={}) + connected_def = FoundryConnectedTool(protocol="mcp", project_connection_id="my-conn") + + tool1 = ResolvedFoundryTool(definition=mcp_def, details=details) + tool2 = ResolvedFoundryTool(definition=connected_def, details=details) + + result1 = resolver.resolve(tool1) + result2 = resolver.resolve(tool2) + + assert result1 == "shared_function" + assert result2 == "shared_function_1" + + def test_resolve_empty_name(self): + """Test resolve handles tools with empty name.""" + resolver = ToolNameResolver() + + tool = create_resolved_tool_with_name("") + + result = resolver.resolve(tool) + + assert result == "" + + def test_resolve_special_characters_in_name(self): + """Test resolve handles tools with special characters in name.""" + resolver = ToolNameResolver() + + tool1 = create_resolved_tool_with_name("my-tool_v1.0", connection_id="conn-1") + tool2 = create_resolved_tool_with_name("my-tool_v1.0", connection_id="conn-2") + + result1 = resolver.resolve(tool1) + result2 = resolver.resolve(tool2) + + assert result1 == "my-tool_v1.0" + assert result2 == "my-tool_v1.0_1" + + def test_independent_resolver_instances(self): + """Test that different resolver instances maintain independent state.""" + resolver1 = ToolNameResolver() + resolver2 = ToolNameResolver() + + tool1 = create_resolved_tool_with_name("tool_name", connection_id="conn-1") + tool2 = 
create_resolved_tool_with_name("tool_name", connection_id="conn-2") + + # Both resolvers resolve tool1 first + assert resolver1.resolve(tool1) == "tool_name" + assert resolver2.resolve(tool1) == "tool_name" + + # resolver1 resolves tool2 as duplicate + assert resolver1.resolve(tool2) == "tool_name_1" + + # resolver2 has not seen tool2 yet in its context + # but tool2 has same name, so it should be duplicate + assert resolver2.resolve(tool2) == "tool_name_1" + + def test_resolve_many_duplicates(self): + """Test resolve handles many tools with the same name.""" + resolver = ToolNameResolver() + + tools = [ + create_resolved_tool_with_name("common_name", connection_id=f"conn-{i}") + for i in range(10) + ] + + results = [resolver.resolve(tool) for tool in tools] + + expected = ["common_name"] + [f"common_name_{i}" for i in range(1, 10)] + assert results == expected + + def test_resolve_uses_tool_id_for_caching(self, sample_schema_definition): + """Test that resolve uses tool.id for caching, not just name.""" + resolver = ToolNameResolver() + + # Create two tools with same definition but different details names + definition = FoundryHostedMcpTool(name="same_definition", configuration={}) + + details1 = FoundryToolDetails( + name="function_a", + description="Function A", + input_schema=sample_schema_definition + ) + details2 = FoundryToolDetails( + name="function_b", + description="Function B", + input_schema=sample_schema_definition + ) + + tool1 = ResolvedFoundryTool(definition=definition, details=details1) + tool2 = ResolvedFoundryTool(definition=definition, details=details2) + + result1 = resolver.resolve(tool1) + result2 = resolver.resolve(tool2) + + # Both should get their respective names since they have different tool.id + assert result1 == "function_a" + assert result2 == "function_b" + + def test_resolve_idempotent_for_same_tool_id(self, sample_schema_definition): + """Test that resolve is idempotent for the same tool id.""" + resolver = ToolNameResolver() + + 
definition = FoundryHostedMcpTool(name="my_mcp", configuration={}) + details = FoundryToolDetails( + name="my_function", + description="My function", + input_schema=sample_schema_definition + ) + tool = ResolvedFoundryTool(definition=definition, details=details) + + # Call resolve many times + results = [resolver.resolve(tool) for _ in range(5)] + + # All should return the same name + assert all(r == "my_function" for r in results) + + def test_resolve_interleaved_tool_resolutions(self): + """Test resolve with interleaved resolutions of different tools.""" + resolver = ToolNameResolver() + + toolA_1 = create_resolved_tool_with_name("A", connection_id="A-1") + toolA_2 = create_resolved_tool_with_name("A", connection_id="A-2") + toolB_1 = create_resolved_tool_with_name("B", connection_id="B-1") + toolA_3 = create_resolved_tool_with_name("A", connection_id="A-3") + toolB_2 = create_resolved_tool_with_name("B", connection_id="B-2") + + assert resolver.resolve(toolA_1) == "A" + assert resolver.resolve(toolB_1) == "B" + assert resolver.resolve(toolA_2) == "A_1" + assert resolver.resolve(toolA_3) == "A_2" + assert resolver.resolve(toolB_2) == "B_1" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py index 81f0e0f0b545..89be24921f54 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py @@ -1,11 +1,13 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- +import sys from dataclasses import dataclass from typing import Optional, Union +from langchain_core.runnables import RunnableConfig from langgraph.prebuilt import ToolRuntime -from langgraph.runtime import Runtime +from langgraph.runtime import Runtime, get_runtime from azure.ai.agentserver.core import AgentRunContext from .tools._context import FoundryToolContext @@ -17,6 +19,42 @@ class LanggraphRunContext: tools: FoundryToolContext + def attach_to_config(self, config: RunnableConfig): + config["configurable"]["__foundry_hosted_agent_langgraph_run_context__"] = self + + @classmethod + def resolve(cls, + config: Optional[RunnableConfig] = None, + runtime: Optional[Union[Runtime, ToolRuntime]] = None) -> Optional["LanggraphRunContext"]: + """Resolve the LanggraphRunContext from either a RunnableConfig or a Runtime. + + :param config: Optional RunnableConfig to extract the context from. + :param runtime: Optional Runtime or ToolRuntime to extract the context from. + :return: An instance of LanggraphRunContext if found, otherwise None. 
+ """ + context: Optional["LanggraphRunContext"] = None + if config: + context = cls.from_config(config) + if not context and (r := cls._resolve_runtime(runtime)): + context = cls.from_runtime(r) + return context + + @staticmethod + def _resolve_runtime( + runtime: Optional[Union[Runtime, ToolRuntime]] = None) -> Optional[Union[Runtime, ToolRuntime]]: + if runtime: + return runtime + if sys.version_info >= (3, 11): + return get_runtime(LanggraphRunContext) + return None + + @staticmethod + def from_config(config: RunnableConfig) -> Optional["LanggraphRunContext"]: + context = config["configurable"].get("__foundry_hosted_agent_langgraph_run_context__") + if isinstance(context, LanggraphRunContext): + return context + return None + @staticmethod def from_runtime(runtime: Union[Runtime, ToolRuntime]) -> Optional["LanggraphRunContext"]: context = runtime.context diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index e8e524764db2..aae3bc32ee35 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -68,7 +68,7 @@ async def agent_run(self, context: AgentRunContext): try: lg_run_context = await self.setup_lg_run_context(context) input_arguments = await self.converter.convert_request(lg_run_context) - self.ensure_runnable_config(input_arguments) + self.ensure_runnable_config(input_arguments, lg_run_context) if not context.stream: response = await self.agent_run_non_stream(input_arguments) @@ -156,17 +156,20 @@ async def agent_run_astream(self, logger.error(f"Error during streaming agent run: {e}", exc_info=True) raise e - def ensure_runnable_config(self, input_arguments: GraphInputArguments): + def ensure_runnable_config(self, input_arguments: GraphInputArguments, context: 
LanggraphRunContext): """ Ensure the RunnableConfig is set in the input arguments. :param input_arguments: The input arguments for the agent run. :type input_arguments: GraphInputArguments + :param context: The Langgraph run context. + :type context: LanggraphRunContext """ config = input_arguments.get("config", {}) configurable = config.get("configurable", {}) configurable["thread_id"] = input_arguments["context"].agent_run.conversation_id config["configurable"] = configurable + context.attach_to_config(config) callbacks = config.get("callbacks", []) if self.azure_ai_tracer and self.azure_ai_tracer not in callbacks: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py index 828a8b42ae45..0ea9a2da80f2 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py @@ -5,7 +5,7 @@ from langchain_core.language_models import BaseChatModel -from azure.ai.agentserver.core.tools import FoundryToolLike +from azure.ai.agentserver.core.tools import FoundryToolLike, ensure_foundry_tool from ._chat_model import FoundryToolLateBindingChatModel from ._middleware import FoundryToolBindingMiddleware from ._resolver import get_registry @@ -54,7 +54,10 @@ def use_foundry_tools( # pylint: disable=C4743 if isinstance(model_or_tools, BaseChatModel): if tools is None: raise ValueError("Tools must be provided when a model is given.") - get_registry().extend(tools) - return FoundryToolLateBindingChatModel(model_or_tools, foundry_tools=tools) - get_registry().extend(model_or_tools) - return FoundryToolBindingMiddleware(model_or_tools) + foundry_tools = [ensure_foundry_tool(tool) for tool in tools] + get_registry().extend(foundry_tools) + return FoundryToolLateBindingChatModel(model_or_tools, 
runtime=None, foundry_tools=foundry_tools) + + foundry_tools = [ensure_foundry_tool(tool) for tool in model_or_tools] + get_registry().extend(foundry_tools) + return FoundryToolBindingMiddleware(foundry_tools) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py index c221910218f4..4ca422b88c41 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py @@ -30,7 +30,7 @@ class FoundryToolLateBindingChatModel(BaseChatModel): :type foundry_tools: List[FoundryToolLike] """ - def __init__(self, delegate: BaseChatModel, runtime: Runtime, foundry_tools: List[FoundryToolLike]): + def __init__(self, delegate: BaseChatModel, runtime: Optional[Runtime], foundry_tools: List[FoundryToolLike]): super().__init__() self._delegate = delegate self._runtime = runtime @@ -88,12 +88,17 @@ def bind_tools(self, # pylint: disable=C4758 return self - def _bound_delegate_for_call(self) -> Runnable[LanguageModelInput, AIMessage]: + def _bound_delegate_for_call(self, config: Optional[RunnableConfig]) -> Runnable[LanguageModelInput, AIMessage]: from .._context import LanggraphRunContext foundry_tools: Iterable[BaseTool] = [] - if (context := LanggraphRunContext.from_runtime(self._runtime)) is not None: + if context := LanggraphRunContext.resolve(config, self._runtime): foundry_tools = context.tools.resolved_tools.get(self._foundry_tools_to_bind) + elif self._foundry_tools_to_bind: + raise RuntimeError("Unable to resolve foundry tools from context, " + "if you are running in python < 3.11, " + "make sure you are passing RunnableConfig when calling model.") + all_tools = self._bound_tools.copy() all_tools.extend(foundry_tools) @@ -104,16 +109,16 @@ def _bound_delegate_for_call(self) 
-> Runnable[LanguageModelInput, AIMessage]: return self._delegate.bind_tools(all_tools, **bound_kwargs) def invoke(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any) -> Any: - return self._bound_delegate_for_call().invoke(input, config=config, **kwargs) + return self._bound_delegate_for_call(config).invoke(input, config=config, **kwargs) async def ainvoke(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any) -> Any: - return await self._bound_delegate_for_call().ainvoke(input, config=config, **kwargs) + return await self._bound_delegate_for_call(config).ainvoke(input, config=config, **kwargs) def stream(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any): - yield from self._bound_delegate_for_call().stream(input, config=config, **kwargs) + yield from self._bound_delegate_for_call(config).stream(input, config=config, **kwargs) async def astream(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any): - async for x in self._bound_delegate_for_call().astream(input, config=config, **kwargs): + async for x in self._bound_delegate_for_call(config).astream(input, config=config, **kwargs): yield x @property diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py index 5f3c6326836b..1bfef8c39f81 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py @@ -78,7 +78,7 @@ def _maybe_calling_foundry_tool(self, request: ToolCallRequest) -> ToolCallReque if (request.tool or not self._allowed_foundry_tools - or (context := LanggraphRunContext.from_runtime(request.runtime)) is None): + or not (context := LanggraphRunContext.resolve(runtime=request.runtime))): # tool is already resolved return 
request diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/graph_agent_tool.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/graph_agent_tool.py new file mode 100644 index 000000000000..c4992ba71f46 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/graph_agent_tool.py @@ -0,0 +1,104 @@ +import os + +from dotenv import load_dotenv +from langchain.chat_models import init_chat_model +from langchain_core.messages import SystemMessage, ToolMessage +from langchain_core.runnables import RunnableConfig +from langchain_core.tools import tool +from langgraph.graph import ( + END, + START, + MessagesState, + StateGraph, +) +from typing_extensions import Literal +from azure.identity import DefaultAzureCredential, get_bearer_token_provider + +from azure.ai.agentserver.langgraph import from_langgraph +from azure.ai.agentserver.langgraph.tools import use_foundry_tools + +load_dotenv() + +deployment_name = os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", "gpt-4o") +credential = DefaultAzureCredential() +token_provider = get_bearer_token_provider( + credential, "https://cognitiveservices.azure.com/.default" +) +llm = init_chat_model( + f"azure_openai:{deployment_name}", + azure_ad_token_provider=token_provider, +) +llm_with_foundry_tools = use_foundry_tools(llm, [ + { + # use the python tool to calculate what is 4 * 3.82. and then find its square root and then find the square root of that result + "type": "code_interpreter" + }, + { + # Give me the Azure CLI commands to create an Azure Container App with a managed identity. 
search Microsoft Learn + "type": "mcp", + "project_connection_id": "MicrosoftLearn" + }, + # { + # "type": "mcp", + # "project_connection_id": "FoundryMCPServerpreview" + # } +]) + + +# Nodes +async def llm_call(state: MessagesState, config: RunnableConfig): + """LLM decides whether to call a tool or not""" + + return { + "messages": [ + await llm_with_foundry_tools.ainvoke( + [ + SystemMessage( + content="You are a helpful assistant tasked with performing arithmetic on a set of inputs." + ) + ] + + state["messages"], + config=config, + ) + ] + } + + +# Conditional edge function to route to the tool node or end based upon whether the LLM made a tool call +def should_continue(state: MessagesState) -> Literal["environment", END]: + """Decide if we should continue the loop or stop based upon whether the LLM made a tool call""" + + messages = state["messages"] + last_message = messages[-1] + # If the LLM makes a tool call, then perform an action + if last_message.tool_calls: + return "Action" + # Otherwise, we stop (reply to the user) + return END + + +# Build workflow +agent_builder = StateGraph(MessagesState) + +# Add nodes +agent_builder.add_node("llm_call", llm_call) +agent_builder.add_node("environment", llm_with_foundry_tools.tool_node) + +# Add edges to connect nodes +agent_builder.add_edge(START, "llm_call") +agent_builder.add_conditional_edges( + "llm_call", + should_continue, + { + "Action": "environment", + END: END, + }, +) +agent_builder.add_edge("environment", "llm_call") + +# Compile the agent +agent = agent_builder.compile() + +if __name__ == "__main__": + adapter = from_langgraph(agent) + adapter.run() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/__init__.py deleted file mode 100644 index 4a5d26360bce..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Unit tests package diff --git 
a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/__init__.py new file mode 100644 index 000000000000..28077537d94b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/__init__.py @@ -0,0 +1,5 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/conftest.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/conftest.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/tests/conftest.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/conftest.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/test_langgraph_request_converter.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/test_langgraph_request_converter.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/__init__.py new file mode 100644 index 000000000000..28077537d94b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/__init__.py @@ -0,0 +1,5 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/conftest.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/conftest.py new file mode 100644 index 000000000000..7efc298559c1 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/conftest.py @@ -0,0 +1,271 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Shared fixtures for langgraph tools unit tests.""" +from typing import Any, Dict, List, Optional + +import pytest +from langchain_core.callbacks import CallbackManagerForLLMRun +from langchain_core.language_models import BaseChatModel +from langchain_core.messages import AIMessage +from langchain_core.outputs import ChatGeneration, ChatResult +from langchain_core.runnables import RunnableConfig +from langchain_core.tools import BaseTool, tool + +from azure.ai.agentserver.core.tools import ( + FoundryHostedMcpTool, + FoundryConnectedTool, + FoundryToolDetails, + ResolvedFoundryTool, + SchemaDefinition, + SchemaProperty, + SchemaType, +) +from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext +from azure.ai.agentserver.langgraph._context import LanggraphRunContext +from azure.ai.agentserver.langgraph.tools._context import FoundryToolContext +from azure.ai.agentserver.langgraph.tools._resolver import ResolvedTools + + +class FakeChatModel(BaseChatModel): + """A fake chat model for testing purposes that returns pre-configured responses.""" + + responses: List[AIMessage] = [] + tool_calls_list: List[List[Dict[str, Any]]] = [] + _call_count: int = 0 + _bound_tools: List[Any] = [] + _bound_kwargs: Dict[str, Any] = {} + + def __init__( + self, + responses: 
Optional[List[AIMessage]] = None, + tool_calls: Optional[List[List[Dict[str, Any]]]] = None, + **kwargs: Any, + ): + """Initialize the fake chat model. + + :param responses: List of AIMessage responses to return in sequence. + :param tool_calls: List of tool_calls lists corresponding to each response. + """ + super().__init__(**kwargs) + self.responses = responses or [] + self.tool_calls_list = tool_calls or [] + self._call_count = 0 + self._bound_tools = [] + self._bound_kwargs = {} + + @property + def _llm_type(self) -> str: + return "fake_chat_model" + + def _generate( + self, + messages: List[Any], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + """Generate a response.""" + response = self._get_next_response() + return ChatResult(generations=[ChatGeneration(message=response)]) + + def bind_tools( + self, + tools: List[Any], + **kwargs: Any, + ) -> "FakeChatModel": + """Bind tools to this model.""" + self._bound_tools = list(tools) + self._bound_kwargs.update(kwargs) + return self + + def invoke(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any) -> AIMessage: + """Synchronously invoke the model.""" + return self._get_next_response() + + async def ainvoke(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any) -> AIMessage: + """Asynchronously invoke the model.""" + return self._get_next_response() + + def stream(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any): + """Stream the response.""" + yield self._get_next_response() + + async def astream(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any): + """Async stream the response.""" + yield self._get_next_response() + + def _get_next_response(self) -> AIMessage: + """Get the next response in sequence.""" + if self._call_count < len(self.responses): + response = self.responses[self._call_count] + else: + # Default response if no more configured + 
response = AIMessage(content="Default response") + + # Apply tool calls if configured + if self._call_count < len(self.tool_calls_list): + response = AIMessage( + content=response.content, + tool_calls=self.tool_calls_list[self._call_count], + ) + + self._call_count += 1 + return response + + +@pytest.fixture +def sample_schema_definition() -> SchemaDefinition: + """Create a sample SchemaDefinition.""" + return SchemaDefinition( + type=SchemaType.OBJECT, + properties={ + "query": SchemaProperty(type=SchemaType.STRING, description="Search query"), + }, + required={"query"}, + ) + + +@pytest.fixture +def sample_code_interpreter_tool() -> FoundryHostedMcpTool: + """Create a sample code interpreter tool definition.""" + return FoundryHostedMcpTool( + name="code_interpreter", + configuration={}, + ) + + +@pytest.fixture +def sample_mcp_connected_tool() -> FoundryConnectedTool: + """Create a sample MCP connected tool definition.""" + return FoundryConnectedTool( + protocol="mcp", + project_connection_id="MicrosoftLearn", + ) + + +@pytest.fixture +def sample_tool_details(sample_schema_definition: SchemaDefinition) -> FoundryToolDetails: + """Create a sample FoundryToolDetails.""" + return FoundryToolDetails( + name="search", + description="Search for documents", + input_schema=sample_schema_definition, + ) + + +@pytest.fixture +def sample_resolved_tool( + sample_code_interpreter_tool: FoundryHostedMcpTool, + sample_tool_details: FoundryToolDetails, +) -> ResolvedFoundryTool: + """Create a sample resolved foundry tool.""" + return ResolvedFoundryTool( + definition=sample_code_interpreter_tool, + details=sample_tool_details, + ) + + +@pytest.fixture +def mock_langchain_tool() -> BaseTool: + """Create a mock LangChain BaseTool.""" + @tool + def mock_tool(query: str) -> str: + """Mock tool for testing. + + :param query: The search query. + :return: Mock result. 
+ """ + return f"Mock result for: {query}" + + return mock_tool + + +@pytest.fixture +def mock_async_langchain_tool() -> BaseTool: + """Create a mock async LangChain BaseTool.""" + @tool + async def mock_async_tool(query: str) -> str: + """Mock async tool for testing. + + :param query: The search query. + :return: Mock result. + """ + return f"Async mock result for: {query}" + + return mock_async_tool + + +@pytest.fixture +def sample_resolved_tools( + sample_code_interpreter_tool: FoundryHostedMcpTool, + mock_langchain_tool: BaseTool, +) -> ResolvedTools: + """Create a sample ResolvedTools instance.""" + resolved_foundry_tool = ResolvedFoundryTool( + definition=sample_code_interpreter_tool, + details=FoundryToolDetails( + name="mock_tool", + description="Mock tool for testing", + input_schema=SchemaDefinition( + type=SchemaType.OBJECT, + properties={ + "query": SchemaProperty(type=SchemaType.STRING, description="Query"), + }, + required={"query"}, + ), + ), + ) + return ResolvedTools(tools=[(resolved_foundry_tool, mock_langchain_tool)]) + + +@pytest.fixture +def mock_agent_run_context() -> AgentRunContext: + """Create a mock AgentRunContext.""" + payload = { + "input": [{"role": "user", "content": "Hello"}], + "stream": False, + } + return AgentRunContext(payload=payload) + + +@pytest.fixture +def mock_foundry_tool_context(sample_resolved_tools: ResolvedTools) -> FoundryToolContext: + """Create a mock FoundryToolContext.""" + return FoundryToolContext(resolved_tools=sample_resolved_tools) + + +@pytest.fixture +def mock_langgraph_run_context( + mock_agent_run_context: AgentRunContext, + mock_foundry_tool_context: FoundryToolContext, +) -> LanggraphRunContext: + """Create a mock LanggraphRunContext.""" + return LanggraphRunContext( + agent_run=mock_agent_run_context, + tools=mock_foundry_tool_context, + ) + + +@pytest.fixture +def fake_chat_model_simple() -> FakeChatModel: + """Create a simple fake chat model.""" + return FakeChatModel( + 
responses=[AIMessage(content="Hello! How can I help you?")], + ) + + +@pytest.fixture +def fake_chat_model_with_tool_call() -> FakeChatModel: + """Create a fake chat model that makes a tool call.""" + return FakeChatModel( + responses=[ + AIMessage(content=""), # First response: tool call + AIMessage(content="The answer is 42."), # Second response: final answer + ], + tool_calls=[ + [{"id": "call_1", "name": "mock_tool", "args": {"query": "test query"}}], + [], # No tool calls in final response + ], + ) + diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_agent_integration.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_agent_integration.py new file mode 100644 index 000000000000..fab1955ef415 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_agent_integration.py @@ -0,0 +1,404 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Integration-style unit tests for langgraph agents with foundry tools. + +These tests demonstrate the usage patterns similar to the tool_client_example samples, +but use mocked models and tools to avoid calling real services. 
+""" +import pytest + +from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage +from langchain_core.runnables import RunnableConfig +from langchain_core.tools import BaseTool, tool +from langgraph.graph import END, START, MessagesState, StateGraph +from langgraph.prebuilt import ToolNode +from typing_extensions import Literal + +from azure.ai.agentserver.core.tools import ( + FoundryHostedMcpTool, + FoundryToolDetails, + ResolvedFoundryTool, + SchemaDefinition, + SchemaProperty, + SchemaType, +) +from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext +from azure.ai.agentserver.langgraph._context import LanggraphRunContext +from azure.ai.agentserver.langgraph.tools import use_foundry_tools +from azure.ai.agentserver.langgraph.tools._context import FoundryToolContext +from azure.ai.agentserver.langgraph.tools._chat_model import FoundryToolLateBindingChatModel +from azure.ai.agentserver.langgraph.tools._middleware import FoundryToolBindingMiddleware +from azure.ai.agentserver.langgraph.tools._resolver import ResolvedTools, get_registry + +from .conftest import FakeChatModel + + +@pytest.fixture(autouse=True) +def clear_registry(): + """Clear the global registry before and after each test.""" + registry = get_registry() + registry.clear() + yield + registry.clear() + + +@pytest.mark.unit +class TestGraphAgentWithFoundryTools: + """Tests demonstrating graph agent usage patterns similar to graph_agent_tool.py sample.""" + + def _create_mock_langgraph_context( + self, + foundry_tool: FoundryHostedMcpTool, + langchain_tool: BaseTool, + ) -> LanggraphRunContext: + """Create a mock LanggraphRunContext with resolved tools.""" + # Create resolved foundry tool + resolved_foundry_tool = ResolvedFoundryTool( + definition=foundry_tool, + details=FoundryToolDetails( + name=langchain_tool.name, + description=langchain_tool.description or "Mock tool", + input_schema=SchemaDefinition( + type=SchemaType.OBJECT, + 
properties={ + "query": SchemaProperty(type=SchemaType.STRING, description="Query"), + }, + required={"query"}, + ), + ), + ) + + # Create resolved tools + resolved_tools = ResolvedTools(tools=[(resolved_foundry_tool, langchain_tool)]) + + # Create context + payload = {"input": [{"role": "user", "content": "test"}], "stream": False} + agent_run_context = AgentRunContext(payload=payload) + tool_context = FoundryToolContext(resolved_tools=resolved_tools) + + return LanggraphRunContext(agent_run=agent_run_context, tools=tool_context) + + @pytest.mark.asyncio + async def test_graph_agent_with_foundry_tools_no_tool_call(self): + """Test a graph agent that uses foundry tools but doesn't make a tool call.""" + # Create a mock tool + @tool + def calculate(expression: str) -> str: + """Calculate a mathematical expression. + + :param expression: The expression to calculate. + :return: The result. + """ + return "42" + + # Create foundry tool definition + foundry_tool = FoundryHostedMcpTool(name="code_interpreter", configuration={}) + foundry_tools = [{"type": "code_interpreter"}] + + # Create mock model that returns simple response (no tool call) + mock_model = FakeChatModel( + responses=[AIMessage(content="The answer is 42.")], + ) + + # Create the foundry tool binding chat model + llm_with_foundry_tools = FoundryToolLateBindingChatModel( + delegate=mock_model, # type: ignore + runtime=None, + foundry_tools=foundry_tools, + ) + + # Create context and attach + context = self._create_mock_langgraph_context(foundry_tool, calculate) + config: RunnableConfig = {"configurable": {}} + context.attach_to_config(config) + + # Define the LLM call node + async def llm_call(state: MessagesState, config: RunnableConfig): + return { + "messages": [ + await llm_with_foundry_tools.ainvoke( + [SystemMessage(content="You are a helpful assistant.")] + + state["messages"], + config=config, + ) + ] + } + + # Define routing function + def should_continue(state: MessagesState) -> Literal["tools", 
"__end__"]: + messages = state["messages"] + last_message = messages[-1] + if hasattr(last_message, 'tool_calls') and last_message.tool_calls: + return "tools" + return END + + # Build the graph + builder = StateGraph(MessagesState) + builder.add_node("llm_call", llm_call) + builder.add_node("tools", llm_with_foundry_tools.tool_node) + builder.add_edge(START, "llm_call") + builder.add_conditional_edges("llm_call", should_continue, {"tools": "tools", END: END}) + builder.add_edge("tools", "llm_call") + + graph = builder.compile() + + # Run the graph + result = await graph.ainvoke( + {"messages": [HumanMessage(content="What is 6 * 7?")]}, + config=config, + ) + + # Verify result + assert len(result["messages"]) == 2 # HumanMessage + AIMessage + assert result["messages"][-1].content == "The answer is 42." + + @pytest.mark.asyncio + async def test_graph_agent_with_tool_call(self): + """Test a graph agent that makes a tool call.""" + # Create a mock tool + @tool + def calculate(expression: str) -> str: + """Calculate a mathematical expression. + + :param expression: The expression to calculate. + :return: The result. 
+ """ + return "42" + + # Create foundry tool definition + foundry_tool = FoundryHostedMcpTool(name="code_interpreter", configuration={}) + foundry_tools = [{"type": "code_interpreter"}] + + # Create mock model that makes a tool call, then returns final answer + mock_model = FakeChatModel( + responses=[ + AIMessage( + content="", + tool_calls=[{"id": "call_1", "name": "calculate", "args": {"expression": "6 * 7"}}], + ), + AIMessage(content="The answer is 42."), + ], + ) + + # Create the foundry tool binding chat model + llm_with_foundry_tools = FoundryToolLateBindingChatModel( + delegate=mock_model, # type: ignore + runtime=None, + foundry_tools=foundry_tools, + ) + + # Create context with the calculate tool + context = self._create_mock_langgraph_context(foundry_tool, calculate) + config: RunnableConfig = {"configurable": {}} + context.attach_to_config(config) + + # Define the LLM call node + async def llm_call(state: MessagesState, config: RunnableConfig): + return { + "messages": [ + await llm_with_foundry_tools.ainvoke( + [SystemMessage(content="You are a helpful assistant.")] + + state["messages"], + config=config, + ) + ] + } + + # Define routing function + def should_continue(state: MessagesState) -> Literal["tools", "__end__"]: + messages = state["messages"] + last_message = messages[-1] + if hasattr(last_message, 'tool_calls') and last_message.tool_calls: + return "tools" + return END + + # Build the graph with a regular ToolNode (using the local tool directly for testing) + builder = StateGraph(MessagesState) + builder.add_node("llm_call", llm_call) + builder.add_node("tools", ToolNode([calculate])) + builder.add_edge(START, "llm_call") + builder.add_conditional_edges("llm_call", should_continue, {"tools": "tools", END: END}) + builder.add_edge("tools", "llm_call") + + graph = builder.compile() + + # Run the graph + result = await graph.ainvoke( + {"messages": [HumanMessage(content="What is 6 * 7?")]}, + config=config, + ) + + # Verify result - should 
have: HumanMessage, AIMessage (with tool call), ToolMessage, AIMessage (final) + assert len(result["messages"]) == 4 + assert result["messages"][-1].content == "The answer is 42." + + # Verify tool was called + tool_message = result["messages"][2] + assert isinstance(tool_message, ToolMessage) + assert tool_message.content == "42" + + +@pytest.mark.unit +class TestReactAgentWithFoundryTools: + """Tests demonstrating react agent usage patterns similar to react_agent_tool.py sample.""" + + @pytest.mark.asyncio + async def test_middleware_integration_with_foundry_tools(self): + """Test that FoundryToolBindingMiddleware correctly integrates with agents.""" + # Define foundry tools configuration + foundry_tools_config = [ + {"type": "code_interpreter"}, + {"type": "mcp", "project_connection_id": "MicrosoftLearn"}, + ] + + # Create middleware using use_foundry_tools + middleware = use_foundry_tools(foundry_tools_config) + + # Verify middleware is created correctly + assert isinstance(middleware, FoundryToolBindingMiddleware) + + # Verify dummy tool is created for the agent + assert len(middleware.tools) == 1 + assert middleware.tools[0].name == "__dummy_tool_by_foundry_middleware__" + + # Verify foundry tools are recorded + assert len(middleware._foundry_tools_to_bind) == 2 + + def test_use_foundry_tools_with_model(self): + """Test use_foundry_tools when used with a model directly.""" + foundry_tools = [{"type": "code_interpreter"}] + mock_model = FakeChatModel() + + result = use_foundry_tools(mock_model, foundry_tools) # type: ignore + + assert isinstance(result, FoundryToolLateBindingChatModel) + assert result._foundry_tools_to_bind == foundry_tools + + +@pytest.mark.unit +class TestLanggraphRunContextIntegration: + """Tests for LanggraphRunContext integration with langgraph.""" + + def test_context_attachment_to_config(self): + """Test that context is correctly attached to RunnableConfig.""" + # Create a mock context + payload = {"input": [{"role": "user", "content": 
"test"}], "stream": False} + agent_run_context = AgentRunContext(payload=payload) + tool_context = FoundryToolContext() + + context = LanggraphRunContext(agent_run=agent_run_context, tools=tool_context) + + # Create config and attach context + config: RunnableConfig = {"configurable": {}} + context.attach_to_config(config) + + # Verify context is attached + assert "__foundry_hosted_agent_langgraph_run_context__" in config["configurable"] + assert config["configurable"]["__foundry_hosted_agent_langgraph_run_context__"] is context + + def test_context_resolution_from_config(self): + """Test that context can be resolved from RunnableConfig.""" + # Create and attach context + payload = {"input": [{"role": "user", "content": "test"}], "stream": False} + agent_run_context = AgentRunContext(payload=payload) + tool_context = FoundryToolContext() + + context = LanggraphRunContext(agent_run=agent_run_context, tools=tool_context) + + config: RunnableConfig = {"configurable": {}} + context.attach_to_config(config) + + # Resolve context + resolved = LanggraphRunContext.resolve(config=config) + + assert resolved is context + + def test_context_resolution_returns_none_when_not_attached(self): + """Test that context resolution returns None when not attached.""" + config: RunnableConfig = {"configurable": {}} + + resolved = LanggraphRunContext.resolve(config=config) + + assert resolved is None + + def test_from_config_returns_context(self): + """Test LanggraphRunContext.from_config method.""" + payload = {"input": [{"role": "user", "content": "test"}], "stream": False} + agent_run_context = AgentRunContext(payload=payload) + tool_context = FoundryToolContext() + + context = LanggraphRunContext(agent_run=agent_run_context, tools=tool_context) + + config: RunnableConfig = {"configurable": {}} + context.attach_to_config(config) + + result = LanggraphRunContext.from_config(config) + + assert result is context + + def test_from_config_returns_none_for_non_context_value(self): + """Test 
that from_config returns None when value is not LanggraphRunContext.""" + config: RunnableConfig = { + "configurable": { + "__foundry_hosted_agent_langgraph_run_context__": "not a context" + } + } + + result = LanggraphRunContext.from_config(config) + + assert result is None + + +@pytest.mark.unit +class TestToolsResolutionInGraph: + """Tests for tool resolution within langgraph execution.""" + + @pytest.mark.asyncio + async def test_foundry_tools_resolved_from_context_in_graph_node(self): + """Test that foundry tools are correctly resolved from context during graph execution.""" + # Create mock tool + @tool + def search(query: str) -> str: + """Search for information. + + :param query: The search query. + :return: Search results. + """ + return f"Results for: {query}" + + # Create foundry tool and context + foundry_tool = FoundryHostedMcpTool(name="code_interpreter", configuration={}) + resolved_foundry_tool = ResolvedFoundryTool( + definition=foundry_tool, + details=FoundryToolDetails( + name="search", + description="Search tool", + input_schema=SchemaDefinition( + type=SchemaType.OBJECT, + properties={"query": SchemaProperty(type=SchemaType.STRING, description="Query")}, + required={"query"}, + ), + ), + ) + + resolved_tools = ResolvedTools(tools=[(resolved_foundry_tool, search)]) + + # Create context + payload = {"input": [{"role": "user", "content": "test"}], "stream": False} + agent_run_context = AgentRunContext(payload=payload) + tool_context = FoundryToolContext(resolved_tools=resolved_tools) + lg_context = LanggraphRunContext(agent_run=agent_run_context, tools=tool_context) + + # Create config and attach context + config: RunnableConfig = {"configurable": {}} + lg_context.attach_to_config(config) + + # Verify tools can be resolved + resolved = LanggraphRunContext.resolve(config=config) + assert resolved is not None + + tools = list(resolved.tools.resolved_tools.get(foundry_tool)) + assert len(tools) == 1 + assert tools[0].name == "search" + diff --git 
a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_builder.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_builder.py new file mode 100644 index 000000000000..1a2a5af167be --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_builder.py @@ -0,0 +1,109 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Unit tests for use_foundry_tools builder function.""" +import pytest +from typing import List + + +from azure.ai.agentserver.langgraph.tools._builder import use_foundry_tools +from azure.ai.agentserver.langgraph.tools._chat_model import FoundryToolLateBindingChatModel +from azure.ai.agentserver.langgraph.tools._middleware import FoundryToolBindingMiddleware +from azure.ai.agentserver.langgraph.tools._resolver import get_registry + +from .conftest import FakeChatModel + + +@pytest.fixture(autouse=True) +def clear_registry(): + """Clear the global registry before and after each test.""" + registry = get_registry() + registry.clear() + yield + registry.clear() + + +@pytest.mark.unit +class TestUseFoundryTools: + """Tests for use_foundry_tools function.""" + + def test_use_foundry_tools_with_tools_only_returns_middleware(self): + """Test that passing only tools returns FoundryToolBindingMiddleware.""" + tools = [{"type": "code_interpreter"}] + + result = use_foundry_tools(tools) + + assert isinstance(result, FoundryToolBindingMiddleware) + + def test_use_foundry_tools_with_model_and_tools_returns_chat_model(self): + """Test that passing model and tools returns FoundryToolLateBindingChatModel.""" + model = FakeChatModel() + tools = [{"type": "code_interpreter"}] + + result = use_foundry_tools(model, tools) # type: ignore + + assert isinstance(result, FoundryToolLateBindingChatModel) + + def 
test_use_foundry_tools_with_model_but_no_tools_raises_error(self): + """Test that passing model without tools raises ValueError.""" + model = FakeChatModel() + + with pytest.raises(ValueError, match="Tools must be provided"): + use_foundry_tools(model, None) # type: ignore + + def test_use_foundry_tools_registers_tools_in_global_registry(self): + """Test that tools are registered in the global registry.""" + tools = [ + {"type": "code_interpreter"}, + {"type": "mcp", "project_connection_id": "test"}, + ] + + use_foundry_tools(tools) + + registry = get_registry() + assert len(registry) == 2 + + def test_use_foundry_tools_with_model_registers_tools(self): + """Test that tools are registered when using with model.""" + model = FakeChatModel() + tools = [{"type": "code_interpreter"}] + + use_foundry_tools(model, tools) # type: ignore + + registry = get_registry() + assert len(registry) == 1 + + def test_use_foundry_tools_with_empty_tools_list(self): + """Test using with empty tools list.""" + tools: List = [] + + result = use_foundry_tools(tools) + + assert isinstance(result, FoundryToolBindingMiddleware) + assert len(get_registry()) == 0 + + def test_use_foundry_tools_with_mcp_tools(self): + """Test using with MCP connected tools.""" + tools = [ + { + "type": "mcp", + "project_connection_id": "MicrosoftLearn", + }, + ] + + result = use_foundry_tools(tools) + + assert isinstance(result, FoundryToolBindingMiddleware) + + def test_use_foundry_tools_with_mixed_tool_types(self): + """Test using with a mix of different tool types.""" + tools = [ + {"type": "code_interpreter"}, + {"type": "mcp", "project_connection_id": "MicrosoftLearn"}, + ] + + result = use_foundry_tools(tools) + + assert isinstance(result, FoundryToolBindingMiddleware) + assert len(get_registry()) == 2 + diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_chat_model.py 
b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_chat_model.py new file mode 100644 index 000000000000..085495a4b91e --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_chat_model.py @@ -0,0 +1,277 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Unit tests for FoundryToolLateBindingChatModel.""" +import pytest +from typing import Any, List, Optional +from unittest.mock import MagicMock, patch + +from langchain_core.messages import AIMessage, BaseMessage, HumanMessage +from langchain_core.runnables import RunnableConfig +from langchain_core.tools import BaseTool, tool + +from azure.ai.agentserver.core.tools import ( + FoundryHostedMcpTool, + FoundryToolDetails, + ResolvedFoundryTool, + SchemaDefinition, + SchemaProperty, + SchemaType, +) +from azure.ai.agentserver.langgraph._context import LanggraphRunContext +from azure.ai.agentserver.langgraph.tools._chat_model import FoundryToolLateBindingChatModel +from azure.ai.agentserver.langgraph.tools._context import FoundryToolContext +from azure.ai.agentserver.langgraph.tools._resolver import ResolvedTools + +from .conftest import FakeChatModel + + +@pytest.mark.unit +class TestFoundryToolLateBindingChatModel: + """Tests for FoundryToolLateBindingChatModel class.""" + + def test_llm_type_property(self): + """Test the _llm_type property returns correct value.""" + delegate = FakeChatModel() + foundry_tools = [{"type": "code_interpreter"}] + + model = FoundryToolLateBindingChatModel( + delegate=delegate, # type: ignore + runtime=None, + foundry_tools=foundry_tools, + ) + + assert "foundry_tool_binding_model" in model._llm_type + assert "fake_chat_model" in model._llm_type + + def test_bind_tools_records_tools(self): + """Test that bind_tools records tools for later use.""" + delegate = 
FakeChatModel() + foundry_tools = [{"type": "code_interpreter"}] + + model = FoundryToolLateBindingChatModel( + delegate=delegate, # type: ignore + runtime=None, + foundry_tools=foundry_tools, + ) + + @tool + def my_tool(x: str) -> str: + """My tool.""" + return x + + result = model.bind_tools([my_tool], tool_choice="auto") + + # Should return self for chaining + assert result is model + # Tools should be recorded + assert len(model._bound_tools) == 1 + assert model._bound_kwargs.get("tool_choice") == "auto" + + def test_bind_tools_multiple_times(self): + """Test binding tools multiple times accumulates them.""" + delegate = FakeChatModel() + foundry_tools = [{"type": "code_interpreter"}] + + model = FoundryToolLateBindingChatModel( + delegate=delegate, # type: ignore + runtime=None, + foundry_tools=foundry_tools, + ) + + @tool + def tool1(x: str) -> str: + """Tool 1.""" + return x + + @tool + def tool2(x: str) -> str: + """Tool 2.""" + return x + + model.bind_tools([tool1]) + model.bind_tools([tool2]) + + assert len(model._bound_tools) == 2 + + def test_tool_node_property(self): + """Test that tool_node property returns a ToolNode.""" + delegate = FakeChatModel() + foundry_tools = [{"type": "code_interpreter"}] + + model = FoundryToolLateBindingChatModel( + delegate=delegate, # type: ignore + runtime=None, + foundry_tools=foundry_tools, + ) + + tool_node = model.tool_node + + # Should return a ToolNode + assert tool_node is not None + + def test_tool_node_wrapper_property(self): + """Test that tool_node_wrapper returns correct wrappers.""" + delegate = FakeChatModel() + foundry_tools = [{"type": "code_interpreter"}] + + model = FoundryToolLateBindingChatModel( + delegate=delegate, # type: ignore + runtime=None, + foundry_tools=foundry_tools, + ) + + wrappers = model.tool_node_wrapper + + assert "wrap_tool_call" in wrappers + assert "awrap_tool_call" in wrappers + assert callable(wrappers["wrap_tool_call"]) + assert callable(wrappers["awrap_tool_call"]) + + def 
test_invoke_with_context( + self, + mock_langgraph_run_context: LanggraphRunContext, + sample_code_interpreter_tool: FoundryHostedMcpTool, + ): + """Test invoking model with context attached.""" + delegate = FakeChatModel( + responses=[AIMessage(content="Hello from model!")], + ) + foundry_tools = [{"type": "code_interpreter"}] + + model = FoundryToolLateBindingChatModel( + delegate=delegate, # type: ignore + runtime=None, + foundry_tools=foundry_tools, + ) + + # Attach context to config + config: RunnableConfig = {"configurable": {}} + mock_langgraph_run_context.attach_to_config(config) + + input_messages = [HumanMessage(content="Hello")] + result = model.invoke(input_messages, config=config) + + assert result.content == "Hello from model!" + + @pytest.mark.asyncio + async def test_ainvoke_with_context( + self, + mock_langgraph_run_context: LanggraphRunContext, + ): + """Test async invoking model with context attached.""" + delegate = FakeChatModel( + responses=[AIMessage(content="Async hello from model!")], + ) + foundry_tools = [{"type": "code_interpreter"}] + + model = FoundryToolLateBindingChatModel( + delegate=delegate, # type: ignore + runtime=None, + foundry_tools=foundry_tools, + ) + + # Attach context to config + config: RunnableConfig = {"configurable": {}} + mock_langgraph_run_context.attach_to_config(config) + + input_messages = [HumanMessage(content="Hello")] + result = await model.ainvoke(input_messages, config=config) + + assert result.content == "Async hello from model!" 
+ + def test_invoke_without_context_and_no_foundry_tools(self): + """Test invoking model without context and no foundry tools.""" + delegate = FakeChatModel( + responses=[AIMessage(content="Hello!")], + ) + # No foundry tools + foundry_tools: List[Any] = [] + + model = FoundryToolLateBindingChatModel( + delegate=delegate, # type: ignore + runtime=None, + foundry_tools=foundry_tools, + ) + + config: RunnableConfig = {"configurable": {}} + input_messages = [HumanMessage(content="Hello")] + result = model.invoke(input_messages, config=config) + + # Should work since no foundry tools need resolution + assert result.content == "Hello!" + + def test_invoke_without_context_raises_error_when_foundry_tools_present(self): + """Test that invoking without context raises error when foundry tools are set.""" + delegate = FakeChatModel( + responses=[AIMessage(content="Hello!")], + ) + foundry_tools = [{"type": "code_interpreter"}] + + model = FoundryToolLateBindingChatModel( + delegate=delegate, # type: ignore + runtime=None, + foundry_tools=foundry_tools, + ) + + config: RunnableConfig = {"configurable": {}} + input_messages = [HumanMessage(content="Hello")] + + with pytest.raises(RuntimeError, match="Unable to resolve foundry tools from context"): + model.invoke(input_messages, config=config) + + def test_stream_with_context( + self, + mock_langgraph_run_context: LanggraphRunContext, + ): + """Test streaming model with context attached.""" + delegate = FakeChatModel( + responses=[AIMessage(content="Streamed response!")], + ) + foundry_tools = [{"type": "code_interpreter"}] + + model = FoundryToolLateBindingChatModel( + delegate=delegate, # type: ignore + runtime=None, + foundry_tools=foundry_tools, + ) + + # Attach context to config + config: RunnableConfig = {"configurable": {}} + mock_langgraph_run_context.attach_to_config(config) + + input_messages = [HumanMessage(content="Hello")] + results = list(model.stream(input_messages, config=config)) + + assert len(results) == 1 + 
assert results[0].content == "Streamed response!" + + @pytest.mark.asyncio + async def test_astream_with_context( + self, + mock_langgraph_run_context: LanggraphRunContext, + ): + """Test async streaming model with context attached.""" + delegate = FakeChatModel( + responses=[AIMessage(content="Async streamed response!")], + ) + foundry_tools = [{"type": "code_interpreter"}] + + model = FoundryToolLateBindingChatModel( + delegate=delegate, # type: ignore + runtime=None, + foundry_tools=foundry_tools, + ) + + # Attach context to config + config: RunnableConfig = {"configurable": {}} + mock_langgraph_run_context.attach_to_config(config) + + input_messages = [HumanMessage(content="Hello")] + results = [] + async for chunk in model.astream(input_messages, config=config): + results.append(chunk) + + assert len(results) == 1 + assert results[0].content == "Async streamed response!" + diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_context.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_context.py new file mode 100644 index 000000000000..577d4e6e4e6f --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_context.py @@ -0,0 +1,36 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Unit tests for FoundryToolContext.""" +import pytest + +from azure.ai.agentserver.langgraph.tools._context import FoundryToolContext +from azure.ai.agentserver.langgraph.tools._resolver import ResolvedTools + + +@pytest.mark.unit +class TestFoundryToolContext: + """Tests for FoundryToolContext class.""" + + def test_create_with_resolved_tools(self, sample_resolved_tools: ResolvedTools): + """Test creating FoundryToolContext with resolved tools.""" + context = FoundryToolContext(resolved_tools=sample_resolved_tools) + + assert context.resolved_tools is sample_resolved_tools + + def test_create_with_default_resolved_tools(self): + """Test creating FoundryToolContext with default empty resolved tools.""" + context = FoundryToolContext() + + # Default should be empty ResolvedTools + assert context.resolved_tools is not None + tools_list = list(context.resolved_tools) + assert len(tools_list) == 0 + + def test_resolved_tools_is_iterable(self, sample_resolved_tools: ResolvedTools): + """Test that resolved_tools can be iterated.""" + context = FoundryToolContext(resolved_tools=sample_resolved_tools) + + tools_list = list(context.resolved_tools) + assert len(tools_list) == 1 + diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_middleware.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_middleware.py new file mode 100644 index 000000000000..89290a58f97c --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_middleware.py @@ -0,0 +1,197 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Unit tests for FoundryToolBindingMiddleware.""" +import pytest +from typing import Any, List +from unittest.mock import AsyncMock, MagicMock + +from langchain.agents.middleware.types import ModelRequest +from langchain_core.messages import AIMessage, ToolMessage +from langchain_core.tools import tool +from langgraph.prebuilt.tool_node import ToolCallRequest + +from azure.ai.agentserver.langgraph.tools._middleware import FoundryToolBindingMiddleware + +from .conftest import FakeChatModel + + +@pytest.mark.unit +class TestFoundryToolBindingMiddleware: + """Tests for FoundryToolBindingMiddleware class.""" + + def test_init_with_foundry_tools_creates_dummy_tool(self): + """Test that initialization with foundry tools creates a dummy tool.""" + foundry_tools = [{"type": "code_interpreter"}] + + middleware = FoundryToolBindingMiddleware(foundry_tools) + + # Should have one dummy tool + assert len(middleware.tools) == 1 + assert middleware.tools[0].name == "__dummy_tool_by_foundry_middleware__" + + def test_init_without_foundry_tools_no_dummy_tool(self): + """Test that initialization without foundry tools creates no dummy tool.""" + foundry_tools: List[Any] = [] + + middleware = FoundryToolBindingMiddleware(foundry_tools) + + assert len(middleware.tools) == 0 + + def test_wrap_model_call_wraps_model_with_foundry_binding(self): + """Test that wrap_model_call wraps the model correctly.""" + foundry_tools = [{"type": "code_interpreter"}] + middleware = FoundryToolBindingMiddleware(foundry_tools) + + # Create mock model and request + mock_model = FakeChatModel() + mock_runtime = MagicMock() + mock_request = MagicMock(spec=ModelRequest) + mock_request.model = mock_model + mock_request.runtime = mock_runtime + mock_request.tools = [] + + # Create a modified request to return + modified_request = MagicMock(spec=ModelRequest) + mock_request.override = MagicMock(return_value=modified_request) + + # Mock handler + 
expected_result = AIMessage(content="Result") + mock_handler = MagicMock(return_value=expected_result) + + result = middleware.wrap_model_call(mock_request, mock_handler) + + # Handler should be called with modified request + mock_handler.assert_called_once() + assert result == expected_result + + @pytest.mark.asyncio + async def test_awrap_model_call_wraps_model_async(self): + """Test that awrap_model_call wraps the model correctly in async.""" + foundry_tools = [{"type": "code_interpreter"}] + middleware = FoundryToolBindingMiddleware(foundry_tools) + + # Create mock model and request + mock_model = FakeChatModel() + mock_runtime = MagicMock() + mock_request = MagicMock(spec=ModelRequest) + mock_request.model = mock_model + mock_request.runtime = mock_runtime + mock_request.tools = [] + + # Create a modified request to return + modified_request = MagicMock(spec=ModelRequest) + mock_request.override = MagicMock(return_value=modified_request) + + # Mock async handler + expected_result = AIMessage(content="Async Result") + mock_handler = AsyncMock(return_value=expected_result) + + result = await middleware.awrap_model_call(mock_request, mock_handler) + + # Handler should be called + mock_handler.assert_awaited_once() + assert result == expected_result + + def test_wrap_model_without_foundry_tools_returns_unchanged(self): + """Test that wrap_model returns unchanged request when no foundry tools.""" + foundry_tools: List[Any] = [] + middleware = FoundryToolBindingMiddleware(foundry_tools) + + mock_model = FakeChatModel() + mock_request = MagicMock(spec=ModelRequest) + mock_request.model = mock_model + mock_request.tools = [] + + # Should not call override + mock_request.override = MagicMock() + + mock_handler = MagicMock(return_value=AIMessage(content="Result")) + + middleware.wrap_model_call(mock_request, mock_handler) + + # Handler should be called with original request + mock_handler.assert_called_once_with(mock_request) + + def 
test_remove_dummy_tool_from_request(self): + """Test that dummy tool is removed from the request tools.""" + foundry_tools = [{"type": "code_interpreter"}] + middleware = FoundryToolBindingMiddleware(foundry_tools) + + # Create request with dummy tool + dummy = middleware._dummy_tool() + + @tool + def real_tool(x: str) -> str: + """Real tool.""" + return x + + mock_request = MagicMock(spec=ModelRequest) + mock_request.tools = [dummy, real_tool] + + # Call internal method + result = middleware._remove_dummy_tool(mock_request) + + # Should only have real_tool + assert len(result) == 1 + assert result[0] is real_tool + + def test_wrap_tool_call_delegates_to_wrapper(self): + """Test that wrap_tool_call delegates to FoundryToolCallWrapper.""" + foundry_tools = [{"type": "code_interpreter"}] + middleware = FoundryToolBindingMiddleware(foundry_tools) + + # Create mock tool call request + mock_request = MagicMock(spec=ToolCallRequest) + mock_request.tool = None + mock_request.tool_call = {"name": "test_tool", "id": "call_1"} + mock_request.state = {} + mock_request.runtime = None + + # Mock handler + expected_result = ToolMessage(content="Result", tool_call_id="call_1") + mock_handler = MagicMock(return_value=expected_result) + + result = middleware.wrap_tool_call(mock_request, mock_handler) + + # Handler should be called + mock_handler.assert_called_once() + assert result == expected_result + + @pytest.mark.asyncio + async def test_awrap_tool_call_delegates_to_wrapper_async(self): + """Test that awrap_tool_call delegates to FoundryToolCallWrapper async.""" + foundry_tools = [{"type": "code_interpreter"}] + middleware = FoundryToolBindingMiddleware(foundry_tools) + + # Create mock tool call request + mock_request = MagicMock(spec=ToolCallRequest) + mock_request.tool = None + mock_request.tool_call = {"name": "test_tool", "id": "call_1"} + mock_request.state = {} + mock_request.runtime = None + + # Mock async handler + expected_result = ToolMessage(content="Async Result", 
tool_call_id="call_1") + mock_handler = AsyncMock(return_value=expected_result) + + result = await middleware.awrap_tool_call(mock_request, mock_handler) + + # Handler should be awaited + mock_handler.assert_awaited_once() + assert result == expected_result + + def test_middleware_with_multiple_foundry_tools(self): + """Test middleware initialization with multiple foundry tools.""" + foundry_tools = [ + {"type": "code_interpreter"}, + {"type": "mcp", "project_connection_id": "test"}, + ] + + middleware = FoundryToolBindingMiddleware(foundry_tools) + + # Should still only have one dummy tool + assert len(middleware.tools) == 1 + # But should have all foundry tools registered + assert len(middleware._foundry_tools_to_bind) == 2 + diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_resolver.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_resolver.py new file mode 100644 index 000000000000..985ed4caec49 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_resolver.py @@ -0,0 +1,502 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Unit tests for ResolvedTools and FoundryLangChainToolResolver.""" +import pytest +from unittest.mock import AsyncMock, MagicMock, patch + +from langchain_core.tools import BaseTool, StructuredTool, tool +from pydantic import BaseModel + +from azure.ai.agentserver.core.tools import (FoundryConnectedTool, FoundryHostedMcpTool, FoundryToolDetails, + ResolvedFoundryTool, SchemaDefinition, SchemaProperty, SchemaType) +from azure.ai.agentserver.langgraph.tools._resolver import ( + ResolvedTools, + FoundryLangChainToolResolver, + get_registry, +) + + +@pytest.mark.unit +class TestResolvedTools: + """Tests for ResolvedTools class.""" + + def test_create_empty_resolved_tools(self): + """Test creating an empty ResolvedTools.""" + resolved = ResolvedTools(tools=[]) + + tools_list = list(resolved) + assert len(tools_list) == 0 + + def test_create_with_single_tool( + self, + sample_code_interpreter_tool: FoundryHostedMcpTool, + mock_langchain_tool: BaseTool, + ): + """Test creating ResolvedTools with a single tool.""" + resolved_foundry_tool = ResolvedFoundryTool( + definition=sample_code_interpreter_tool, + details=FoundryToolDetails( + name="test_tool", + description="A test tool", + input_schema=SchemaDefinition( + type=SchemaType.OBJECT, + properties={ + "input": SchemaProperty(type=SchemaType.STRING, description="Input"), + }, + required={"input"}, + ), + ), + ) + resolved = ResolvedTools(tools=[(resolved_foundry_tool, mock_langchain_tool)]) + + tools_list = list(resolved) + assert len(tools_list) == 1 + assert tools_list[0] is mock_langchain_tool + + def test_create_with_multiple_tools( + self, + sample_code_interpreter_tool: FoundryHostedMcpTool, + sample_mcp_connected_tool: FoundryConnectedTool, + ): + """Test creating ResolvedTools with multiple tools.""" + @tool + def tool1(query: str) -> str: + """Tool 1.""" + return "result1" + + @tool + def tool2(query: str) -> str: + """Tool 2.""" + return "result2" 
+ + resolved_tool1 = ResolvedFoundryTool( + definition=sample_code_interpreter_tool, + details=FoundryToolDetails( + name="tool1", + description="Tool 1", + input_schema=SchemaDefinition( + type=SchemaType.OBJECT, + properties={"query": SchemaProperty(type=SchemaType.STRING, description="Query")}, + required={"query"}, + ), + ), + ) + resolved_tool2 = ResolvedFoundryTool( + definition=sample_mcp_connected_tool, + details=FoundryToolDetails( + name="tool2", + description="Tool 2", + input_schema=SchemaDefinition( + type=SchemaType.OBJECT, + properties={"query": SchemaProperty(type=SchemaType.STRING, description="Query")}, + required={"query"}, + ), + ), + ) + + resolved = ResolvedTools(tools=[ + (resolved_tool1, tool1), + (resolved_tool2, tool2), + ]) + + tools_list = list(resolved) + assert len(tools_list) == 2 + + def test_get_tool_by_foundry_tool_like( + self, + sample_code_interpreter_tool: FoundryHostedMcpTool, + mock_langchain_tool: BaseTool, + ): + """Test getting tools by FoundryToolLike.""" + resolved_foundry_tool = ResolvedFoundryTool( + definition=sample_code_interpreter_tool, + details=FoundryToolDetails( + name="test_tool", + description="A test tool", + input_schema=SchemaDefinition( + type=SchemaType.OBJECT, + properties={ + "input": SchemaProperty(type=SchemaType.STRING, description="Input"), + }, + required={"input"}, + ), + ), + ) + resolved = ResolvedTools(tools=[(resolved_foundry_tool, mock_langchain_tool)]) + + # Get by the original foundry tool definition + tools = list(resolved.get(sample_code_interpreter_tool)) + assert len(tools) == 1 + assert tools[0] is mock_langchain_tool + + def test_get_tools_by_list_of_foundry_tools( + self, + sample_code_interpreter_tool: FoundryHostedMcpTool, + sample_mcp_connected_tool: FoundryConnectedTool, + ): + """Test getting tools by a list of FoundryToolLike.""" + @tool + def tool1(query: str) -> str: + """Tool 1.""" + return "result1" + + @tool + def tool2(query: str) -> str: + """Tool 2.""" + return 
"result2" + + resolved_tool1 = ResolvedFoundryTool( + definition=sample_code_interpreter_tool, + details=FoundryToolDetails( + name="tool1", + description="Tool 1", + input_schema=SchemaDefinition( + type=SchemaType.OBJECT, + properties={"query": SchemaProperty(type=SchemaType.STRING, description="Query")}, + required={"query"}, + ), + ), + ) + resolved_tool2 = ResolvedFoundryTool( + definition=sample_mcp_connected_tool, + details=FoundryToolDetails( + name="tool2", + description="Tool 2", + input_schema=SchemaDefinition( + type=SchemaType.OBJECT, + properties={"query": SchemaProperty(type=SchemaType.STRING, description="Query")}, + required={"query"}, + ), + ), + ) + + resolved = ResolvedTools(tools=[ + (resolved_tool1, tool1), + (resolved_tool2, tool2), + ]) + + # Get by list of foundry tools + tools = list(resolved.get([sample_code_interpreter_tool, sample_mcp_connected_tool])) + assert len(tools) == 2 + + def test_get_all_tools_when_no_filter( + self, + sample_code_interpreter_tool: FoundryHostedMcpTool, + mock_langchain_tool: BaseTool, + ): + """Test getting all tools when no filter is provided.""" + resolved_foundry_tool = ResolvedFoundryTool( + definition=sample_code_interpreter_tool, + details=FoundryToolDetails( + name="test_tool", + description="A test tool", + input_schema=SchemaDefinition( + type=SchemaType.OBJECT, + properties={ + "input": SchemaProperty(type=SchemaType.STRING, description="Input"), + }, + required={"input"}, + ), + ), + ) + resolved = ResolvedTools(tools=[(resolved_foundry_tool, mock_langchain_tool)]) + + # Get all tools (no filter) + tools = list(resolved.get()) + assert len(tools) == 1 + + def test_get_returns_empty_for_unknown_tool( + self, + sample_code_interpreter_tool: FoundryHostedMcpTool, + sample_mcp_connected_tool: FoundryConnectedTool, + mock_langchain_tool: BaseTool, + ): + """Test that get returns empty when requesting unknown tool.""" + resolved_foundry_tool = ResolvedFoundryTool( + 
definition=sample_code_interpreter_tool, + details=FoundryToolDetails( + name="test_tool", + description="A test tool", + input_schema=SchemaDefinition( + type=SchemaType.OBJECT, + properties={ + "input": SchemaProperty(type=SchemaType.STRING, description="Input"), + }, + required={"input"}, + ), + ), + ) + resolved = ResolvedTools(tools=[(resolved_foundry_tool, mock_langchain_tool)]) + + # Get by a different foundry tool (not in resolved) + tools = list(resolved.get(sample_mcp_connected_tool)) + assert len(tools) == 0 + + def test_iteration_over_resolved_tools( + self, + sample_code_interpreter_tool: FoundryHostedMcpTool, + mock_langchain_tool: BaseTool, + ): + """Test iterating over ResolvedTools.""" + resolved_foundry_tool = ResolvedFoundryTool( + definition=sample_code_interpreter_tool, + details=FoundryToolDetails( + name="test_tool", + description="A test tool", + input_schema=SchemaDefinition( + type=SchemaType.OBJECT, + properties={ + "input": SchemaProperty(type=SchemaType.STRING, description="Input"), + }, + required={"input"}, + ), + ), + ) + resolved = ResolvedTools(tools=[(resolved_foundry_tool, mock_langchain_tool)]) + + # Iterate using for loop + count = 0 + for t in resolved: + assert t is mock_langchain_tool + count += 1 + assert count == 1 + + +@pytest.mark.unit +class TestFoundryLangChainToolResolver: + """Tests for FoundryLangChainToolResolver class.""" + + def test_init_with_default_name_resolver(self): + """Test initialization with default name resolver.""" + resolver = FoundryLangChainToolResolver() + + assert resolver._name_resolver is not None + + def test_init_with_custom_name_resolver(self): + """Test initialization with custom name resolver.""" + from azure.ai.agentserver.core.tools.utils import ToolNameResolver + + custom_resolver = ToolNameResolver() + resolver = FoundryLangChainToolResolver(name_resolver=custom_resolver) + + assert resolver._name_resolver is custom_resolver + + def 
test_create_pydantic_model_with_required_fields(self): + """Test creating a Pydantic model with required fields.""" + input_schema = SchemaDefinition( + type=SchemaType.OBJECT, + properties={ + "query": SchemaProperty(type=SchemaType.STRING, description="Search query"), + "limit": SchemaProperty(type=SchemaType.INTEGER, description="Max results"), + }, + required={"query"}, + ) + + model = FoundryLangChainToolResolver._create_pydantic_model("test_tool", input_schema) + + assert issubclass(model, BaseModel) + # Check that the model has the expected fields + assert "query" in model.model_fields + assert "limit" in model.model_fields + + def test_create_pydantic_model_with_no_required_fields(self): + """Test creating a Pydantic model with no required fields.""" + input_schema = SchemaDefinition( + type=SchemaType.OBJECT, + properties={ + "query": SchemaProperty(type=SchemaType.STRING, description="Search query"), + }, + required=set(), + ) + + model = FoundryLangChainToolResolver._create_pydantic_model("optional_tool", input_schema) + + assert issubclass(model, BaseModel) + assert "query" in model.model_fields + # Optional field should have None as default + assert model.model_fields["query"].default is None + + def test_create_pydantic_model_with_special_characters_in_name(self): + """Test creating a Pydantic model with special characters in tool name.""" + input_schema = SchemaDefinition( + type=SchemaType.OBJECT, + properties={ + "input": SchemaProperty(type=SchemaType.STRING, description="Input"), + }, + required={"input"}, + ) + + model = FoundryLangChainToolResolver._create_pydantic_model("my-tool name", input_schema) + + assert issubclass(model, BaseModel) + # Name should be sanitized + assert "-Input" in model.__name__ or "Input" in model.__name__ + + def test_create_structured_tool(self): + """Test creating a StructuredTool from a resolved foundry tool.""" + resolver = FoundryLangChainToolResolver() + + foundry_tool = FoundryHostedMcpTool(name="test_tool", 
configuration={}) + resolved_tool = ResolvedFoundryTool( + definition=foundry_tool, + details=FoundryToolDetails( + name="search", + description="Search for documents", + input_schema=SchemaDefinition( + type=SchemaType.OBJECT, + properties={ + "query": SchemaProperty(type=SchemaType.STRING, description="Search query"), + }, + required={"query"}, + ), + ), + ) + + structured_tool = resolver._create_structured_tool(resolved_tool) + + assert isinstance(structured_tool, StructuredTool) + assert structured_tool.description == "Search for documents" + assert structured_tool.coroutine is not None # Should have async function + + @pytest.mark.asyncio + async def test_resolve_from_registry(self): + """Test resolving tools from the global registry.""" + resolver = FoundryLangChainToolResolver() + + # Mock the AgentServerContext + mock_context = MagicMock() + mock_catalog = AsyncMock() + mock_context.tools.catalog.list = mock_catalog + + foundry_tool = FoundryHostedMcpTool(name="test_tool", configuration={}) + resolved_foundry_tool = ResolvedFoundryTool( + definition=foundry_tool, + details=FoundryToolDetails( + name="search", + description="Search tool", + input_schema=SchemaDefinition( + type=SchemaType.OBJECT, + properties={ + "query": SchemaProperty(type=SchemaType.STRING, description="Query"), + }, + required={"query"}, + ), + ), + ) + mock_catalog.return_value = [resolved_foundry_tool] + + # Add tool to registry + registry = get_registry() + registry.clear() + registry.append({"type": "code_interpreter"}) + + with patch("azure.ai.agentserver.langgraph.tools._resolver.AgentServerContext.get", return_value=mock_context): + result = await resolver.resolve_from_registry() + + assert isinstance(result, ResolvedTools) + mock_catalog.assert_called_once() + + # Clean up registry + registry.clear() + + @pytest.mark.asyncio + async def test_resolve_with_foundry_tools_list(self): + """Test resolving a list of foundry tools.""" + resolver = FoundryLangChainToolResolver() + + # 
Mock the AgentServerContext + mock_context = MagicMock() + mock_catalog = AsyncMock() + mock_context.tools.catalog.list = mock_catalog + + foundry_tool = FoundryHostedMcpTool(name="code_interpreter", configuration={}) + resolved_foundry_tool = ResolvedFoundryTool( + definition=foundry_tool, + details=FoundryToolDetails( + name="execute_code", + description="Execute code", + input_schema=SchemaDefinition( + type=SchemaType.OBJECT, + properties={ + "code": SchemaProperty(type=SchemaType.STRING, description="Code to execute"), + }, + required={"code"}, + ), + ), + ) + mock_catalog.return_value = [resolved_foundry_tool] + + foundry_tools = [{"type": "code_interpreter"}] + + with patch("azure.ai.agentserver.langgraph.tools._resolver.AgentServerContext.get", return_value=mock_context): + result = await resolver.resolve(foundry_tools) + + assert isinstance(result, ResolvedTools) + tools_list = list(result) + assert len(tools_list) == 1 + assert isinstance(tools_list[0], StructuredTool) + + @pytest.mark.asyncio + async def test_resolve_empty_list(self): + """Test resolving an empty list of foundry tools.""" + resolver = FoundryLangChainToolResolver() + + # Mock the AgentServerContext + mock_context = MagicMock() + mock_catalog = AsyncMock() + mock_context.tools.catalog.list = mock_catalog + mock_catalog.return_value = [] + + with patch("azure.ai.agentserver.langgraph.tools._resolver.AgentServerContext.get", return_value=mock_context): + result = await resolver.resolve([]) + + assert isinstance(result, ResolvedTools) + tools_list = list(result) + assert len(tools_list) == 0 + + +@pytest.mark.unit +class TestGetRegistry: + """Tests for the get_registry function.""" + + def test_get_registry_returns_list(self): + """Test that get_registry returns a list.""" + registry = get_registry() + + assert isinstance(registry, list) + + def test_registry_is_singleton(self): + """Test that get_registry returns the same list instance.""" + registry1 = get_registry() + registry2 = 
get_registry() + + assert registry1 is registry2 + + def test_registry_can_be_modified(self): + """Test that the registry can be modified.""" + registry = get_registry() + original_length = len(registry) + + registry.append({"type": "test_tool"}) + + assert len(registry) == original_length + 1 + + # Clean up + registry.pop() + + def test_registry_extend(self): + """Test extending the registry with multiple tools.""" + registry = get_registry() + registry.clear() + + tools = [ + {"type": "code_interpreter"}, + {"type": "mcp", "project_connection_id": "test"}, + ] + registry.extend(tools) + + assert len(registry) == 2 + + # Clean up + registry.clear() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_tool_node.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_tool_node.py new file mode 100644 index 000000000000..1c46e58785bc --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_tool_node.py @@ -0,0 +1,179 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Unit tests for FoundryToolCallWrapper and FoundryToolNodeWrappers.""" +import pytest +from typing import Any, List +from unittest.mock import AsyncMock, MagicMock + +from langchain_core.messages import ToolMessage +from langchain_core.tools import tool +from langgraph.prebuilt.tool_node import ToolCallRequest +from langgraph.types import Command + +from azure.ai.agentserver.langgraph.tools._tool_node import ( + FoundryToolCallWrapper, + FoundryToolNodeWrappers, +) + + +@pytest.mark.unit +class TestFoundryToolCallWrapper: + """Tests for FoundryToolCallWrapper class.""" + + def test_as_wrappers_returns_typed_dict(self): + """Test that as_wrappers returns a FoundryToolNodeWrappers TypedDict.""" + foundry_tools = [{"type": "code_interpreter"}] + wrapper = FoundryToolCallWrapper(foundry_tools) + + result = wrapper.as_wrappers() + + assert isinstance(result, dict) + assert "wrap_tool_call" in result + assert "awrap_tool_call" in result + assert callable(result["wrap_tool_call"]) + assert callable(result["awrap_tool_call"]) + + def test_call_tool_with_already_resolved_tool(self): + """Test that call_tool passes through when tool is already resolved.""" + foundry_tools = [{"type": "code_interpreter"}] + wrapper = FoundryToolCallWrapper(foundry_tools) + + # Create request with tool already set + @tool + def existing_tool(x: str) -> str: + """Existing tool.""" + return f"Result: {x}" + + mock_request = MagicMock(spec=ToolCallRequest) + mock_request.tool = existing_tool + mock_request.tool_call = {"name": "existing_tool", "id": "call_1"} + + expected_result = ToolMessage(content="Result: test", tool_call_id="call_1") + mock_invocation = MagicMock(return_value=expected_result) + + result = wrapper.call_tool(mock_request, mock_invocation) + + # Should pass through original request + mock_invocation.assert_called_once_with(mock_request) + assert result == expected_result + + def 
test_call_tool_with_no_foundry_tools(self): + """Test that call_tool passes through when no foundry tools configured.""" + foundry_tools: List[Any] = [] + wrapper = FoundryToolCallWrapper(foundry_tools) + + mock_request = MagicMock(spec=ToolCallRequest) + mock_request.tool = None + mock_request.tool_call = {"name": "some_tool", "id": "call_1"} + + expected_result = ToolMessage(content="Result", tool_call_id="call_1") + mock_invocation = MagicMock(return_value=expected_result) + + result = wrapper.call_tool(mock_request, mock_invocation) + + mock_invocation.assert_called_once_with(mock_request) + assert result == expected_result + + @pytest.mark.asyncio + async def test_call_tool_async_with_already_resolved_tool(self): + """Test that call_tool_async passes through when tool is already resolved.""" + foundry_tools = [{"type": "code_interpreter"}] + wrapper = FoundryToolCallWrapper(foundry_tools) + + @tool + def existing_tool(x: str) -> str: + """Existing tool.""" + return f"Result: {x}" + + mock_request = MagicMock(spec=ToolCallRequest) + mock_request.tool = existing_tool + mock_request.tool_call = {"name": "existing_tool", "id": "call_1"} + + expected_result = ToolMessage(content="Async Result", tool_call_id="call_1") + mock_invocation = AsyncMock(return_value=expected_result) + + result = await wrapper.call_tool_async(mock_request, mock_invocation) + + mock_invocation.assert_awaited_once_with(mock_request) + assert result == expected_result + + @pytest.mark.asyncio + async def test_call_tool_async_with_no_foundry_tools(self): + """Test that call_tool_async passes through when no foundry tools configured.""" + foundry_tools: List[Any] = [] + wrapper = FoundryToolCallWrapper(foundry_tools) + + mock_request = MagicMock(spec=ToolCallRequest) + mock_request.tool = None + mock_request.tool_call = {"name": "some_tool", "id": "call_1"} + + expected_result = ToolMessage(content="Result", tool_call_id="call_1") + mock_invocation = AsyncMock(return_value=expected_result) + + 
result = await wrapper.call_tool_async(mock_request, mock_invocation) + + mock_invocation.assert_awaited_once_with(mock_request) + assert result == expected_result + + def test_call_tool_returns_command_result(self): + """Test that call_tool can return Command objects.""" + foundry_tools: List[Any] = [] + wrapper = FoundryToolCallWrapper(foundry_tools) + + mock_request = MagicMock(spec=ToolCallRequest) + mock_request.tool = None + mock_request.tool_call = {"name": "some_tool", "id": "call_1"} + + # Return a Command instead of ToolMessage + expected_result = Command(goto="next_node") + mock_invocation = MagicMock(return_value=expected_result) + + result = wrapper.call_tool(mock_request, mock_invocation) + + assert result == expected_result + assert isinstance(result, Command) + + +@pytest.mark.unit +class TestFoundryToolNodeWrappers: + """Tests for FoundryToolNodeWrappers TypedDict.""" + + def test_foundry_tool_node_wrappers_structure(self): + """Test that FoundryToolNodeWrappers has the expected structure.""" + foundry_tools = [{"type": "code_interpreter"}] + wrapper = FoundryToolCallWrapper(foundry_tools) + + wrappers: FoundryToolNodeWrappers = wrapper.as_wrappers() + + # Should have both sync and async wrappers + assert "wrap_tool_call" in wrappers + assert "awrap_tool_call" in wrappers + + # Should be the wrapper methods + assert wrappers["wrap_tool_call"] == wrapper.call_tool + assert wrappers["awrap_tool_call"] == wrapper.call_tool_async + + def test_wrappers_can_be_unpacked_to_tool_node(self): + """Test that wrappers can be unpacked as kwargs to ToolNode.""" + foundry_tools = [{"type": "code_interpreter"}] + wrapper = FoundryToolCallWrapper(foundry_tools) + + wrappers = wrapper.as_wrappers() + + # Should be usable as kwargs + assert len(wrappers) == 2 + + # This pattern is used: ToolNode([], **wrappers) + def mock_tool_node_init(tools, wrap_tool_call=None, awrap_tool_call=None): + return { + "tools": tools, + "wrap_tool_call": wrap_tool_call, + 
"awrap_tool_call": awrap_tool_call, + } + + result = mock_tool_node_init([], **wrappers) + + assert result["wrap_tool_call"] is not None + assert result["awrap_tool_call"] is not None + From 9e96c9cc78e1f833cac9224c68601b131e9f8076 Mon Sep 17 00:00:00 2001 From: lusu-msft <68949729+lusu-msft@users.noreply.github.com> Date: Fri, 23 Jan 2026 11:25:09 -0800 Subject: [PATCH 58/94] [agentserver] disable latestdependency for azure-ai-agentserver-core (#44824) * disable latestdependency * remove unused log --- .../agentframework/models/human_in_the_loop_helper.py | 1 - sdk/agentserver/azure-ai-agentserver-core/pyproject.toml | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py index 4b3dce2c1bdb..82c959612faa 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py @@ -40,7 +40,6 @@ def get_pending_hitl_request(self, # if no checkpoint (Agent), find user input request and pair the feedbacks for message in thread_messages: for content in message.contents: - print(f" Content {type(content)}: {content.to_dict()}") if isinstance(content, UserInputRequestContents): # is a human input request function_call = content.function_call diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index afb5e6797396..68c9949805c2 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -73,4 +73,5 @@ combine-as-imports = true [tool.azure-sdk-build] breaking = false # 
incompatible python version pyright = false -verifytypes = false \ No newline at end of file +verifytypes = false +latestdependency = false \ No newline at end of file From 77098f1e52b0ca889538794134080a594d7c0bb4 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Fri, 23 Jan 2026 11:34:20 -0800 Subject: [PATCH 59/94] fix docstring --- .../azure/ai/agentserver/langgraph/_context.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py index 89be24921f54..d037088b18a5 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py @@ -29,8 +29,12 @@ def resolve(cls, """Resolve the LanggraphRunContext from either a RunnableConfig or a Runtime. :param config: Optional RunnableConfig to extract the context from. + :type config: Optional[RunnableConfig] :param runtime: Optional Runtime or ToolRuntime to extract the context from. + :type runtime: Optional[Union[Runtime, ToolRuntime]] + :return: An instance of LanggraphRunContext if found, otherwise None. 
+ :rtype: Optional[LanggraphRunContext] """ context: Optional["LanggraphRunContext"] = None if config: From 417f598bb73253c13e5641c9a6ccc0e446877340 Mon Sep 17 00:00:00 2001 From: junanchen Date: Fri, 23 Jan 2026 13:05:32 -0800 Subject: [PATCH 60/94] fix ut in lg --- .../unit_tests/langgraph/tools/test_agent_integration.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_agent_integration.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_agent_integration.py index fab1955ef415..6cfa903475fe 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_agent_integration.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_agent_integration.py @@ -273,7 +273,9 @@ def test_use_foundry_tools_with_model(self): result = use_foundry_tools(mock_model, foundry_tools) # type: ignore assert isinstance(result, FoundryToolLateBindingChatModel) - assert result._foundry_tools_to_bind == foundry_tools + assert len(result._foundry_tools_to_bind) == 1 + assert isinstance(result._foundry_tools_to_bind[0], FoundryHostedMcpTool) + assert result._foundry_tools_to_bind[0].name == "code_interpreter" @pytest.mark.unit From e5f52f05b57dffc5f6764fd5613cc8efc93c1b75 Mon Sep 17 00:00:00 2001 From: junanchen Date: Fri, 23 Jan 2026 13:11:09 -0800 Subject: [PATCH 61/94] move uts to uppper level --- .../unit_tests/{agent_framework => }/__init__.py | 0 .../tests/unit_tests/agent_framework/conftest.py | 15 --------------- .../test_agent_framework_input_converter.py | 0 .../tests/unit_tests/{core => }/__init__.py | 0 .../tests/unit_tests/{core => }/tools/__init__.py | 0 .../{core => }/tools/client/__init__.py | 0 .../tools/client/operations/__init__.py | 0 .../operations/test_foundry_connected_tools.py | 0 .../operations/test_foundry_hosted_mcp_tools.py | 0 .../{core => 
}/tools/client/test_client.py | 0 .../{core => }/tools/client/test_configuration.py | 0 .../tests/unit_tests/{core => }/tools/conftest.py | 0 .../{core => }/tools/runtime/__init__.py | 0 .../{core => }/tools/runtime/conftest.py | 0 .../{core => }/tools/runtime/test_catalog.py | 0 .../{core => }/tools/runtime/test_facade.py | 0 .../{core => }/tools/runtime/test_invoker.py | 0 .../{core => }/tools/runtime/test_resolver.py | 0 .../{core => }/tools/runtime/test_runtime.py | 0 .../{core => }/tools/runtime/test_starlette.py | 0 .../{core => }/tools/runtime/test_user.py | 0 .../unit_tests/{core => }/tools/utils/__init__.py | 0 .../unit_tests/{core => }/tools/utils/conftest.py | 0 .../{core => }/tools/utils/test_name_resolver.py | 0 .../tests/unit_tests/{langgraph => }/__init__.py | 0 .../tests/unit_tests/langgraph/conftest.py | 10 ---------- .../test_langgraph_request_converter.py | 0 .../unit_tests/{langgraph => }/tools/__init__.py | 0 .../unit_tests/{langgraph => }/tools/conftest.py | 0 .../tools/test_agent_integration.py | 0 .../{langgraph => }/tools/test_builder.py | 0 .../{langgraph => }/tools/test_chat_model.py | 0 .../{langgraph => }/tools/test_context.py | 0 .../{langgraph => }/tools/test_middleware.py | 0 .../{langgraph => }/tools/test_resolver.py | 0 .../{langgraph => }/tools/test_tool_node.py | 0 36 files changed, 25 deletions(-) rename sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/{agent_framework => }/__init__.py (100%) delete mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/agent_framework/conftest.py rename sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/{agent_framework => }/test_agent_framework_input_converter.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/__init__.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/__init__.py (100%) rename 
sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/client/__init__.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/client/operations/__init__.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/client/operations/test_foundry_connected_tools.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/client/operations/test_foundry_hosted_mcp_tools.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/client/test_client.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/client/test_configuration.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/conftest.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/runtime/__init__.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/runtime/conftest.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/runtime/test_catalog.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/runtime/test_facade.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/runtime/test_invoker.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/runtime/test_resolver.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/runtime/test_runtime.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/runtime/test_starlette.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/runtime/test_user.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/utils/__init__.py (100%) rename 
sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/utils/conftest.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/{core => }/tools/utils/test_name_resolver.py (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/{langgraph => }/__init__.py (100%) delete mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/conftest.py rename sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/{langgraph => }/test_langgraph_request_converter.py (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/{langgraph => }/tools/__init__.py (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/{langgraph => }/tools/conftest.py (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/{langgraph => }/tools/test_agent_integration.py (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/{langgraph => }/tools/test_builder.py (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/{langgraph => }/tools/test_chat_model.py (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/{langgraph => }/tools/test_context.py (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/{langgraph => }/tools/test_middleware.py (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/{langgraph => }/tools/test_resolver.py (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/{langgraph => }/tools/test_tool_node.py (100%) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/agent_framework/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/__init__.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/agent_framework/__init__.py rename to 
sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/__init__.py diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/agent_framework/conftest.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/agent_framework/conftest.py deleted file mode 100644 index a56a7164c0a3..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/agent_framework/conftest.py +++ /dev/null @@ -1,15 +0,0 @@ -""" -Pytest configuration and shared fixtures for unit tests. -""" - -import sys -from pathlib import Path - -# Ensure package sources are importable during tests -tests_root = Path(__file__).resolve() -src_root = tests_root.parents[4] -packages_root = tests_root.parents[2] / "packages" - -for path in (packages_root, src_root): - if str(path) not in sys.path: - sys.path.insert(0, str(path)) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/agent_framework/test_agent_framework_input_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_agent_framework_input_converter.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/agent_framework/test_agent_framework_input_converter.py rename to sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_agent_framework_input_converter.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/__init__.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/__init__.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/__init__.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/__init__.py similarity index 100% rename from 
sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/__init__.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/__init__.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/client/__init__.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/__init__.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/client/__init__.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/operations/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/client/operations/__init__.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/operations/__init__.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/client/operations/__init__.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/operations/test_foundry_connected_tools.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/client/operations/test_foundry_connected_tools.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/operations/test_foundry_connected_tools.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/client/operations/test_foundry_connected_tools.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/operations/test_foundry_hosted_mcp_tools.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/client/operations/test_foundry_hosted_mcp_tools.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/operations/test_foundry_hosted_mcp_tools.py rename to 
sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/client/operations/test_foundry_hosted_mcp_tools.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/test_client.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/client/test_client.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/test_client.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/client/test_client.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/test_configuration.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/client/test_configuration.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/client/test_configuration.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/client/test_configuration.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/conftest.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/conftest.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/conftest.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/conftest.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/__init__.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/__init__.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/__init__.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/conftest.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/conftest.py similarity index 100% rename from 
sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/conftest.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/conftest.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_catalog.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_catalog.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_catalog.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_catalog.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_facade.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_facade.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_facade.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_facade.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_invoker.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_invoker.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_invoker.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_invoker.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_resolver.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_resolver.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_resolver.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_resolver.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_runtime.py 
b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_runtime.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_runtime.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_runtime.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_starlette.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_starlette.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_starlette.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_starlette.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_user.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_user.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/runtime/test_user.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_user.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/utils/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/utils/__init__.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/utils/__init__.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/utils/__init__.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/utils/conftest.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/utils/conftest.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/utils/conftest.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/utils/conftest.py diff --git 
a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/utils/test_name_resolver.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/utils/test_name_resolver.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/core/tools/utils/test_name_resolver.py rename to sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/utils/test_name_resolver.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/__init__.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/__init__.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/__init__.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/conftest.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/conftest.py deleted file mode 100644 index 7f055e40010c..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/conftest.py +++ /dev/null @@ -1,10 +0,0 @@ -""" -Pytest configuration and shared fixtures for unit tests. 
-""" - -import sys -from pathlib import Path - -# Add the src directory to the Python path so we can import modules under test -src_path = Path(__file__).parent.parent.parent / "src" -sys.path.insert(0, str(src_path)) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/test_langgraph_request_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/test_langgraph_request_converter.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/__init__.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/__init__.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/__init__.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/conftest.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/conftest.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/conftest.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/conftest.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_agent_integration.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_agent_integration.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_agent_integration.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_agent_integration.py diff --git 
a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_builder.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_builder.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_builder.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_builder.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_chat_model.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_chat_model.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_chat_model.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_chat_model.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_context.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_context.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_context.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_context.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_middleware.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_middleware.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_middleware.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_middleware.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_resolver.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_resolver.py similarity index 100% rename from 
sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_resolver.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_resolver.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_tool_node.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_tool_node.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/langgraph/tools/test_tool_node.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_tool_node.py From 72c8e324667328d30c3e3335c7446caf7ccb2c4a Mon Sep 17 00:00:00 2001 From: junanchen Date: Fri, 23 Jan 2026 14:41:19 -0800 Subject: [PATCH 62/94] fix lg uts for py>=3.11 --- .../unit_tests/tools/runtime/test_catalog.py | 3 +- .../tools/test_agent_integration.py | 18 ++++++-- .../tests/unit_tests/tools/test_chat_model.py | 37 +++++++++++++--- .../tests/unit_tests/tools/test_middleware.py | 44 ++++++++++++++----- 4 files changed, 78 insertions(+), 24 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_catalog.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_catalog.py index 45b03f0530a2..a7013a7537e1 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_catalog.py +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_catalog.py @@ -6,6 +6,7 @@ import pytest from unittest.mock import AsyncMock +from azure.ai.agentserver.core.tools import ensure_foundry_tool from azure.ai.agentserver.core.tools.runtime._catalog import ( DefaultFoundryToolCatalog, ) @@ -189,7 +190,7 @@ async def test_list_with_facade_dict( ): """Test list works with facade dictionaries.""" facade = {"type": "custom_tool", "config": "value"} - expected_id = "hosted_mcp:custom_tool" + expected_id = ensure_foundry_tool(facade).id 
mock_foundry_tool_client.list_tools_details = AsyncMock( return_value={expected_id: [sample_tool_details]} diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_agent_integration.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_agent_integration.py index 6cfa903475fe..eea917e54fd4 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_agent_integration.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_agent_integration.py @@ -317,12 +317,22 @@ def test_context_resolution_from_config(self): assert resolved is context def test_context_resolution_returns_none_when_not_attached(self): - """Test that context resolution returns None when not attached.""" + """Test that context resolution returns None when not attached. + + For Python < 3.11, LanggraphRunContext.resolve will return None. + For Python >= 3.11, it will try get_runtime() from langgraph which depends on + context propagation. Since we don't run inside langgraph in unit tests, + no one propagates the context for us, so RuntimeError is raised. 
+ """ + import sys config: RunnableConfig = {"configurable": {}} - resolved = LanggraphRunContext.resolve(config=config) - - assert resolved is None + if sys.version_info >= (3, 11): + with pytest.raises(RuntimeError, match="outside of a runnable context"): + LanggraphRunContext.resolve(config=config) + else: + resolved = LanggraphRunContext.resolve(config=config) + assert resolved is None def test_from_config_returns_context(self): """Test LanggraphRunContext.from_config method.""" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_chat_model.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_chat_model.py index 085495a4b91e..046285f17562 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_chat_model.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_chat_model.py @@ -181,7 +181,14 @@ async def test_ainvoke_with_context( assert result.content == "Async hello from model!" def test_invoke_without_context_and_no_foundry_tools(self): - """Test invoking model without context and no foundry tools.""" + """Test invoking model without context and no foundry tools. + + For Python < 3.11, LanggraphRunContext.resolve will return None. + For Python >= 3.11, it will try get_runtime() from langgraph which depends on + context propagation. Since we don't run inside langgraph in unit tests, + no one propagates the context for us, so RuntimeError is raised. + """ + import sys delegate = FakeChatModel( responses=[AIMessage(content="Hello!")], ) @@ -196,13 +203,25 @@ def test_invoke_without_context_and_no_foundry_tools(self): config: RunnableConfig = {"configurable": {}} input_messages = [HumanMessage(content="Hello")] - result = model.invoke(input_messages, config=config) - # Should work since no foundry tools need resolution - assert result.content == "Hello!" 
+ if sys.version_info >= (3, 11): + with pytest.raises(RuntimeError, match="outside of a runnable context"): + model.invoke(input_messages, config=config) + else: + result = model.invoke(input_messages, config=config) + # Should work since no foundry tools need resolution + assert result.content == "Hello!" def test_invoke_without_context_raises_error_when_foundry_tools_present(self): - """Test that invoking without context raises error when foundry tools are set.""" + """Test that invoking without context raises error when foundry tools are set. + + For Python < 3.11, LanggraphRunContext.resolve will return None and we expect + 'Unable to resolve foundry tools from context' error. + For Python >= 3.11, it will try get_runtime() from langgraph which depends on + context propagation. Since we don't run inside langgraph in unit tests, + no one propagates the context for us, so RuntimeError is raised. + """ + import sys delegate = FakeChatModel( responses=[AIMessage(content="Hello!")], ) @@ -217,8 +236,12 @@ def test_invoke_without_context_raises_error_when_foundry_tools_present(self): config: RunnableConfig = {"configurable": {}} input_messages = [HumanMessage(content="Hello")] - with pytest.raises(RuntimeError, match="Unable to resolve foundry tools from context"): - model.invoke(input_messages, config=config) + if sys.version_info >= (3, 11): + with pytest.raises(RuntimeError, match="outside of a runnable context"): + model.invoke(input_messages, config=config) + else: + with pytest.raises(RuntimeError, match="Unable to resolve foundry tools from context"): + model.invoke(input_messages, config=config) def test_stream_with_context( self, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_middleware.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_middleware.py index 89290a58f97c..bf337c340ff6 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_middleware.py +++ 
b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_middleware.py @@ -137,7 +137,14 @@ def real_tool(x: str) -> str: assert result[0] is real_tool def test_wrap_tool_call_delegates_to_wrapper(self): - """Test that wrap_tool_call delegates to FoundryToolCallWrapper.""" + """Test that wrap_tool_call delegates to FoundryToolCallWrapper. + + For Python < 3.11, LanggraphRunContext.resolve will return None. + For Python >= 3.11, it will try get_runtime() from langgraph which depends on + context propagation. Since we don't run inside langgraph in unit tests, + no one propagates the context for us, so RuntimeError is raised. + """ + import sys foundry_tools = [{"type": "code_interpreter"}] middleware = FoundryToolBindingMiddleware(foundry_tools) @@ -152,15 +159,25 @@ def test_wrap_tool_call_delegates_to_wrapper(self): expected_result = ToolMessage(content="Result", tool_call_id="call_1") mock_handler = MagicMock(return_value=expected_result) - result = middleware.wrap_tool_call(mock_request, mock_handler) - - # Handler should be called - mock_handler.assert_called_once() - assert result == expected_result + if sys.version_info >= (3, 11): + with pytest.raises(RuntimeError, match="outside of a runnable context"): + middleware.wrap_tool_call(mock_request, mock_handler) + else: + result = middleware.wrap_tool_call(mock_request, mock_handler) + # Handler should be called + mock_handler.assert_called_once() + assert result == expected_result @pytest.mark.asyncio async def test_awrap_tool_call_delegates_to_wrapper_async(self): - """Test that awrap_tool_call delegates to FoundryToolCallWrapper async.""" + """Test that awrap_tool_call delegates to FoundryToolCallWrapper async. + + For Python < 3.11, LanggraphRunContext.resolve will return None. + For Python >= 3.11, it will try get_runtime() from langgraph which depends on + context propagation. 
Since we don't run inside langgraph in unit tests, + no one propagates the context for us, so RuntimeError is raised. + """ + import sys foundry_tools = [{"type": "code_interpreter"}] middleware = FoundryToolBindingMiddleware(foundry_tools) @@ -175,11 +192,14 @@ async def test_awrap_tool_call_delegates_to_wrapper_async(self): expected_result = ToolMessage(content="Async Result", tool_call_id="call_1") mock_handler = AsyncMock(return_value=expected_result) - result = await middleware.awrap_tool_call(mock_request, mock_handler) - - # Handler should be awaited - mock_handler.assert_awaited_once() - assert result == expected_result + if sys.version_info >= (3, 11): + with pytest.raises(RuntimeError, match="outside of a runnable context"): + await middleware.awrap_tool_call(mock_request, mock_handler) + else: + result = await middleware.awrap_tool_call(mock_request, mock_handler) + # Handler should be awaited + mock_handler.assert_awaited_once() + assert result == expected_result def test_middleware_with_multiple_foundry_tools(self): """Test middleware initialization with multiple foundry tools.""" From f4b2f8f459bfca2ecb6e2ea7435a8042f705b72c Mon Sep 17 00:00:00 2001 From: junanchen Date: Fri, 23 Jan 2026 15:20:08 -0800 Subject: [PATCH 63/94] avoid breaking change in af --- .../ai/agentserver/agentframework/__init__.py | 84 +++++++++++-------- .../agentframework/_agent_framework.py | 21 +---- .../agentframework/_ai_agent_adapter.py | 14 ++-- .../agentframework/_workflow_agent_adapter.py | 14 ++-- .../samples/basic_simple/minimal_example.py | 2 +- .../chat_client_with_foundry_tool.py | 2 +- 6 files changed, 66 insertions(+), 71 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py index 32cf57200a49..38f326b89c0a 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py @@ -6,6 +6,8 @@ from typing import TYPE_CHECKING, Any, Callable, Optional, Union, overload from agent_framework import AgentProtocol, BaseAgent, Workflow, WorkflowBuilder +from azure.core.credentials_async import AsyncTokenCredential +from azure.core.credentials import TokenCredential from azure.ai.agentserver.core.application import PackageMetadata, set_current_app # pylint: disable=import-error,no-name-in-module @@ -14,25 +16,25 @@ from ._ai_agent_adapter import AgentFrameworkAIAgentAdapter from ._workflow_agent_adapter import AgentFrameworkWorkflowAdapter from ._foundry_tools import FoundryToolsChatMiddleware - -if TYPE_CHECKING: # pragma: no cover - from azure.core.credentials_async import AsyncTokenCredential +from .persistence import AgentThreadRepository, CheckpointRepository @overload def from_agent_framework( - *, agent: Union[BaseAgent, AgentProtocol], - credentials: Optional["AsyncTokenCredential"] = None, - **kwargs: Any, + /, + credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] = None, + thread_repository: Optional[AgentThreadRepository]=None ) -> "AgentFrameworkAIAgentAdapter": """ Create an Agent Framework AI Agent Adapter from an AgentProtocol or BaseAgent. - :keyword agent: The agent to adapt. + :param agent: The agent to adapt. :type agent: Union[BaseAgent, AgentProtocol] :keyword credentials: Optional asynchronous token credential for authentication. :type credentials: Optional[AsyncTokenCredential] + :keyword thread_repository: Optional thread repository for agent thread management. + :type thread_repository: Optional[AgentThreadRepository] :return: An instance of AgentFrameworkAIAgentAdapter. 
:rtype: AgentFrameworkAIAgentAdapter @@ -41,10 +43,11 @@ def from_agent_framework( @overload def from_agent_framework( - *, workflow: Union[WorkflowBuilder, Callable[[], Workflow]], - credentials: Optional["AsyncTokenCredential"] = None, - **kwargs: Any, + /, + credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] = None, + thread_repository: Optional[AgentThreadRepository] = None, + checkpoint_repository: Optional[CheckpointRepository] = None, ) -> "AgentFrameworkWorkflowAdapter": """ Create an Agent Framework Workflow Adapter. @@ -55,56 +58,63 @@ def from_agent_framework( workflow definition can be converted to a WorkflowAgent. For more information, see the agent-framework samples and documentation. - :keyword workflow: The workflow builder or factory function to adapt. + :param workflow: The workflow builder or factory function to adapt. :type workflow: Union[WorkflowBuilder, Callable[[], Workflow]] :keyword credentials: Optional asynchronous token credential for authentication. :type credentials: Optional[AsyncTokenCredential] + :keyword thread_repository: Optional thread repository for agent thread management. + :type thread_repository: Optional[AgentThreadRepository] + :keyword checkpoint_repository: Optional checkpoint repository for workflow checkpointing. + :type checkpoint_repository: Optional[CheckpointRepository] :return: An instance of AgentFrameworkWorkflowAdapter. :rtype: AgentFrameworkWorkflowAdapter """ ... 
def from_agent_framework( - *, - agent: Optional[Union[BaseAgent, AgentProtocol]] = None, - workflow: Optional[Union[WorkflowBuilder, Callable[[], Workflow]]] = None, - credentials: Optional["AsyncTokenCredential"] = None, - **kwargs: Any, + agent_or_workflow: Union[BaseAgent, AgentProtocol, WorkflowBuilder, Callable[[], Workflow]], + /, + credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] = None, + thread_repository: Optional[AgentThreadRepository] = None, + checkpoint_repository: Optional[CheckpointRepository] = None ) -> "AgentFrameworkAgent": """ Create an Agent Framework Adapter from either an AgentProtocol/BaseAgent or a WorkflowAgent. One of agent or workflow must be provided. - :keyword agent: The agent to adapt. - :type agent: Optional[Union[BaseAgent, AgentProtocol]] - :keyword workflow: The workflow builder or factory function to adapt. - :type workflow: Optional[Union[WorkflowBuilder, Callable[[], Workflow]]] + :param agent_or_workflow: The agent to adapt. + :type agent_or_workflow: Optional[Union[BaseAgent, AgentProtocol]] :keyword credentials: Optional asynchronous token credential for authentication. :type credentials: Optional[AsyncTokenCredential] + :keyword thread_repository: Optional thread repository for agent thread management. + :type thread_repository: Optional[AgentThreadRepository] + :keyword checkpoint_repository: Optional checkpoint repository for workflow checkpointing. + :type checkpoint_repository: Optional[CheckpointRepository] :return: An instance of AgentFrameworkAgent. :rtype: AgentFrameworkAgent :raises TypeError: If neither or both of agent and workflow are provided, or if the provided types are incorrect. 
""" - provided = sum(value is not None for value in (agent, workflow)) - if provided != 1: - raise TypeError("from_agent_framework requires exactly one of 'agent' or 'workflow' keyword arguments") - - if workflow is not None: - if isinstance(workflow, WorkflowBuilder): - def workflow_factory() -> Workflow: - return workflow.build() - - return AgentFrameworkWorkflowAdapter(workflow_factory=workflow_factory, credentials=credentials, **kwargs) - if isinstance(workflow, Callable): - return AgentFrameworkWorkflowAdapter(workflow_factory=workflow, credentials=credentials, **kwargs) - raise TypeError("workflow must be a WorkflowBuilder or callable returning a Workflow") - - if isinstance(agent, (AgentProtocol, BaseAgent)): - return AgentFrameworkAIAgentAdapter(agent, credentials=credentials, **kwargs) - raise TypeError("agent must be an instance of AgentProtocol or BaseAgent") + if isinstance(agent_or_workflow, WorkflowBuilder): + return AgentFrameworkWorkflowAdapter(workflow_factory=agent_or_workflow.build, + credentials=credentials, + thread_repository=thread_repository, + checkpoint_repository=checkpoint_repository) + if isinstance(agent_or_workflow, Callable): # type: ignore + return AgentFrameworkWorkflowAdapter(workflow_factory=agent_or_workflow, + credentials=credentials, + thread_repository=thread_repository, + checkpoint_repository=checkpoint_repository) + # raise TypeError("workflow must be a WorkflowBuilder or callable returning a Workflow") + + if isinstance(agent_or_workflow, (AgentProtocol, BaseAgent)): + return AgentFrameworkAIAgentAdapter(agent_or_workflow, + credentials=credentials, + thread_repository=thread_repository) + raise TypeError("You must provide one of the instances of type " + "[AgentProtocol, BaseAgent, WorkflowBuilder or callable returning a Workflow]") __all__ = [ diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py 
b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py index 732b70095028..cb7793038408 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py @@ -48,35 +48,20 @@ class AgentFrameworkAgent(FoundryCBAgent): - Supports both streaming and non-streaming responses based on the `stream` flag. """ - def __init__(self, agent: AgentProtocol, + def __init__(self, credentials: "Optional[AsyncTokenCredential]" = None, - *, - thread_repository: Optional[AgentThreadRepository] = None, - **kwargs: Any, - ): + thread_repository: Optional[AgentThreadRepository] = None): """Initialize the AgentFrameworkAgent with an AgentProtocol. - :param agent: The Agent Framework agent to adapt. - :type agent: AgentProtocol :param credentials: Azure credentials for authentication. :type credentials: Optional[AsyncTokenCredential] :param thread_repository: An optional AgentThreadRepository instance for managing thread messages. :type thread_repository: Optional[AgentThreadRepository] """ - super().__init__(credentials=credentials, **kwargs) # pylint: disable=unexpected-keyword-arg - self._agent: AgentProtocol = agent + super().__init__(credentials=credentials) # pylint: disable=unexpected-keyword-arg self._thread_repository = thread_repository self._hitl_helper = HumanInTheLoopHelper() - @property - def agent(self) -> "AgentProtocol": - """Get the resolved agent. This property provides backward compatibility. - - :return: The resolved AgentProtocol if available, None otherwise. 
- :rtype: AgentProtocol - """ - return self._agent - def init_tracing(self): try: otel_exporter_endpoint = os.environ.get(AdapterConstants.OTEL_EXPORTER_ENDPOINT) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py index 622fb2762e7b..100f75ed3d82 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py @@ -7,6 +7,8 @@ from typing import Any, AsyncGenerator, Optional, Union from agent_framework import AgentProtocol +from azure.core.credentials import TokenCredential +from azure.core.credentials_async import AsyncTokenCredential from azure.ai.agentserver.core import AgentRunContext from azure.ai.agentserver.core.tools import OAuthConsentRequiredError @@ -27,12 +29,10 @@ class AgentFrameworkAIAgentAdapter(AgentFrameworkAgent): def __init__(self, agent: AgentProtocol, - *, - thread_repository: Optional[AgentThreadRepository]=None, - **kwargs) -> None: - super().__init__(agent=agent, **kwargs) + credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] = None, + thread_repository: Optional[AgentThreadRepository] = None) -> None: + super().__init__(credentials, thread_repository) self._agent = agent - self._thread_repository = thread_repository async def agent_run( # pylint: disable=too-many-statements self, context: AgentRunContext @@ -55,7 +55,7 @@ async def agent_run( # pylint: disable=too-many-statements if context.stream: return self._run_streaming_updates( context=context, - run_stream=lambda: self.agent.run_stream( + run_stream=lambda: self._agent.run_stream( message, thread=agent_thread, ), @@ -64,7 +64,7 @@ async def agent_run( # pylint: disable=too-many-statements # Non-streaming path 
logger.info("Running agent in non-streaming mode") - result = await self.agent.run( + result = await self._agent.run( message, thread=agent_thread) logger.debug("Agent run completed, result type: %s", type(result)) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py index fb40cb453124..c7bbc496c196 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py @@ -12,6 +12,8 @@ from agent_framework import Workflow, CheckpointStorage, WorkflowAgent, WorkflowCheckpoint from agent_framework._workflows import get_checkpoint_summary +from azure.core.credentials import TokenCredential +from azure.core.credentials_async import AsyncTokenCredential from azure.ai.agentserver.core import AgentRunContext from azure.ai.agentserver.core.logger import get_logger @@ -34,14 +36,12 @@ class AgentFrameworkWorkflowAdapter(AgentFrameworkAgent): """Adapter to run WorkflowBuilder agents within the Agent Framework CBAgent structure.""" def __init__(self, - workflow_factory: Callable[[], Workflow], - *, - thread_repository: Optional[AgentThreadRepository] = None, - checkpoint_repository: Optional[CheckpointRepository] = None, - **kwargs: Any) -> None: - super().__init__(agent=None, **kwargs) + workflow_factory: Callable[[], Workflow], + credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] = None, + thread_repository: Optional[AgentThreadRepository] = None, + checkpoint_repository: Optional[CheckpointRepository] = None) -> None: + super().__init__(credentials, thread_repository) self._workflow_factory = workflow_factory - self._thread_repository = thread_repository self._checkpoint_repository = 
checkpoint_repository async def agent_run( # pylint: disable=too-many-statements diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/basic_simple/minimal_example.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/basic_simple/minimal_example.py index 1d5aab07ae8a..2ea0f19dd32a 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/basic_simple/minimal_example.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/basic_simple/minimal_example.py @@ -26,7 +26,7 @@ def main() -> None: tools=get_weather, ) - from_agent_framework(agent=agent).run() + from_agent_framework(agent).run() if __name__ == "__main__": diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/chat_client_with_foundry_tool.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/chat_client_with_foundry_tool.py index 08e7e8bdffc7..cb9c3cd2c9c6 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/chat_client_with_foundry_tool.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/chat_client_with_foundry_tool.py @@ -28,7 +28,7 @@ def main(): instructions="You are a helpful assistant with access to various tools.", ) - from_agent_framework(agent=agent).run() + from_agent_framework(agent).run() if __name__ == "__main__": main() From 54f97b686189aaa901b19e882bab051dbffa0460 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Fri, 23 Jan 2026 15:32:36 -0800 Subject: [PATCH 64/94] fix samples --- .../samples/human_in_the_loop_ai_function/main.py | 2 +- .../samples/human_in_the_loop_workflow_agent/main.py | 2 +- .../samples/mcp_apikey/mcp_apikey.py | 2 +- .../samples/mcp_simple/mcp_simple.py | 2 +- .../samples/simple_async/minimal_async_example.py | 2 +- .../samples/workflow_agent_simple/workflow_agent_simple.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff 
--git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/main.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/main.py index db81c1091336..56dc5fca8860 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/main.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_ai_function/main.py @@ -78,7 +78,7 @@ def build_agent(): async def main() -> None: agent = build_agent() thread_repository = JsonLocalFileAgentThreadRepository(agent=agent, storage_path="./thread_storage") - await from_agent_framework(agent=agent, thread_repository=thread_repository).run_async() + await from_agent_framework(agent, thread_repository=thread_repository).run_async() if __name__ == "__main__": asyncio.run(main()) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/main.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/main.py index e749a4a62fc6..cc89c941e65e 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/main.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/main.py @@ -111,7 +111,7 @@ async def run_agent() -> None: """Run the workflow inside the agent server adapter.""" builder = create_builder() await from_agent_framework( - workflow=builder, # pass workflow builder to adapter + builder, # pass workflow builder to adapter checkpoint_repository=FileCheckpointRepository(storage_path="./checkpoints"), # for checkpoint storage ).run_async() diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/mcp_apikey/mcp_apikey.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/mcp_apikey/mcp_apikey.py index 2a1058e7f468..985d7fd01e0c 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/mcp_apikey/mcp_apikey.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/mcp_apikey/mcp_apikey.py @@ -35,7 +35,7 @@ async def main() -> None: ) async with agent: - await from_agent_framework(agent=agent).run_async() + await from_agent_framework(agent).run_async() if __name__ == "__main__": diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/mcp_simple/mcp_simple.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/mcp_simple/mcp_simple.py index ce5bb37eea4f..6b59771fe0da 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/mcp_simple/mcp_simple.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/mcp_simple/mcp_simple.py @@ -22,7 +22,7 @@ async def main() -> None: ) async with agent: - await from_agent_framework(agent=agent).run_async() + await from_agent_framework(agent).run_async() if __name__ == "__main__": diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/simple_async/minimal_async_example.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/simple_async/minimal_async_example.py index ac781d4d39ab..4c69c8afa84d 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/simple_async/minimal_async_example.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/simple_async/minimal_async_example.py @@ -28,7 +28,7 @@ async def main() -> None: ) async with agent: - await from_agent_framework(agent=agent).run_async() + await from_agent_framework(agent).run_async() if __name__ == "__main__": diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_agent_simple/workflow_agent_simple.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_agent_simple/workflow_agent_simple.py index a79e24f1a3fb..5de214c9ff09 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_agent_simple/workflow_agent_simple.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_agent_simple/workflow_agent_simple.py @@ -45,7 +45,7 @@ async def main() -> None: # Or create a factory function for the workflow pass the workflow factory to the adapter def workflow_factory() -> Workflow: return builder.build() - await from_agent_framework(workflow=workflow_factory).run_async() + await from_agent_framework(workflow_factory).run_async() if __name__ == "__main__": From f356999601c1658fe084cec8a5d6698ccf1465ba Mon Sep 17 00:00:00 2001 From: junanchen Date: Fri, 23 Jan 2026 16:32:27 -0800 Subject: [PATCH 65/94] make agent param in from_langgraph and from_agent_framework position-only param --- sdk/agentserver/azure-ai-agentserver-agentframework/README.md | 2 +- .../azure/ai/agentserver/langgraph/__init__.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/README.md b/sdk/agentserver/azure-ai-agentserver-agentframework/README.md index a8c525cdddaa..54d80aed48e7 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/README.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/README.md @@ -25,7 +25,7 @@ from azure.ai.agentserver.agentframework import from_agent_framework if __name__ == "__main__": # with this simple line, your agent will be hosted on http://localhost:8088 - from_agent_framework(agent=my_awesome_agent).run() + from_agent_framework(my_awesome_agent).run() ``` diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py index 7fefa1b486d5..f63eaa05ca0c 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py +++ 
b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py @@ -18,6 +18,7 @@ def from_langgraph( agent, + /, credentials: Optional["AsyncTokenCredential"] = None, converter: Optional["ResponseAPIConverter"] = None ) -> "LangGraphAdapter": From 09cc6afaeb00b1f67a8d15c47ec2597b7fbf7626 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Fri, 23 Jan 2026 17:01:06 -0800 Subject: [PATCH 66/94] diable mypy for release --- .../azure-ai-agentserver-agentframework/pyproject.toml | 6 +----- .../azure-ai-agentserver-langgraph/pyproject.toml | 6 +----- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml index 7cbbfd0edf6a..e3ef1d26475b 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml @@ -65,9 +65,5 @@ breaking = false # incompatible python version pyright = false verifytypes = false # incompatible python version for -core verify_keywords = false -#mindependency = false # depends on -core package -#latestdependency = false -#whl = false -#depends = false -#pylint = false whl_no_aio = false +mypy = false diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml index 40a85fe802eb..c9b844f4eaae 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml @@ -64,9 +64,5 @@ breaking = false # incompatible python version pyright = false verifytypes = false # incompatible python version for -core verify_keywords = false -#mindependency = false # depends on -core package -#latestdependency = false -#whl = false -#depends = false -#pylint = false whl_no_aio = false +mypy = false \ No newline at end of file From c2aec07b16125b7cc9ddcf11724f0233e3832107 Mon 
Sep 17 00:00:00 2001 From: lusu-msft <68949729+lusu-msft@users.noreply.github.com> Date: Fri, 23 Jan 2026 17:05:11 -0800 Subject: [PATCH 67/94] [agentserver] getting ready for release (#44837) * try skip apistub for langgraph * added beta classifier * fix pylint * fix pylint --- .../CHANGELOG.md | 2 ++ .../ai/agentserver/agentframework/__init__.py | 25 ++++++++++--------- .../mypy.ini | 9 +++++++ .../pyproject.toml | 1 + .../azure-ai-agentserver-core/CHANGELOG.md | 2 ++ .../agentserver/core/tools/client/_client.py | 2 +- .../agentserver/core/tools/client/_models.py | 4 +-- .../agentserver/core/tools/runtime/_facade.py | 6 +++-- .../core/tools/runtime/_resolver.py | 2 +- .../azure-ai-agentserver-core/pyproject.toml | 4 ++- .../CHANGELOG.md | 2 ++ .../pyproject.toml | 1 + 12 files changed, 40 insertions(+), 20 deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/mypy.ini diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index 29bae6795995..e75e2e293b1a 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -3,6 +3,8 @@ ## 1.0.0b9 (2026-01-23) +### Features Added + - Integrated with Foundry Tools - Add persistence for agent thread and checkpoint - Fixed WorkflowAgent concurrency issue diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py index 38f326b89c0a..179d33b8f5ab 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py @@ -1,9 +1,10 @@ # --------------------------------------------------------- # Copyright (c) Microsoft 
Corporation. All rights reserved. # --------------------------------------------------------- +# pylint: disable=docstring-should-be-keyword __path__ = __import__("pkgutil").extend_path(__path__, __name__) -from typing import TYPE_CHECKING, Any, Callable, Optional, Union, overload +from typing import Callable, Optional, Union, overload from agent_framework import AgentProtocol, BaseAgent, Workflow, WorkflowBuilder from azure.core.credentials_async import AsyncTokenCredential @@ -31,9 +32,9 @@ def from_agent_framework( :param agent: The agent to adapt. :type agent: Union[BaseAgent, AgentProtocol] - :keyword credentials: Optional asynchronous token credential for authentication. - :type credentials: Optional[AsyncTokenCredential] - :keyword thread_repository: Optional thread repository for agent thread management. + :param credentials: Optional asynchronous token credential for authentication. + :type credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] + :param thread_repository: Optional thread repository for agent thread management. :type thread_repository: Optional[AgentThreadRepository] :return: An instance of AgentFrameworkAIAgentAdapter. @@ -60,11 +61,11 @@ def from_agent_framework( :param workflow: The workflow builder or factory function to adapt. :type workflow: Union[WorkflowBuilder, Callable[[], Workflow]] - :keyword credentials: Optional asynchronous token credential for authentication. - :type credentials: Optional[AsyncTokenCredential] - :keyword thread_repository: Optional thread repository for agent thread management. + :param credentials: Optional asynchronous token credential for authentication. + :type credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] + :param thread_repository: Optional thread repository for agent thread management. :type thread_repository: Optional[AgentThreadRepository] - :keyword checkpoint_repository: Optional checkpoint repository for workflow checkpointing. 
+ :param checkpoint_repository: Optional checkpoint repository for workflow checkpointing. :type checkpoint_repository: Optional[CheckpointRepository] :return: An instance of AgentFrameworkWorkflowAdapter. :rtype: AgentFrameworkWorkflowAdapter @@ -85,11 +86,11 @@ def from_agent_framework( :param agent_or_workflow: The agent to adapt. :type agent_or_workflow: Optional[Union[BaseAgent, AgentProtocol]] - :keyword credentials: Optional asynchronous token credential for authentication. - :type credentials: Optional[AsyncTokenCredential] - :keyword thread_repository: Optional thread repository for agent thread management. + :param credentials: Optional asynchronous token credential for authentication. + :type credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] + :param thread_repository: Optional thread repository for agent thread management. :type thread_repository: Optional[AgentThreadRepository] - :keyword checkpoint_repository: Optional checkpoint repository for workflow checkpointing. + :param checkpoint_repository: Optional checkpoint repository for workflow checkpointing. :type checkpoint_repository: Optional[CheckpointRepository] :return: An instance of AgentFrameworkAgent. 
:rtype: AgentFrameworkAgent diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/mypy.ini b/sdk/agentserver/azure-ai-agentserver-agentframework/mypy.ini new file mode 100644 index 000000000000..5f8a8e301a2d --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/mypy.ini @@ -0,0 +1,9 @@ +[mypy] +explicit_package_bases = True +python_version = 3.10 + +[mypy-azure.*] +disable_error_code = syntax + +[mypy-samples.*] +ignore_errors = true \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml index e3ef1d26475b..527c43728865 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml @@ -9,6 +9,7 @@ authors = [ ] license = "MIT" classifiers = [ + "Development Status :: 4 - Beta", "Programming Language :: Python", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3", diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index b05d70708716..9d861de04f6d 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -3,6 +3,8 @@ ## 1.0.0b9 (2026-01-23) +### Features Added + - Integrated with Foundry Tools diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py index a998de7f9597..12b647d7adc7 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py @@ -34,7 +34,7 @@ ) from .operations._foundry_connected_tools import FoundryConnectedToolsOperations from 
.operations._foundry_hosted_mcp_tools import FoundryMcpToolsOperations -from .._exceptions import OAuthConsentRequiredError, ToolInvocationError +from .._exceptions import ToolInvocationError class FoundryToolClient(AsyncContextManager["FoundryToolClient"]): # pylint: disable=C4748 diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_models.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_models.py index c4d5c4d96a28..bcfc2f5d7e96 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_models.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_models.py @@ -7,8 +7,6 @@ from typing import ( Annotated, Any, - Awaitable, - Callable, ClassVar, Dict, Iterable, @@ -302,7 +300,7 @@ class SchemaDefinition(BaseModel): """ type: SchemaType = SchemaType.OBJECT - properties: Mapping[str, SchemaProperty] = field(default_factory=dict) + properties: Mapping[str, SchemaProperty] = field(default_factory=dict) # pylint: disable=E3701 required: Optional[Set[str]] = None def extract_from(self, diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_facade.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_facade.py index bfc4a08d9a63..71c4601cb525 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_facade.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_facade.py @@ -54,7 +54,8 @@ def ensure_foundry_tool(tool: FoundryToolLike) -> FoundryTool: # Pattern for Azure resource ID format: -# /subscriptions//resourceGroups//providers/Microsoft.CognitiveServices/accounts//projects//connections/ +# /subscriptions//resourceGroups//providers/Microsoft.CognitiveServices/accounts +# //projects//connections/ _RESOURCE_ID_PATTERN = re.compile( 
r"^/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.CognitiveServices/" r"accounts/[^/]+/projects/[^/]+/connections/(?P[^/]+)$", @@ -67,7 +68,8 @@ def _parse_connection_id(connection_id: str) -> str: Supports two formats: 1. Simple name: "my-connection-name" - 2. Resource ID: "/subscriptions//resourceGroups//providers/Microsoft.CognitiveServices/accounts//projects//connections/" + 2. Resource ID: "/subscriptions//resourceGroups//providers + /Microsoft.CognitiveServices/accounts//projects//connections/" :param connection_id: The connection identifier, either a simple name or a full resource ID. :type connection_id: str diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_resolver.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_resolver.py index 24eb0fabbb21..9596124d9b55 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_resolver.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_resolver.py @@ -2,7 +2,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- from abc import ABC, abstractmethod -from typing import Optional, Union +from typing import Union from ._catalog import FoundryToolCatalog from ._facade import FoundryToolLike, ensure_foundry_tool diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index 68c9949805c2..3427688bb2b0 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -8,6 +8,7 @@ authors = [ ] license = "MIT" classifiers = [ + "Development Status :: 4 - Beta", "Programming Language :: Python", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3", @@ -74,4 +75,5 @@ combine-as-imports = true breaking = false # incompatible python version pyright = false verifytypes = false -latestdependency = false \ No newline at end of file +latestdependency = false +dependencies = false \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index 43641a3de515..6d56228af1ff 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -3,6 +3,8 @@ ## 1.0.0b9 (2026-01-23) +### Features Added + - Integrated with Foundry Tools - Support Human-in-the-Loop diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml index c9b844f4eaae..e68c6365d6a3 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml @@ -8,6 +8,7 @@ authors = [ ] license = "MIT" classifiers = [ + "Development Status :: 4 - Beta", "Programming Language :: Python", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3", From 
21174ed2484936c492f2c4c72012e9a924ae740b Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Fri, 23 Jan 2026 17:28:06 -0800 Subject: [PATCH 68/94] fix af sphinx --- .../ai/agentserver/agentframework/_workflow_agent_adapter.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py index c7bbc496c196..f5a05609f0dc 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py @@ -28,8 +28,7 @@ from .models.agent_framework_output_non_streaming_converter import ( AgentFrameworkOutputNonStreamingConverter, ) -from .persistence.agent_thread_repository import AgentThreadRepository -from .persistence.checkpoint_repository import CheckpointRepository +from .persistence import AgentThreadRepository, CheckpointRepository logger = get_logger() From 3ac92d655a46d7402eee6d7df1c1560f8058058f Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Fri, 23 Jan 2026 17:30:48 -0800 Subject: [PATCH 69/94] fix af sphinx --- .../agentframework/persistence/agent_thread_repository.py | 6 +++++- .../agentframework/persistence/checkpoint_repository.py | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py index 66528ff96213..fa6ef19554c8 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py +++ 
b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py @@ -10,7 +10,11 @@ class AgentThreadRepository(ABC): - """AgentThread repository to manage saved thread messages of agent threads and workflows.""" + """ + AgentThread repository to manage saved thread messages of agent threads and workflows. + + :meta private: + """ @abstractmethod async def get( diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/checkpoint_repository.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/checkpoint_repository.py index 471d3a2f7f84..da673ff6af51 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/checkpoint_repository.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/checkpoint_repository.py @@ -12,7 +12,11 @@ ) class CheckpointRepository(ABC): - """Repository interface for storing and retrieving checkpoints.""" + """ + Repository interface for storing and retrieving checkpoints. + + :meta private: + """ @abstractmethod async def get_or_create(self, conversation_id: str) -> Optional[CheckpointStorage]: """Retrieve or create a checkpoint storage by conversation ID. 
From dee0b306f435ffad1706233217e36845b701d889 Mon Sep 17 00:00:00 2001 From: melionel Date: Fri, 23 Jan 2026 17:51:38 -0800 Subject: [PATCH 70/94] Mengla/add_af_tool_ut (#44829) * add unittests for tools * add more tests * remove unnecessary test case * move to upper level --- .../chat_client_with_foundry_tool/README.md | 2 +- .../chat_client_with_foundry_tool.py | 2 +- .../tests/unit_tests/conftest.py | 31 +++ .../tests/unit_tests/test_foundry_tools.py | 188 ++++++++++++++++++ 4 files changed, 221 insertions(+), 2 deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/conftest.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_foundry_tools.py diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/README.md b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/README.md index 956fc634eb11..d9fe177e850f 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/README.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/README.md @@ -59,7 +59,7 @@ The core pattern used by this sample: agent = AzureOpenAIChatClient( credential=DefaultAzureCredential(), middleware=FoundryToolsChatMiddleware( - tools=[{"type": "mcp", "project_connection_id": tool_connection_id}], + tools=[{"type": "web_search_preview"}, {"type": "mcp", "project_connection_id": tool_connection_id}], ), ).create_agent( name="FoundryToolAgent", diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/chat_client_with_foundry_tool.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/chat_client_with_foundry_tool.py index cb9c3cd2c9c6..d8c75259d29b 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/chat_client_with_foundry_tool.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/chat_client_with_foundry_tool/chat_client_with_foundry_tool.py @@ -22,7 +22,7 @@ def main(): agent = AzureOpenAIChatClient( credential=DefaultAzureCredential(), middleware=FoundryToolsChatMiddleware( - tools=[{"type": "mcp", "project_connection_id": tool_connection_id}] + tools=[{"type": "web_search_preview"}, {"type": "mcp", "project_connection_id": tool_connection_id}] )).create_agent( name="FoundryToolAgent", instructions="You are a helpful assistant with access to various tools.", diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/conftest.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/conftest.py new file mode 100644 index 000000000000..cd00c924c030 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/conftest.py @@ -0,0 +1,31 @@ +""" +Pytest configuration and shared fixtures for unit tests. +""" + +# Workaround: importing agent_framework (via mcp) can fail with +# KeyError: 'pydantic.root_model' unless this module is imported first. +import pydantic.root_model # noqa: F401 + +import site +import sys +from pathlib import Path + + +# Ensure we don't import user-site packages that can conflict with the active +# environment (e.g., a user-installed cryptography wheel causing PyO3 errors). +try: + user_site = site.getusersitepackages() + if user_site: + sys.path[:] = [p for p in sys.path if p != user_site] +except Exception: + # Best-effort: if site isn't fully configured, proceed without filtering. 
+ pass + +# Ensure package sources are importable during tests +tests_root = Path(__file__).resolve() +src_root = tests_root.parents[4] +packages_root = tests_root.parents[2] / "packages" + +for path in (packages_root, src_root): + if str(path) not in sys.path: + sys.path.insert(0, str(path)) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_foundry_tools.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_foundry_tools.py new file mode 100644 index 000000000000..82905405c88a --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_foundry_tools.py @@ -0,0 +1,188 @@ +import importlib +import inspect +from types import SimpleNamespace +from unittest.mock import AsyncMock + +from typing import Any + +import pytest +from agent_framework import AIFunction, ChatOptions +from pydantic import Field, create_model + +# Import schema models directly from client._models to avoid heavy azure.identity import +# chain triggered by azure.ai.agentserver.core.__init__.py +from azure.ai.agentserver.core.tools.client._models import ( + FoundryHostedMcpTool, + FoundryToolDetails, + ResolvedFoundryTool, + SchemaDefinition, + SchemaProperty, + SchemaType, +) + +# Load _foundry_tools module directly without triggering parent __init__ which has heavy deps +import importlib.util +import sys +from pathlib import Path + +foundry_tools_path = Path(__file__).parent.parent.parent / "azure" / "ai" / "agentserver" / "agentframework" / "_foundry_tools.py" +spec = importlib.util.spec_from_file_location("_foundry_tools", foundry_tools_path) +foundry_tools_module = importlib.util.module_from_spec(spec) +sys.modules["_foundry_tools"] = foundry_tools_module +spec.loader.exec_module(foundry_tools_module) + +FoundryToolClient = foundry_tools_module.FoundryToolClient +FoundryToolsChatMiddleware = foundry_tools_module.FoundryToolsChatMiddleware +_attach_signature_from_pydantic_model = 
foundry_tools_module._attach_signature_from_pydantic_model + + +@pytest.mark.unit +def test_attach_signature_from_pydantic_model_required_and_optional() -> None: + InputModel = create_model( + "InputModel", + required_int=(int, Field(description="required")), + optional_str=(str | None, Field(default=None, description="optional")), + ) + + async def tool_func(**kwargs): + return kwargs + + _attach_signature_from_pydantic_model(tool_func, InputModel) + + sig = inspect.signature(tool_func) + assert list(sig.parameters.keys()) == ["required_int", "optional_str"] + assert all(p.kind is inspect.Parameter.KEYWORD_ONLY for p in sig.parameters.values()) + assert sig.parameters["required_int"].default is inspect._empty + assert sig.parameters["optional_str"].default is None + + # Ensure annotations are also attached. + assert tool_func.__annotations__["required_int"] is int + assert tool_func.__annotations__["return"] is Any + + +def _make_resolved_tool(*, name: str = "my_tool", description: str = "desc") -> ResolvedFoundryTool: + schema = SchemaDefinition( + properties={ + "a": SchemaProperty(type=SchemaType.STRING, description="field a"), + "b": SchemaProperty(type=SchemaType.INTEGER, description="field b"), + }, + required={"a"}, + ) + definition = FoundryHostedMcpTool(name=name) + details = FoundryToolDetails(name=name, description=description, input_schema=schema) + return ResolvedFoundryTool(definition=definition, details=details) + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_to_aifunction_builds_pydantic_model_and_invokes(monkeypatch: pytest.MonkeyPatch) -> None: + resolved_tool = _make_resolved_tool(name="echo", description="Echo tool") + + invoke = AsyncMock(return_value={"ok": True}) + server_context = SimpleNamespace( + tools=SimpleNamespace( + invoke=invoke, + catalog=SimpleNamespace(list=AsyncMock()), + ) + ) + monkeypatch.setattr( + foundry_tools_module.AgentServerContext, + "get", + classmethod(lambda cls: server_context), + ) + + client = 
FoundryToolClient(tools=[]) + ai_func = client._to_aifunction(resolved_tool) + assert isinstance(ai_func, AIFunction) + assert ai_func.name == "echo" + assert ai_func.description == "Echo tool" + + # Signature should be attached from schema (a required, b optional) + sig = inspect.signature(ai_func.func) + assert list(sig.parameters.keys()) == ["a", "b"] + assert sig.parameters["a"].default is inspect._empty + assert sig.parameters["b"].default is None + + result = await ai_func.func(a="hi", b=123) + assert result == {"ok": True} + invoke.assert_awaited_once_with(resolved_tool, {"a": "hi", "b": 123}) + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_list_tools_uses_catalog_and_converts(monkeypatch: pytest.MonkeyPatch) -> None: + allowed = [FoundryHostedMcpTool(name="allowed_tool")] + resolved = [_make_resolved_tool(name="allowed_tool", description="Allowed")] + + catalog_list = AsyncMock(return_value=resolved) + server_context = SimpleNamespace( + tools=SimpleNamespace( + catalog=SimpleNamespace(list=catalog_list), + invoke=AsyncMock(), + ) + ) + monkeypatch.setattr( + foundry_tools_module.AgentServerContext, + "get", + classmethod(lambda cls: server_context), + ) + + client = FoundryToolClient(tools=allowed) + funcs = await client.list_tools() + + catalog_list.assert_awaited_once() + args, _kwargs = catalog_list.await_args + assert args[0] == list(allowed) + + assert len(funcs) == 1 + assert isinstance(funcs[0], AIFunction) + assert funcs[0].name == "allowed_tool" + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_middleware_process_creates_chat_options_when_missing(monkeypatch: pytest.MonkeyPatch) -> None: + middleware = FoundryToolsChatMiddleware(tools=[]) + + async def dummy_tool(**kwargs): + return kwargs + + DummyInput = create_model("DummyInput") + injected = [AIFunction(name="t", description="d", func=dummy_tool, input_model=DummyInput)] + monkeypatch.setattr(middleware._foundry_tool_client, "list_tools", 
AsyncMock(return_value=injected)) + + context = SimpleNamespace(chat_options=None) + next_fn = AsyncMock() + + await middleware.process(context, next_fn) + + assert isinstance(context.chat_options, ChatOptions) + assert context.chat_options.tools == injected + next_fn.assert_awaited_once_with(context) + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_middleware_process_appends_to_existing_chat_options(monkeypatch: pytest.MonkeyPatch) -> None: + middleware = FoundryToolsChatMiddleware(tools=[]) + + async def dummy_tool(**kwargs): + return kwargs + + DummyInput = create_model("DummyInput") + injected = [AIFunction(name="t2", description="d2", func=dummy_tool, input_model=DummyInput)] + monkeypatch.setattr(middleware._foundry_tool_client, "list_tools", AsyncMock(return_value=injected)) + + # Existing ChatOptions with no tools should become injected + context = SimpleNamespace(chat_options=ChatOptions()) + next_fn = AsyncMock() + await middleware.process(context, next_fn) + assert context.chat_options.tools == injected + + # Existing ChatOptions with tools should be appended + existing = [AIFunction(name="t1", description="d1", func=dummy_tool, input_model=DummyInput)] + context = SimpleNamespace(chat_options=ChatOptions(tools=existing)) + next_fn = AsyncMock() + await middleware.process(context, next_fn) + assert context.chat_options.tools == existing + injected + assert next_fn.await_count == 1 From e27ded676e3915817a98632a9dc744203ca5502c Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Fri, 23 Jan 2026 17:53:15 -0800 Subject: [PATCH 71/94] fix pylint --- sdk/agentserver/azure-ai-agentserver-agentframework/mypy.ini | 4 ---- .../azure/ai/agentserver/langgraph/langgraph.py | 2 +- .../azure/ai/agentserver/langgraph/tools/_builder.py | 2 +- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/mypy.ini b/sdk/agentserver/azure-ai-agentserver-agentframework/mypy.ini index 5f8a8e301a2d..e0bb0b83e2ce 100644 
--- a/sdk/agentserver/azure-ai-agentserver-agentframework/mypy.ini +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/mypy.ini @@ -1,9 +1,5 @@ [mypy] explicit_package_bases = True -python_version = 3.10 - -[mypy-azure.*] -disable_error_code = syntax [mypy-samples.*] ignore_errors = true \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index aae3bc32ee35..13646778454c 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -171,7 +171,7 @@ def ensure_runnable_config(self, input_arguments: GraphInputArguments, context: config["configurable"] = configurable context.attach_to_config(config) - callbacks = config.get("callbacks", []) + callbacks = config.get("callbacks", []) # mypy: ignore-errors if self.azure_ai_tracer and self.azure_ai_tracer not in callbacks: callbacks.append(self.azure_ai_tracer) config["callbacks"] = callbacks diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py index 0ea9a2da80f2..85ef781a1fd9 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py @@ -60,4 +60,4 @@ def use_foundry_tools( # pylint: disable=C4743 foundry_tools = [ensure_foundry_tool(tool) for tool in model_or_tools] get_registry().extend(foundry_tools) - return FoundryToolBindingMiddleware(foundry_tools) + return FoundryToolBindingMiddleware(foundry_tools) From 6ce8fceac26bcfdc41b807fb9bf9535fa837e93f Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Fri, 23 
Jan 2026 17:54:41 -0800 Subject: [PATCH 72/94] disable apistub for lg --- sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml index e68c6365d6a3..a39b56cbe204 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml @@ -66,4 +66,5 @@ pyright = false verifytypes = false # incompatible python version for -core verify_keywords = false whl_no_aio = false -mypy = false \ No newline at end of file +mypy = false +apistub = false \ No newline at end of file From 13443f22f8aae5ecfa16670d23644a6f7c1f452f Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Fri, 23 Jan 2026 18:48:10 -0800 Subject: [PATCH 73/94] fix build --- .../azure/ai/agentserver/langgraph/tools/_builder.py | 2 +- shared_requirements.txt | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py index 85ef781a1fd9..0ea9a2da80f2 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py @@ -60,4 +60,4 @@ def use_foundry_tools( # pylint: disable=C4743 foundry_tools = [ensure_foundry_tool(tool) for tool in model_or_tools] get_registry().extend(foundry_tools) - return FoundryToolBindingMiddleware(foundry_tools) + return FoundryToolBindingMiddleware(foundry_tools) diff --git a/shared_requirements.txt b/shared_requirements.txt index b5e1b85f184a..630973de2eb2 100644 --- a/shared_requirements.txt +++ b/shared_requirements.txt @@ -96,4 +96,5 @@ opentelemetry-exporter-otlp-proto-grpc 
agent-framework-core langchain langchain-openai -azure-ai-language-questionanswering \ No newline at end of file +azure-ai-language-questionanswering +cachetools \ No newline at end of file From ff0e557e8deb2f108f9daa595a10a86b5fea842b Mon Sep 17 00:00:00 2001 From: junanchen Date: Fri, 23 Jan 2026 19:22:34 -0800 Subject: [PATCH 74/94] fix spell, mypy, pylint --- .../ai/agentserver/agentframework/__init__.py | 2 +- .../persistence/agent_thread_repository.py | 2 +- .../cspell.json | 4 +++- .../human_in_the_loop_workflow_agent/README.md | 2 +- .../tests/unit_tests/test_foundry_tools.py | 8 ++++---- .../ai/agentserver/core/application/_options.py | 2 +- .../core/server/common/agent_run_context.py | 14 -------------- .../common/id_generator/foundry_id_generator.py | 2 +- .../azure-ai-agentserver-core/cspell.json | 5 +++-- .../ai/agentserver/langgraph/tools/_builder.py | 2 +- .../azure-ai-agentserver-langgraph/cspell.json | 4 +++- 11 files changed, 19 insertions(+), 28 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py index 179d33b8f5ab..76ad4f904f67 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py @@ -52,7 +52,7 @@ def from_agent_framework( ) -> "AgentFrameworkWorkflowAdapter": """ Create an Agent Framework Workflow Adapter. - The arugument `workflow` can be either a WorkflowBuilder or a factory function + The argument `workflow` can be either a WorkflowBuilder or a factory function that returns a Workflow. It will be called to create a new Workflow instance and `.as_agent()` will be called as well for each incoming CreateResponse request. 
Please ensure that the diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py index fa6ef19554c8..f5b54d284798 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py @@ -22,7 +22,7 @@ async def get( conversation_id: str, agent: Optional[Union[AgentProtocol, WorkflowAgent]] = None, ) -> Optional[AgentThread]: - """Retrieve the savedt thread for a given conversation ID. + """Retrieve the saved thread for a given conversation ID. :param conversation_id: The conversation ID. :type conversation_id: str diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json b/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json index 951bfab2c88a..af30eb4f2846 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json @@ -7,7 +7,9 @@ "pysort", "redef", "aifunction", - "ainvoke" + "ainvoke", + "hitl", + "HITL" ], "ignorePaths": [ "*.csv", diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/README.md b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/README.md index 172422f87c7f..d7a6dd978fec 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/README.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/human_in_the_loop_workflow_agent/README.md @@ -128,7 +128,7 @@ Respond by sending a `CreateResponse` request with `function_call_output` messag { "agent": {"name": "local_agent", 
"type": "agent_reference"}, "stream": false, - "convseration": {"id": ""}, + "conversation": {"id": ""}, "input": [ { "call_id": "", diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_foundry_tools.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_foundry_tools.py index 82905405c88a..ba30e06cfe78 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_foundry_tools.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_foundry_tools.py @@ -128,15 +128,15 @@ async def test_list_tools_uses_catalog_and_converts(monkeypatch: pytest.MonkeyPa ) client = FoundryToolClient(tools=allowed) - funcs = await client.list_tools() + functions = await client.list_tools() catalog_list.assert_awaited_once() args, _kwargs = catalog_list.await_args assert args[0] == list(allowed) - assert len(funcs) == 1 - assert isinstance(funcs[0], AIFunction) - assert funcs[0].name == "allowed_tool" + assert len(functions) == 1 + assert isinstance(functions[0], AIFunction) + assert functions[0].name == "allowed_tool" @pytest.mark.unit diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_options.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_options.py index dc80c1538327..d70270261e7b 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_options.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_options.py @@ -20,7 +20,7 @@ class AgentServerOptions(TypedDict): project_endpoint: NotRequired[str] credential: NotRequired[Union[AsyncTokenCredential, TokenCredential]] http: NotRequired["HttpServerOptions"] - toos: NotRequired["ToolsOptions"] + tools: NotRequired["ToolsOptions"] class HttpServerOptions(TypedDict): diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py 
b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py index 2464179a119f..980f9c9d38f3 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py @@ -23,8 +23,6 @@ def __init__(self, payload: dict, **kwargs: Any) -> None: self._response_id = self._id_generator.response_id self._conversation_id = self._id_generator.conversation_id self._stream = self.request.get("stream", False) - self._user_info = kwargs.get("user_info", None) - self._agent_tools = kwargs.get("agent_tools", []) @property def raw_payload(self) -> dict: @@ -67,18 +65,6 @@ def get_conversation_object(self) -> ResponseConversation1: return None # type: ignore return ResponseConversation1(id=self._conversation_id) - def get_tools(self) -> list: - # request tools take precedence over agent tools - # TODO: remove this method - request_tools = self.request.get("tools", []) - if not request_tools: - return self._agent_tools - return request_tools - - def get_user_info(self) -> UserInfo: - # TODO: remove this method - return self._user_info - def _deserialize_create_response(payload: dict) -> CreateResponse: _deserialized = CreateResponse(**payload) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py index 1082242cbf51..2c1a2d154d91 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py @@ -37,7 +37,7 @@ def from_request(cls, payload: dict) -> "FoundryIdGenerator": if isinstance(conv_id_raw, str): 
conv_id = conv_id_raw elif isinstance(conv_id_raw, dict): - conv_id = conv_id_raw.get("id", None) + conv_id = conv_id_raw.get("id", None) # type: ignore[assignment] else: conv_id = None return cls(response_id, conv_id) diff --git a/sdk/agentserver/azure-ai-agentserver-core/cspell.json b/sdk/agentserver/azure-ai-agentserver-core/cspell.json index 55131ced0609..d5003af37fe1 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/cspell.json +++ b/sdk/agentserver/azure-ai-agentserver-core/cspell.json @@ -17,9 +17,10 @@ "DETFL", "SETFL", "Planifica", - "mcptools", "ainvoke", - "oauthreq" + "oauthreq", + "hitl", + "HITL" ], "ignorePaths": [ "*.csv", diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py index 0ea9a2da80f2..d73544a707c5 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py @@ -60,4 +60,4 @@ def use_foundry_tools( # pylint: disable=C4743 foundry_tools = [ensure_foundry_tool(tool) for tool in model_or_tools] get_registry().extend(foundry_tools) - return FoundryToolBindingMiddleware(foundry_tools) + return FoundryToolBindingMiddleware(foundry_tools) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/cspell.json b/sdk/agentserver/azure-ai-agentserver-langgraph/cspell.json index 1ea68a37f8d5..3201d7d662a3 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/cspell.json +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/cspell.json @@ -6,7 +6,9 @@ "envtemplate", "ainvoke", "asetup", - "mcptools" + "hitl", + "HITL", + "awrap" ], "ignorePaths": [ "*.csv", From 39a4b219101538075a7eba23271b78ad63ade403 Mon Sep 17 00:00:00 2001 From: junanchen Date: Mon, 26 Jan 2026 15:58:21 -0800 Subject: [PATCH 75/94] remove "from 
__future__ import annotations" as we support py>=3.10 --- .../azure/ai/agentserver/agentframework/_agent_framework.py | 1 - .../azure/ai/agentserver/agentframework/_ai_agent_adapter.py | 1 - .../azure/ai/agentserver/agentframework/_foundry_tools.py | 1 - .../agentframework/models/agent_framework_input_converters.py | 1 - .../models/agent_framework_output_non_streaming_converter.py | 1 - .../models/agent_framework_output_streaming_converter.py | 1 - .../ai/agentserver/agentframework/models/agent_id_generator.py | 1 - .../ai/agentserver/agentframework/models/utils/async_iter.py | 1 - .../azure/ai/agentserver/core/application/_package_metadata.py | 1 - .../core/server/common/id_generator/foundry_id_generator.py | 1 - .../azure/ai/agentserver/core/tools/_exceptions.py | 1 - .../azure/ai/agentserver/core/tools/client/operations/_base.py | 1 - .../azure/ai/agentserver/core/utils/_credential.py | 1 - .../samples/bilingual_weekend_planner/main.py | 1 - .../ai/agentserver/langgraph/models/response_api_converter.py | 1 - .../langgraph/models/response_api_default_converter.py | 1 - .../azure/ai/agentserver/langgraph/tools/_chat_model.py | 1 - .../azure/ai/agentserver/langgraph/tools/_middleware.py | 1 - .../azure-ai-agentserver-langgraph/samples/custom_state/main.py | 1 - .../samples/mcp_apikey/mcp_apikey.py | 1 - 20 files changed, 20 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py index cb7793038408..06a2ca8c1411 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py @@ -2,7 +2,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- # pylint: disable=logging-fstring-interpolation,no-name-in-module,no-member,do-not-import-asyncio -from __future__ import annotations import os from typing import Any, AsyncGenerator, Optional, TYPE_CHECKING, Union, Callable diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py index 100f75ed3d82..30ee1b0cf2e9 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py @@ -2,7 +2,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- # pylint: disable=no-name-in-module,import-error -from __future__ import annotations from typing import Any, AsyncGenerator, Optional, Union diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py index 64120308a872..c2ee0e50746c 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py @@ -3,7 +3,6 @@ # --------------------------------------------------------- # pylint: disable=client-accepts-api-version-keyword,missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs # pylint: disable=no-name-in-module,import-error -from __future__ import annotations import inspect from typing import Any, Awaitable, Callable, Dict, List, Optional, Sequence diff --git 
a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py index 9ba678e7bebf..0b621efcfde5 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py @@ -3,7 +3,6 @@ # --------------------------------------------------------- # pylint: disable=too-many-nested-blocks,too-many-return-statements,too-many-branches # mypy: disable-error-code="no-redef" -from __future__ import annotations from typing import Dict, List, Optional diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py index 4984b2fc0423..167206c40ea1 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py @@ -1,7 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -from __future__ import annotations import datetime import json diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 805f3fc79ead..273f3f711332 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -3,7 +3,6 @@ # --------------------------------------------------------- # pylint: disable=attribute-defined-outside-init,protected-access,unnecessary-lambda-assignment # mypy: disable-error-code="call-overload,assignment,arg-type,override" -from __future__ import annotations import datetime import json diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py index da4045898a5e..a694e514eb55 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py @@ -9,7 +9,6 @@ returned so callers can decide how to handle absence. 
""" -from __future__ import annotations from typing import Optional diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py index fdf3b2fbb2a3..ab36f95a3bdd 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py @@ -1,7 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -from __future__ import annotations from collections.abc import AsyncIterable, AsyncIterator, Callable from typing import TypeVar, Optional, Tuple diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py index 5701110e5c7f..2392e0496136 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py @@ -1,7 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -from __future__ import annotations import platform from dataclasses import dataclass diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py index 2c1a2d154d91..a7cd78a391a2 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py @@ -2,7 +2,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -from __future__ import annotations import base64 import os diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/_exceptions.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/_exceptions.py index a5fe7726e9f1..8e8c3f88accb 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/_exceptions.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/_exceptions.py @@ -1,7 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -from __future__ import annotations from typing import TYPE_CHECKING diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_base.py index a3c552fe2575..7bf8aa09a7ed 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_base.py @@ -1,7 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -from __future__ import annotations from abc import ABC import json diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py index 398a8c46fd5d..715e942a596c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py @@ -1,7 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -from __future__ import annotations import asyncio # pylint: disable=C4763 import inspect diff --git a/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py b/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py index 099d8dc45181..c4719eff92b8 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py +++ b/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py @@ -1,7 +1,6 @@ # mypy: ignore-errors """Bilingual weekend planner sample with full GenAI telemetry capture.""" -from __future__ import annotations import json import logging diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py index 32cbf93a4bfb..1bd778568452 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py @@ -18,7 +18,6 @@ provided async iterator). Keep them pure transformation layers so they are testable. 
""" -from __future__ import annotations from abc import ABC, abstractmethod from typing import Any, AsyncIterable, AsyncIterator, Dict, TypedDict, Union diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py index cfe5229e3634..94705e790bc8 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py @@ -1,7 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -from __future__ import annotations import time from collections.abc import Callable diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py index 4ca422b88c41..bef818fec89d 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py @@ -1,7 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -from __future__ import annotations from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py index c226e51e72ac..802e51e9f3ea 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py @@ -1,7 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -from __future__ import annotations from typing import Awaitable, Callable, ClassVar, List diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py index ec45dceccfc8..174afbb9bea5 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py @@ -1,4 +1,3 @@ -from __future__ import annotations import json import os diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/mcp_apikey/mcp_apikey.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/mcp_apikey/mcp_apikey.py index 12f5c50aadae..d1a363cf63c9 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/mcp_apikey/mcp_apikey.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/mcp_apikey/mcp_apikey.py @@ -1,5 +1,4 @@ # Copyright (c) Microsoft. All rights reserved. 
-from __future__ import annotations import asyncio import os From 955031bde68787f681028cf0f1913b2f18b964a3 Mon Sep 17 00:00:00 2001 From: junanchen Date: Mon, 26 Jan 2026 16:42:15 -0800 Subject: [PATCH 76/94] fix error introduced by removing "from __future__ import annotations" --- .../models/agent_framework_output_streaming_converter.py | 1 + .../azure/ai/agentserver/core/tools/_exceptions.py | 1 + 2 files changed, 2 insertions(+) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 273f3f711332..805f3fc79ead 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -3,6 +3,7 @@ # --------------------------------------------------------- # pylint: disable=attribute-defined-outside-init,protected-access,unnecessary-lambda-assignment # mypy: disable-error-code="call-overload,assignment,arg-type,override" +from __future__ import annotations import datetime import json diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/_exceptions.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/_exceptions.py index 8e8c3f88accb..a5fe7726e9f1 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/_exceptions.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/_exceptions.py @@ -1,6 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- +from __future__ import annotations from typing import TYPE_CHECKING From 01195a5119f6d4e2eb68476547316c5bd2af71df Mon Sep 17 00:00:00 2001 From: Jun'an Chen Date: Mon, 26 Jan 2026 17:56:09 -0800 Subject: [PATCH 77/94] support implicit item type: item without type field is considered as message (#44857) --- .../agentframework/models/agent_framework_input_converters.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py index 0b621efcfde5..4a1a951ed63e 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py @@ -82,9 +82,9 @@ def _transform_input_internal( if text_parts: messages.append(" ".join(text_parts)) - # Case 2: Explicit message params (user/assistant/system) + # Case 2: message params (user/assistant/system) elif ( - item.get("type") == "message" + item.get("type") in ("", None, "message") and item.get("role") is not None and item.get("content") is not None ): From b6c876997d7b7d6591aacd83c24296d11558a09a Mon Sep 17 00:00:00 2001 From: Jun'an Chen Date: Mon, 26 Jan 2026 18:29:52 -0800 Subject: [PATCH 78/94] [agentserver] make project endpoint optional (#44856) * make project endpoint optional * add UT --- .../azure/ai/agentserver/core/server/base.py | 22 +--- .../ai/agentserver/core/tools/__init__.py | 3 +- .../core/tools/runtime/_runtime.py | 53 +++++++- .../unit_tests/tools/runtime/test_runtime.py | 120 +++++++++++++++++- 4 files changed, 179 insertions(+), 19 deletions(-) diff --git 
a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 2afbed6e99a8..cf5983110a87 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -12,6 +12,9 @@ from typing import Any, AsyncGenerator, Generator, Optional, Union import uvicorn +from azure.core.credentials import TokenCredential +from azure.core.credentials_async import AsyncTokenCredential +from azure.identity.aio import DefaultAzureCredential as AsyncDefaultTokenCredential from opentelemetry import context as otel_context, trace from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from starlette.applications import Starlette @@ -23,21 +26,12 @@ from starlette.routing import Route from starlette.types import ASGIApp -from azure.core.credentials import TokenCredential -from azure.core.credentials_async import AsyncTokenCredential -from azure.identity.aio import DefaultAzureCredential as AsyncDefaultTokenCredential - from ._context import AgentServerContext -from ..models import projects as project_models +from .common.agent_run_context import AgentRunContext from ..constants import Constants from ..logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger, get_project_endpoint, request_context -from ..models import ( - Response as OpenAIResponse, - ResponseStreamEvent, -) -from .common.agent_run_context import AgentRunContext - -from ..tools import DefaultFoundryToolRuntime, UserInfoContextMiddleware +from ..models import Response as OpenAIResponse, ResponseStreamEvent, projects as project_models +from ..tools import UserInfoContextMiddleware, create_tool_runtime from ..utils._credential import AsyncTokenCredentialAdapter logger = get_logger() @@ -100,9 +94,7 @@ def __init__(self, project_endpoint: Optional[str] = None) -> None: 
self.credentials = AsyncTokenCredentialAdapter(credentials) if credentials else AsyncDefaultTokenCredential() project_endpoint = get_project_endpoint() or project_endpoint - if not project_endpoint: - raise ValueError("Project endpoint is required.") - AgentServerContext(DefaultFoundryToolRuntime(project_endpoint, self.credentials)) + AgentServerContext(create_tool_runtime(project_endpoint, self.credentials)) async def runs_endpoint(request): # Set up tracing context and span diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/__init__.py index 34c58d65cfd6..fa58e50368bf 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/__init__.py @@ -32,7 +32,7 @@ from .runtime._facade import FoundryToolFacade, FoundryToolLike, ensure_foundry_tool from .runtime._invoker import FoundryToolInvoker, DefaultFoundryToolInvoker from .runtime._resolver import FoundryToolInvocationResolver, DefaultFoundryToolInvocationResolver -from .runtime._runtime import FoundryToolRuntime, DefaultFoundryToolRuntime +from .runtime._runtime import create_tool_runtime, FoundryToolRuntime, DefaultFoundryToolRuntime from .runtime._starlette import UserInfoContextMiddleware from .runtime._user import UserProvider, ContextVarUserProvider @@ -71,6 +71,7 @@ "FoundryToolInvocationResolver", "DefaultFoundryToolInvocationResolver", # Runtime + "create_tool_runtime", "FoundryToolRuntime", "DefaultFoundryToolRuntime", # Starlette diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_runtime.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_runtime.py index 8bc77759ecd6..eed4f63c7228 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_runtime.py 
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_runtime.py @@ -2,7 +2,8 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- import os -from typing import Any, AsyncContextManager, Dict, Optional, Union +from abc import ABC, abstractmethod +from typing import Any, AsyncContextManager, ClassVar, Dict, Optional, Union from azure.core.credentials_async import AsyncTokenCredential @@ -15,10 +16,29 @@ from ...constants import Constants -class FoundryToolRuntime(AsyncContextManager["FoundryToolRuntime"]): +def create_tool_runtime(project_endpoint: str | None, + credential: AsyncTokenCredential | None) -> "FoundryToolRuntime": + """Create a Foundry tool runtime. + Returns a DefaultFoundryToolRuntime if both project_endpoint and credential are provided, + otherwise returns a ThrowingFoundryToolRuntime which raises errors on usage. + + :param project_endpoint: The project endpoint. + :type project_endpoint: str | None + :param credential: The credential. + :type credential: AsyncTokenCredential | None + :return: The Foundry tool runtime. + :rtype: FoundryToolRuntime + """ + if project_endpoint and credential: + return DefaultFoundryToolRuntime(project_endpoint=project_endpoint, credential=credential) + else: + return ThrowingFoundryToolRuntime() + +class FoundryToolRuntime(AsyncContextManager["FoundryToolRuntime"], ABC): """Base class for Foundry tool runtimes.""" @property + @abstractmethod def catalog(self) -> FoundryToolCatalog: """The tool catalog. @@ -28,6 +48,7 @@ def catalog(self) -> FoundryToolCatalog: raise NotImplementedError @property + @abstractmethod def invocation(self) -> FoundryToolInvocationResolver: """The tool invocation resolver. 
@@ -91,3 +112,31 @@ async def __aenter__(self) -> "DefaultFoundryToolRuntime": async def __aexit__(self, exc_type, exc_value, traceback): await self._client.__aexit__(exc_type, exc_value, traceback) + + +class ThrowingFoundryToolRuntime(FoundryToolRuntime): + """A FoundryToolRuntime that raises errors on usage.""" + _ERROR_MESSAGE: ClassVar[str] = ("FoundryToolRuntime is not configured. " + "Please provide a valid project endpoint and credential.") + + @property + def catalog(self) -> FoundryToolCatalog: + """The tool catalog. + + :raises RuntimeError: Always raised to indicate the runtime is not configured. + """ + raise RuntimeError(self._ERROR_MESSAGE) + + @property + def invocation(self) -> FoundryToolInvocationResolver: + """The tool invocation resolver. + + :raises RuntimeError: Always raised to indicate the runtime is not configured. + """ + raise RuntimeError(self._ERROR_MESSAGE) + + async def __aenter__(self) -> "ThrowingFoundryToolRuntime": + return self + + async def __aexit__(self, exc_type, exc_value, traceback): + pass \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_runtime.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_runtime.py index e42fc29a76cd..a99935662941 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_runtime.py +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/tools/runtime/test_runtime.py @@ -6,7 +6,11 @@ import pytest from unittest.mock import AsyncMock, MagicMock, patch -from azure.ai.agentserver.core.tools.runtime._runtime import DefaultFoundryToolRuntime +from azure.ai.agentserver.core.tools.runtime._runtime import ( + create_tool_runtime, + DefaultFoundryToolRuntime, + ThrowingFoundryToolRuntime, +) from azure.ai.agentserver.core.tools.runtime._catalog import DefaultFoundryToolCatalog from azure.ai.agentserver.core.tools.runtime._resolver import 
DefaultFoundryToolInvocationResolver from azure.ai.agentserver.core.tools.runtime._user import ContextVarUserProvider @@ -281,3 +285,117 @@ async def test_aexit_called_on_exception( raise ValueError("Test error") mock_client_instance.__aexit__.assert_called_once() + + +class TestCreateToolRuntime: + """Tests for create_tool_runtime factory function.""" + + @patch("azure.ai.agentserver.core.tools.runtime._runtime.FoundryToolClient") + def test_create_tool_runtime_returns_default_runtime_with_valid_params( + self, + mock_client_class, + mock_credential + ): + """Test create_tool_runtime returns DefaultFoundryToolRuntime when both params are provided.""" + mock_client_class.return_value = MagicMock() + endpoint = "https://test-project.azure.com" + + runtime = create_tool_runtime(project_endpoint=endpoint, credential=mock_credential) + + assert isinstance(runtime, DefaultFoundryToolRuntime) + + def test_create_tool_runtime_returns_throwing_runtime_when_endpoint_is_none( + self, + mock_credential + ): + """Test create_tool_runtime returns ThrowingFoundryToolRuntime when endpoint is None.""" + runtime = create_tool_runtime(project_endpoint=None, credential=mock_credential) + + assert isinstance(runtime, ThrowingFoundryToolRuntime) + + def test_create_tool_runtime_returns_throwing_runtime_when_credential_is_none(self): + """Test create_tool_runtime returns ThrowingFoundryToolRuntime when credential is None.""" + runtime = create_tool_runtime(project_endpoint="https://test.azure.com", credential=None) + + assert isinstance(runtime, ThrowingFoundryToolRuntime) + + def test_create_tool_runtime_returns_throwing_runtime_when_both_are_none(self): + """Test create_tool_runtime returns ThrowingFoundryToolRuntime when both params are None.""" + runtime = create_tool_runtime(project_endpoint=None, credential=None) + + assert isinstance(runtime, ThrowingFoundryToolRuntime) + + def test_create_tool_runtime_returns_throwing_runtime_when_endpoint_is_empty_string( + self, + 
mock_credential + ): + """Test create_tool_runtime returns ThrowingFoundryToolRuntime when endpoint is empty string.""" + runtime = create_tool_runtime(project_endpoint="", credential=mock_credential) + + assert isinstance(runtime, ThrowingFoundryToolRuntime) + + +class TestThrowingFoundryToolRuntime: + """Tests for ThrowingFoundryToolRuntime.""" + + def test_catalog_raises_runtime_error(self): + """Test catalog property raises RuntimeError.""" + runtime = ThrowingFoundryToolRuntime() + + with pytest.raises(RuntimeError) as exc_info: + _ = runtime.catalog + + assert "FoundryToolRuntime is not configured" in str(exc_info.value) + assert "project endpoint and credential" in str(exc_info.value) + + def test_invocation_raises_runtime_error(self): + """Test invocation property raises RuntimeError.""" + runtime = ThrowingFoundryToolRuntime() + + with pytest.raises(RuntimeError) as exc_info: + _ = runtime.invocation + + assert "FoundryToolRuntime is not configured" in str(exc_info.value) + assert "project endpoint and credential" in str(exc_info.value) + + @pytest.mark.asyncio + async def test_invoke_raises_runtime_error(self): + """Test invoke method raises RuntimeError (via invocation property).""" + runtime = ThrowingFoundryToolRuntime() + + with pytest.raises(RuntimeError) as exc_info: + await runtime.invoke({"type": "test"}, {"arg": "value"}) + + assert "FoundryToolRuntime is not configured" in str(exc_info.value) + + @pytest.mark.asyncio + async def test_aenter_returns_self(self): + """Test __aenter__ returns the runtime instance.""" + runtime = ThrowingFoundryToolRuntime() + + async with runtime as r: + assert r is runtime + + @pytest.mark.asyncio + async def test_aexit_completes_successfully(self): + """Test __aexit__ completes without error.""" + runtime = ThrowingFoundryToolRuntime() + + # Should not raise any exception + async with runtime: + pass + + @pytest.mark.asyncio + async def test_context_manager_does_not_suppress_exceptions(self): + """Test context 
manager does not suppress exceptions.""" + runtime = ThrowingFoundryToolRuntime() + + with pytest.raises(ValueError): + async with runtime: + raise ValueError("Test error") + + def test_error_message_is_class_variable(self): + """Test _ERROR_MESSAGE is defined as a class variable.""" + assert hasattr(ThrowingFoundryToolRuntime, "_ERROR_MESSAGE") + assert isinstance(ThrowingFoundryToolRuntime._ERROR_MESSAGE, str) + From 4707218be0161a2e89c167632f422e0ea0627145 Mon Sep 17 00:00:00 2001 From: Jun'an Chen Date: Tue, 27 Jan 2026 14:27:19 -0800 Subject: [PATCH 79/94] [agentserver] Bump version to 1.0.0b10 (#44858) * Bump version to 1.0.0b10 * Revert "remove "from __future__ import annotations" as we support py>=3.10" This reverts commit 39a4b219101538075a7eba23271b78ad63ade403. * fix pylint --- .../azure-ai-agentserver-agentframework/CHANGELOG.md | 7 +++++++ .../ai/agentserver/agentframework/_agent_framework.py | 1 + .../agentserver/agentframework/_ai_agent_adapter.py | 1 + .../ai/agentserver/agentframework/_foundry_tools.py | 1 + .../azure/ai/agentserver/agentframework/_version.py | 2 +- .../models/agent_framework_input_converters.py | 1 + .../agent_framework_output_non_streaming_converter.py | 1 + .../agentframework/models/agent_id_generator.py | 1 + .../agentframework/models/utils/async_iter.py | 1 + .../azure-ai-agentserver-core/CHANGELOG.md | 6 ++++++ .../azure/ai/agentserver/core/_version.py | 2 +- .../agentserver/core/application/_package_metadata.py | 1 + .../azure/ai/agentserver/core/server/base.py | 7 ++++--- .../core/server/common/agent_run_context.py | 8 +++----- .../common/id_generator/foundry_id_generator.py | 1 + .../agentserver/core/tools/client/operations/_base.py | 1 + .../ai/agentserver/core/tools/runtime/_runtime.py | 11 ++++++++--- .../azure/ai/agentserver/core/utils/_credential.py | 1 + .../samples/bilingual_weekend_planner/main.py | 1 + .../azure-ai-agentserver-langgraph/CHANGELOG.md | 6 ++++++ .../azure/ai/agentserver/langgraph/_version.py | 2 +- 
.../langgraph/models/response_api_converter.py | 1 + .../models/response_api_default_converter.py | 1 + .../azure/ai/agentserver/langgraph/tools/_builder.py | 2 +- .../ai/agentserver/langgraph/tools/_chat_model.py | 1 + .../ai/agentserver/langgraph/tools/_middleware.py | 1 + .../samples/custom_state/main.py | 1 + .../samples/mcp_apikey/mcp_apikey.py | 1 + 28 files changed, 56 insertions(+), 15 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index e75e2e293b1a..d9605e795da3 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -1,6 +1,13 @@ # Release History +## 1.0.0b10 (2026-01-27) + +### Bugs Fixed + +- Support implicit message item type. +- Make AZURE_AI_PROJECTS_ENDPOINT optional. + ## 1.0.0b9 (2026-01-23) ### Features Added diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py index 06a2ca8c1411..cb7793038408 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py @@ -2,6 +2,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- # pylint: disable=logging-fstring-interpolation,no-name-in-module,no-member,do-not-import-asyncio +from __future__ import annotations import os from typing import Any, AsyncGenerator, Optional, TYPE_CHECKING, Union, Callable diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py index 30ee1b0cf2e9..100f75ed3d82 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_ai_agent_adapter.py @@ -2,6 +2,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- # pylint: disable=no-name-in-module,import-error +from __future__ import annotations from typing import Any, AsyncGenerator, Optional, Union diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py index c2ee0e50746c..64120308a872 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py @@ -3,6 +3,7 @@ # --------------------------------------------------------- # pylint: disable=client-accepts-api-version-keyword,missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs # pylint: disable=no-name-in-module,import-error +from __future__ import annotations import inspect from typing import Any, Awaitable, Callable, Dict, List, Optional, Sequence diff --git 
a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py index b1c2836b6921..9ab0a006e0d0 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b9" +VERSION = "1.0.0b10" diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py index 4a1a951ed63e..a21e5b9c44c7 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py @@ -3,6 +3,7 @@ # --------------------------------------------------------- # pylint: disable=too-many-nested-blocks,too-many-return-statements,too-many-branches # mypy: disable-error-code="no-redef" +from __future__ import annotations from typing import Dict, List, Optional diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py index 167206c40ea1..4984b2fc0423 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py @@ -1,6 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- +from __future__ import annotations import datetime import json diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py index a694e514eb55..da4045898a5e 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py @@ -9,6 +9,7 @@ returned so callers can decide how to handle absence. """ +from __future__ import annotations from typing import Optional diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py index ab36f95a3bdd..fdf3b2fbb2a3 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py @@ -1,6 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- +from __future__ import annotations from collections.abc import AsyncIterable, AsyncIterator, Callable from typing import TypeVar, Optional, Tuple diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index 9d861de04f6d..8cd2e9b9a861 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -1,6 +1,12 @@ # Release History +## 1.0.0b10 (2026-01-27) + +### Bugs Fixed + +- Make AZURE_AI_PROJECTS_ENDPOINT optional. + ## 1.0.0b9 (2026-01-23) ### Features Added diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py index b1c2836b6921..9ab0a006e0d0 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b9" +VERSION = "1.0.0b10" diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py index 2392e0496136..5701110e5c7f 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py @@ -1,6 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- +from __future__ import annotations import platform from dataclasses import dataclass diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index cf5983110a87..7ec666b52741 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -12,9 +12,6 @@ from typing import Any, AsyncGenerator, Generator, Optional, Union import uvicorn -from azure.core.credentials import TokenCredential -from azure.core.credentials_async import AsyncTokenCredential -from azure.identity.aio import DefaultAzureCredential as AsyncDefaultTokenCredential from opentelemetry import context as otel_context, trace from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from starlette.applications import Starlette @@ -26,6 +23,10 @@ from starlette.routing import Route from starlette.types import ASGIApp +from azure.core.credentials import TokenCredential +from azure.core.credentials_async import AsyncTokenCredential +from azure.identity.aio import DefaultAzureCredential as AsyncDefaultTokenCredential + from ._context import AgentServerContext from .common.agent_run_context import AgentRunContext from ..constants import Constants diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py index 980f9c9d38f3..2f61882abc59 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py @@ -1,13 +1,11 @@ # 
--------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -from typing import Any +from .id_generator.foundry_id_generator import FoundryIdGenerator +from .id_generator.id_generator import IdGenerator from ...logger import get_logger from ...models import CreateResponse from ...models.projects import AgentId, AgentReference, ResponseConversation1 -from .id_generator.foundry_id_generator import FoundryIdGenerator -from .id_generator.id_generator import IdGenerator -from ...tools import UserInfo logger = get_logger() @@ -16,7 +14,7 @@ class AgentRunContext: """ :meta private: """ - def __init__(self, payload: dict, **kwargs: Any) -> None: + def __init__(self, payload: dict) -> None: self._raw_payload = payload self._request = _deserialize_create_response(payload) self._id_generator = FoundryIdGenerator.from_request(payload) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py index a7cd78a391a2..2c1a2d154d91 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py @@ -2,6 +2,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- +from __future__ import annotations import base64 import os diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_base.py index 7bf8aa09a7ed..a3c552fe2575 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/operations/_base.py @@ -1,6 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- +from __future__ import annotations from abc import ABC import json diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_runtime.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_runtime.py index eed4f63c7228..6335d6ee7c58 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_runtime.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_runtime.py @@ -1,6 +1,8 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- +from __future__ import annotations + import os from abc import ABC, abstractmethod from typing import Any, AsyncContextManager, ClassVar, Dict, Optional, Union @@ -31,8 +33,7 @@ def create_tool_runtime(project_endpoint: str | None, """ if project_endpoint and credential: return DefaultFoundryToolRuntime(project_endpoint=project_endpoint, credential=credential) - else: - return ThrowingFoundryToolRuntime() + return ThrowingFoundryToolRuntime() class FoundryToolRuntime(AsyncContextManager["FoundryToolRuntime"], ABC): """Base class for Foundry tool runtimes.""" @@ -123,6 +124,8 @@ class ThrowingFoundryToolRuntime(FoundryToolRuntime): def catalog(self) -> FoundryToolCatalog: """The tool catalog. + :returns: The tool catalog. + :rtype: FoundryToolCatalog :raises RuntimeError: Always raised to indicate the runtime is not configured. """ raise RuntimeError(self._ERROR_MESSAGE) @@ -131,6 +134,8 @@ def catalog(self) -> FoundryToolCatalog: def invocation(self) -> FoundryToolInvocationResolver: """The tool invocation resolver. + :returns: The tool invocation resolver. + :rtype: FoundryToolInvocationResolver :raises RuntimeError: Always raised to indicate the runtime is not configured. 
""" raise RuntimeError(self._ERROR_MESSAGE) @@ -139,4 +144,4 @@ async def __aenter__(self) -> "ThrowingFoundryToolRuntime": return self async def __aexit__(self, exc_type, exc_value, traceback): - pass \ No newline at end of file + pass diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py index 715e942a596c..398a8c46fd5d 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py @@ -1,6 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- +from __future__ import annotations import asyncio # pylint: disable=C4763 import inspect diff --git a/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py b/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py index c4719eff92b8..099d8dc45181 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py +++ b/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py @@ -1,6 +1,7 @@ # mypy: ignore-errors """Bilingual weekend planner sample with full GenAI telemetry capture.""" +from __future__ import annotations import json import logging diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index 6d56228af1ff..1d94fe7ba05e 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -1,6 +1,12 @@ # Release History +## 1.0.0b10 (2026-01-27) + +### Bugs Fixed + +- Make AZURE_AI_PROJECTS_ENDPOINT optional. 
+ ## 1.0.0b9 (2026-01-23) ### Features Added diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py index b1c2836b6921..9ab0a006e0d0 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b9" +VERSION = "1.0.0b10" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py index 1bd778568452..32cbf93a4bfb 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py @@ -18,6 +18,7 @@ provided async iterator). Keep them pure transformation layers so they are testable. 
""" +from __future__ import annotations from abc import ABC, abstractmethod from typing import Any, AsyncIterable, AsyncIterator, Dict, TypedDict, Union diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py index 94705e790bc8..cfe5229e3634 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py @@ -1,6 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- +from __future__ import annotations import time from collections.abc import Callable diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py index d73544a707c5..0ea9a2da80f2 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py @@ -60,4 +60,4 @@ def use_foundry_tools( # pylint: disable=C4743 foundry_tools = [ensure_foundry_tool(tool) for tool in model_or_tools] get_registry().extend(foundry_tools) - return FoundryToolBindingMiddleware(foundry_tools) \ No newline at end of file + return FoundryToolBindingMiddleware(foundry_tools) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py index bef818fec89d..4ca422b88c41 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py @@ -1,6 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- +from __future__ import annotations from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py index 802e51e9f3ea..c226e51e72ac 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py @@ -1,6 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- +from __future__ import annotations from typing import Awaitable, Callable, ClassVar, List diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py index 174afbb9bea5..ec45dceccfc8 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py @@ -1,3 +1,4 @@ +from __future__ import annotations import json import os diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/mcp_apikey/mcp_apikey.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/mcp_apikey/mcp_apikey.py index d1a363cf63c9..12f5c50aadae 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/mcp_apikey/mcp_apikey.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/mcp_apikey/mcp_apikey.py @@ -1,4 +1,5 @@ # Copyright (c) Microsoft. All rights reserved. 
+from __future__ import annotations import asyncio import os From c755880628d38473c23724aefe0154351ca4ab51 Mon Sep 17 00:00:00 2001 From: junanchen Date: Tue, 27 Jan 2026 18:08:29 -0800 Subject: [PATCH 80/94] [temp] set "azure-monitor-opentelemetry<1.8.5" in af & lg to re-enable traces --- .../azure-ai-agentserver-agentframework/pyproject.toml | 3 ++- sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml index 527c43728865..0a7aae8aa128 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml @@ -21,7 +21,8 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-ai-agentserver-core==1.0.0b9", + "azure-monitor-opentelemetry<1.8.5", + "azure-ai-agentserver-core==1.0.0b10", "agent-framework-azure-ai>=1.0.0b251112,<=1.0.0b260107", "agent-framework-core>=1.0.0b251112,<=1.0.0b260107", "opentelemetry-exporter-otlp-proto-grpc>=1.36.0", diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml index a39b56cbe204..bd95022bfcd2 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml @@ -20,7 +20,8 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-ai-agentserver-core==1.0.0b9", + "azure-monitor-opentelemetry<1.8.5", + "azure-ai-agentserver-core==1.0.0b10", "langchain>=1.0.3", "langchain-openai>=1.0.3", "langchain-azure-ai[opentelemetry]~=1.0.0", From 20a9dac18424c8ab8677e9b46a2c4226b29abb0a Mon Sep 17 00:00:00 2001 From: junanchen Date: Tue, 27 Jan 2026 19:03:38 -0800 Subject: [PATCH 81/94] add upper limit 
"opentelemetry-exporter-otlp-proto-grpc>=1.36.0,<=1.39.0" --- .../azure-ai-agentserver-agentframework/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml index 0a7aae8aa128..359318c5a8e0 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml @@ -25,7 +25,7 @@ dependencies = [ "azure-ai-agentserver-core==1.0.0b10", "agent-framework-azure-ai>=1.0.0b251112,<=1.0.0b260107", "agent-framework-core>=1.0.0b251112,<=1.0.0b260107", - "opentelemetry-exporter-otlp-proto-grpc>=1.36.0", + "opentelemetry-exporter-otlp-proto-grpc>=1.36.0,<=1.39.0", ] [build-system] From 54313552d6b05cd2dbef0d60e66e57d3067ca497 Mon Sep 17 00:00:00 2001 From: junanchen Date: Tue, 27 Jan 2026 21:36:22 -0800 Subject: [PATCH 82/94] limit "azure-monitor-opentelemetry>=1.5.0,<1.8.5" in core --- .../azure-ai-agentserver-agentframework/pyproject.toml | 1 - sdk/agentserver/azure-ai-agentserver-core/pyproject.toml | 2 +- sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml | 1 - 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml index 359318c5a8e0..0ede428d03a6 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml @@ -21,7 +21,6 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-monitor-opentelemetry<1.8.5", "azure-ai-agentserver-core==1.0.0b10", "agent-framework-azure-ai>=1.0.0b251112,<=1.0.0b260107", "agent-framework-core>=1.0.0b251112,<=1.0.0b260107", diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml 
b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index 3427688bb2b0..55307e69540d 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -20,7 +20,7 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-monitor-opentelemetry>=1.5.0", + "azure-monitor-opentelemetry>=1.5.0,<1.8.5", "azure-ai-projects>=1.1.0b4", "azure-ai-agents==1.2.0b5", "azure-core>=1.35.0", diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml index bd95022bfcd2..f20fcfee1401 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml @@ -20,7 +20,6 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-monitor-opentelemetry<1.8.5", "azure-ai-agentserver-core==1.0.0b10", "langchain>=1.0.3", "langchain-openai>=1.0.3", From 52cf1f59cfd9bdba643ff388a955e65c0c7c05a8 Mon Sep 17 00:00:00 2001 From: Jun'an Chen Date: Thu, 29 Jan 2026 00:02:24 -0800 Subject: [PATCH 83/94] [agentserver] Change conversation id to optional (#44910) * make conversation id optional * Moved the LangGraph checkpointer guard back to agent_run and switched it to a dedicated exception so the core base server handles it (now surfaces as server_error), with tests updated to assert the new behavior. 
--- .../agentframework/_agent_framework.py | 11 +-- .../agentframework/_workflow_agent_adapter.py | 7 +- .../persistence/agent_thread_repository.py | 51 ++++++++------ .../persistence/checkpoint_repository.py | 16 +++-- .../test_conversation_id_optional.py | 38 +++++++++++ .../core/server/common/agent_run_context.py | 4 +- .../id_generator/foundry_id_generator.py | 8 ++- .../common/test_foundry_id_generator.py | 27 ++++++++ .../ai/agentserver/langgraph/_exceptions.py | 13 ++++ .../ai/agentserver/langgraph/langgraph.py | 12 +++- .../models/response_api_default_converter.py | 4 +- .../test_conversation_id_optional.py | 68 +++++++++++++++++++ 12 files changed, 219 insertions(+), 40 deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_conversation_id_optional.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/common/test_foundry_id_generator.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_exceptions.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_conversation_id_optional.py diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py index cb7793038408..32c171ec9eff 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py @@ -184,9 +184,10 @@ async def _load_agent_thread( :rtype: Optional[AgentThread] """ if self._thread_repository: - agent_thread = await self._thread_repository.get(context.conversation_id) + conversation_id = context.conversation_id + agent_thread = await self._thread_repository.get(conversation_id) if agent_thread: - 
logger.info(f"Loaded agent thread for conversation: {context.conversation_id}") + logger.info(f"Loaded agent thread for conversation: {conversation_id}") return agent_thread return agent.get_new_thread() return None @@ -202,9 +203,9 @@ async def _save_agent_thread(self, context: AgentRunContext, agent_thread: Agent :return: None :rtype: None """ - if agent_thread and self._thread_repository: - await self._thread_repository.set(context.conversation_id, agent_thread) - logger.info(f"Saved agent thread for conversation: {context.conversation_id}") + if agent_thread and self._thread_repository and (conversation_id := context.conversation_id): + await self._thread_repository.set(conversation_id, agent_thread) + logger.info(f"Saved agent thread for conversation: {conversation_id}") def _run_streaming_updates( self, diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py index f5a05609f0dc..f95ae294ff2c 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py @@ -59,9 +59,10 @@ async def agent_run( # pylint: disable=too-many-statements checkpoint_storage = None selected_checkpoint = None - if self._checkpoint_repository: - checkpoint_storage = await self._checkpoint_repository.get_or_create(context.conversation_id) - selected_checkpoint = await self._get_latest_checkpoint(checkpoint_storage) + if self._checkpoint_repository and (conversation_id := context.conversation_id): + checkpoint_storage = await self._checkpoint_repository.get_or_create(conversation_id) + if checkpoint_storage: + selected_checkpoint = await self._get_latest_checkpoint(checkpoint_storage) if selected_checkpoint: summary = 
get_checkpoint_summary(selected_checkpoint) if summary.status == "completed": diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py index f5b54d284798..2a43dbb5aee8 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/agent_thread_repository.py @@ -19,13 +19,13 @@ class AgentThreadRepository(ABC): @abstractmethod async def get( self, - conversation_id: str, + conversation_id: Optional[str], agent: Optional[Union[AgentProtocol, WorkflowAgent]] = None, ) -> Optional[AgentThread]: """Retrieve the saved thread for a given conversation ID. :param conversation_id: The conversation ID. - :type conversation_id: str + :type conversation_id: Optional[str] :param agent: The agent instance. If provided, it can be used to deserialize the thread. :type agent: Optional[Union[AgentProtocol, WorkflowAgent]] @@ -34,11 +34,11 @@ async def get( """ @abstractmethod - async def set(self, conversation_id: str, thread: AgentThread) -> None: + async def set(self, conversation_id: Optional[str], thread: AgentThread) -> None: """Save the thread for a given conversation ID. :param conversation_id: The conversation ID. - :type conversation_id: str + :type conversation_id: Optional[str] :param thread: The thread to save. :type thread: AgentThread """ @@ -51,32 +51,35 @@ def __init__(self) -> None: async def get( self, - conversation_id: str, + conversation_id: Optional[str], agent: Optional[Union[AgentProtocol, WorkflowAgent]] = None, ) -> Optional[AgentThread]: """Retrieve the saved thread for a given conversation ID. :param conversation_id: The conversation ID. 
- :type conversation_id: str + :type conversation_id: Optional[str] :param agent: The agent instance. It will be used for in-memory repository for interface consistency. :type agent: Optional[Union[AgentProtocol, WorkflowAgent]] :return: The saved AgentThread if available, None otherwise. :rtype: Optional[AgentThread] """ + if not conversation_id: + return None if conversation_id in self._inventory: return self._inventory[conversation_id] return None - async def set(self, conversation_id: str, thread: AgentThread) -> None: + async def set(self, conversation_id: Optional[str], thread: AgentThread) -> None: """Save the thread for a given conversation ID. :param conversation_id: The conversation ID. - :type conversation_id: str + :type conversation_id: Optional[str] :param thread: The thread to save. :type thread: AgentThread """ - if conversation_id and thread: - self._inventory[conversation_id] = thread + if not conversation_id or not thread: + return + self._inventory[conversation_id] = thread class SerializedAgentThreadRepository(AgentThreadRepository): @@ -92,13 +95,13 @@ def __init__(self, agent: AgentProtocol) -> None: async def get( self, - conversation_id: str, + conversation_id: Optional[str], agent: Optional[Union[AgentProtocol, WorkflowAgent]] = None, ) -> Optional[AgentThread]: """Retrieve the saved thread for a given conversation ID. :param conversation_id: The conversation ID. - :type conversation_id: str + :type conversation_id: Optional[str] :param agent: The agent instance. If provided, it can be used to deserialize the thread. Otherwise, the repository's agent will be used. :type agent: Optional[Union[AgentProtocol, WorkflowAgent]] @@ -106,6 +109,8 @@ async def get( :return: The saved AgentThread if available, None otherwise. 
:rtype: Optional[AgentThread] """ + if not conversation_id: + return None serialized_thread = await self.read_from_storage(conversation_id) if serialized_thread: agent_to_use = agent or self._agent @@ -113,33 +118,35 @@ async def get( return thread return None - async def set(self, conversation_id: str, thread: AgentThread) -> None: + async def set(self, conversation_id: Optional[str], thread: AgentThread) -> None: """Save the thread for a given conversation ID. :param conversation_id: The conversation ID. - :type conversation_id: str + :type conversation_id: Optional[str] :param thread: The thread to save. :type thread: AgentThread """ + if not conversation_id: + return serialized_thread = await thread.serialize() await self.write_to_storage(conversation_id, serialized_thread) - async def read_from_storage(self, conversation_id: str) -> Optional[Any]: + async def read_from_storage(self, conversation_id: Optional[str]) -> Optional[Any]: """Read the serialized thread from storage. :param conversation_id: The conversation ID. - :type conversation_id: str + :type conversation_id: Optional[str] :return: The serialized thread if available, None otherwise. :rtype: Optional[Any] """ raise NotImplementedError("read_from_storage is not implemented.") - async def write_to_storage(self, conversation_id: str, serialized_thread: Any) -> None: + async def write_to_storage(self, conversation_id: Optional[str], serialized_thread: Any) -> None: """Write the serialized thread to storage. :param conversation_id: The conversation ID. - :type conversation_id: str + :type conversation_id: Optional[str] :param serialized_thread: The serialized thread to save. 
:type serialized_thread: Any :return: None @@ -155,7 +162,9 @@ def __init__(self, agent: AgentProtocol, storage_path: str) -> None: self._storage_path = storage_path os.makedirs(self._storage_path, exist_ok=True) - async def read_from_storage(self, conversation_id: str) -> Optional[Any]: + async def read_from_storage(self, conversation_id: Optional[str]) -> Optional[Any]: + if not conversation_id: + return None file_path = self._get_file_path(conversation_id) if os.path.exists(file_path): with open(file_path, "r", encoding="utf-8") as f: @@ -164,7 +173,9 @@ async def read_from_storage(self, conversation_id: str) -> Optional[Any]: return json.loads(serialized_thread) return None - async def write_to_storage(self, conversation_id: str, serialized_thread: Any) -> None: + async def write_to_storage(self, conversation_id: Optional[str], serialized_thread: Any) -> None: + if not conversation_id: + return serialized_str = json.dumps(serialized_thread) file_path = self._get_file_path(conversation_id) with open(file_path, "w", encoding="utf-8") as f: diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/checkpoint_repository.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/checkpoint_repository.py index da673ff6af51..9848d01f6b10 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/checkpoint_repository.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/checkpoint_repository.py @@ -18,11 +18,11 @@ class CheckpointRepository(ABC): :meta private: """ @abstractmethod - async def get_or_create(self, conversation_id: str) -> Optional[CheckpointStorage]: + async def get_or_create(self, conversation_id: Optional[str]) -> Optional[CheckpointStorage]: """Retrieve or create a checkpoint storage by conversation ID. 
:param conversation_id: The unique identifier for the checkpoint. - :type conversation_id: str + :type conversation_id: Optional[str] :return: The CheckpointStorage if found or created, None otherwise. :rtype: Optional[CheckpointStorage] """ @@ -33,14 +33,16 @@ class InMemoryCheckpointRepository(CheckpointRepository): def __init__(self) -> None: self._inventory: dict[str, CheckpointStorage] = {} - async def get_or_create(self, conversation_id: str) -> Optional[CheckpointStorage]: + async def get_or_create(self, conversation_id: Optional[str]) -> Optional[CheckpointStorage]: """Retrieve or create a checkpoint storage by conversation ID. :param conversation_id: The unique identifier for the checkpoint. - :type conversation_id: str + :type conversation_id: Optional[str] :return: The CheckpointStorage if found or created, None otherwise. :rtype: Optional[CheckpointStorage] """ + if not conversation_id: + return None if conversation_id not in self._inventory: self._inventory[conversation_id] = InMemoryCheckpointStorage() return self._inventory[conversation_id] @@ -53,14 +55,16 @@ def __init__(self, storage_path: str) -> None: self._inventory: dict[str, CheckpointStorage] = {} os.makedirs(self._storage_path, exist_ok=True) - async def get_or_create(self, conversation_id: str) -> Optional[CheckpointStorage]: + async def get_or_create(self, conversation_id: Optional[str]) -> Optional[CheckpointStorage]: """Retrieve or create a checkpoint storage by conversation ID. :param conversation_id: The unique identifier for the checkpoint. - :type conversation_id: str + :type conversation_id: Optional[str] :return: The CheckpointStorage if found or created, None otherwise. 
:rtype: Optional[CheckpointStorage] """ + if not conversation_id: + return None if conversation_id not in self._inventory: self._inventory[conversation_id] = FileCheckpointStorage(self._get_dir_path(conversation_id)) return self._inventory[conversation_id] diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_conversation_id_optional.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_conversation_id_optional.py new file mode 100644 index 000000000000..85fdc34f6498 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_conversation_id_optional.py @@ -0,0 +1,38 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from unittest.mock import Mock + +import pytest +from agent_framework import AgentThread, InMemoryCheckpointStorage + +from azure.ai.agentserver.agentframework.persistence.agent_thread_repository import ( + InMemoryAgentThreadRepository, +) +from azure.ai.agentserver.agentframework.persistence.checkpoint_repository import ( + InMemoryCheckpointRepository, +) + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_inmemory_thread_repository_ignores_missing_conversation_id() -> None: + repo = InMemoryAgentThreadRepository() + thread = Mock(spec=AgentThread) + + await repo.set(None, thread) + assert await repo.get(None) is None + + await repo.set("conv-1", thread) + assert await repo.get("conv-1") is thread + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_inmemory_checkpoint_repository_returns_none_without_conversation_id() -> None: + repo = InMemoryCheckpointRepository() + + assert await repo.get_or_create(None) is None + + storage = await repo.get_or_create("conv-1") + assert isinstance(storage, InMemoryCheckpointStorage) diff --git 
a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py index 2f61882abc59..87c32926bde4 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py @@ -1,6 +1,8 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- +from typing import Optional + from .id_generator.foundry_id_generator import FoundryIdGenerator from .id_generator.id_generator import IdGenerator from ...logger import get_logger @@ -39,7 +41,7 @@ def response_id(self) -> str: return self._response_id @property - def conversation_id(self) -> str: + def conversation_id(self) -> Optional[str]: return self._conversation_id @property diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py index 2c1a2d154d91..01ac72289e4e 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py @@ -27,8 +27,12 @@ class FoundryIdGenerator(IdGenerator): def __init__(self, response_id: Optional[str], conversation_id: Optional[str]): self.response_id = response_id or self._new_id("resp") - self.conversation_id = conversation_id or self._new_id("conv") - self._partition_id = self._extract_partition_id(self.conversation_id) + self.conversation_id = conversation_id + partition_source = 
self.conversation_id or self.response_id + try: + self._partition_id = self._extract_partition_id(partition_source) + except ValueError: + self._partition_id = self._secure_entropy(18) @classmethod def from_request(cls, payload: dict) -> "FoundryIdGenerator": diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/common/test_foundry_id_generator.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/common/test_foundry_id_generator.py new file mode 100644 index 000000000000..a46f45f7c739 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/common/test_foundry_id_generator.py @@ -0,0 +1,27 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from azure.ai.agentserver.core.server.common.id_generator.foundry_id_generator import FoundryIdGenerator + + +def test_conversation_id_none_uses_response_partition(): + response_id = FoundryIdGenerator._new_id("resp") + generator = FoundryIdGenerator(response_id=response_id, conversation_id=None) + + assert generator.conversation_id is None + + expected_partition = FoundryIdGenerator._extract_partition_id(response_id) + generated_id = generator.generate("msg") + assert FoundryIdGenerator._extract_partition_id(generated_id) == expected_partition + + +def test_conversation_id_present_uses_conversation_partition(): + response_id = FoundryIdGenerator._new_id("resp") + conversation_id = FoundryIdGenerator._new_id("conv") + generator = FoundryIdGenerator(response_id=response_id, conversation_id=conversation_id) + + assert generator.conversation_id == conversation_id + + expected_partition = FoundryIdGenerator._extract_partition_id(conversation_id) + generated_id = generator.generate("msg") + assert FoundryIdGenerator._extract_partition_id(generated_id) == expected_partition diff --git 
a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_exceptions.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_exceptions.py new file mode 100644 index 000000000000..27dc2e5f1e9b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_exceptions.py @@ -0,0 +1,13 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from typing import Optional + + +class LangGraphMissingConversationIdError(ValueError): + def __init__(self, message: Optional[str] = None) -> None: + super().__init__( + message + or "conversation.id is required when a LangGraph checkpointer is enabled. " + "Provide conversation.id or disable the checkpointer." + ) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index 13646778454c..95e220ee978b 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -15,6 +15,7 @@ from azure.ai.agentserver.core import AgentRunContext from azure.ai.agentserver.core.tools import OAuthConsentRequiredError # pylint:disable=import-error,no-name-in-module from ._context import LanggraphRunContext +from ._exceptions import LangGraphMissingConversationIdError from .models.response_api_converter import GraphInputArguments, ResponseAPIConverter from .models.response_api_default_converter import ResponseAPIDefaultConverter from .models.utils import is_state_schema_valid @@ -66,6 +67,7 @@ async def agent_run(self, context: AgentRunContext): # Resolve graph - always resolve if it's a factory function to get fresh graph each time # For factories, get a new 
graph instance per request to avoid concurrency issues try: + self._check_missing_thread_id(context) lg_run_context = await self.setup_lg_run_context(context) input_arguments = await self.converter.convert_request(lg_run_context) self.ensure_runnable_config(input_arguments, lg_run_context) @@ -115,6 +117,11 @@ def get_trace_attributes(self): attrs["service.namespace"] = "azure.ai.agentserver.langgraph" return attrs + def _check_missing_thread_id(self, context: AgentRunContext) -> None: + checkpointer = getattr(self._graph, "checkpointer", None) + if checkpointer and checkpointer is not False and not context.conversation_id: + raise LangGraphMissingConversationIdError() + async def agent_run_non_stream(self, input_arguments: GraphInputArguments): """ Run the agent with non-streaming response. @@ -125,7 +132,6 @@ async def agent_run_non_stream(self, input_arguments: GraphInputArguments): :return: The response of the agent run. :rtype: dict """ - try: result = await self._graph.ainvoke(**input_arguments) output = await self.converter.convert_response_non_stream(result, input_arguments["context"]) @@ -167,7 +173,9 @@ def ensure_runnable_config(self, input_arguments: GraphInputArguments, context: """ config = input_arguments.get("config", {}) configurable = config.get("configurable", {}) - configurable["thread_id"] = input_arguments["context"].agent_run.conversation_id + thread_id = input_arguments["context"].agent_run.conversation_id + if thread_id: + configurable["thread_id"] = thread_id config["configurable"] = configurable context.attach_to_config(config) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py index cfe5229e3634..912e3f9632c5 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py 
+++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py @@ -148,8 +148,10 @@ def _convert_request_input( return converter.convert() async def _aget_state(self, context: LanggraphRunContext) -> Optional[StateSnapshot]: + if not (thread_id := context.agent_run.conversation_id): + return None config = RunnableConfig( - configurable={"thread_id": context.agent_run.conversation_id}, + configurable={"thread_id": thread_id}, ) if self._graph.checkpointer: state = await self._graph.aget_state(config=config) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_conversation_id_optional.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_conversation_id_optional.py new file mode 100644 index 000000000000..a337f494e595 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_conversation_id_optional.py @@ -0,0 +1,68 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +from types import SimpleNamespace + +import pytest + +from azure.ai.agentserver.langgraph._exceptions import LangGraphMissingConversationIdError +from azure.ai.agentserver.langgraph.langgraph import LangGraphAdapter +from azure.ai.agentserver.langgraph.models.response_api_default_converter import ResponseAPIDefaultConverter + + +class _DummyConverter: + async def convert_request(self, context): # pragma: no cover - guard should short-circuit first + raise AssertionError("convert_request should not be called for this test") + + async def convert_response_non_stream(self, output, context): # pragma: no cover - guard should short-circuit + raise AssertionError("convert_response_non_stream should not be called for this test") + + async def convert_response_stream(self, output, context): # pragma: no cover - guard should short-circuit + raise AssertionError("convert_response_stream should not be called for this test") + + +class _DummyGraph: + def __init__(self) -> None: + self.checkpointer = object() + self.last_config = None + + async def aget_state(self, config): + self.last_config = config + return "state" + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_aget_state_skips_without_conversation_id() -> None: + graph = _DummyGraph() + converter = ResponseAPIDefaultConverter(graph) # type: ignore[arg-type] + context = SimpleNamespace(agent_run=SimpleNamespace(conversation_id=None)) + + state = await converter._aget_state(context) # type: ignore[arg-type] + + assert state is None + assert graph.last_config is None + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_aget_state_uses_conversation_id() -> None: + graph = _DummyGraph() + converter = ResponseAPIDefaultConverter(graph) # type: ignore[arg-type] + context = SimpleNamespace(agent_run=SimpleNamespace(conversation_id="conv-1")) + + state = await converter._aget_state(context) # type: ignore[arg-type] + + assert state == "state" + 
assert graph.last_config["configurable"]["thread_id"] == "conv-1" + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_agent_run_requires_conversation_id_with_checkpointer_raises() -> None: + graph = _DummyGraph() + adapter = LangGraphAdapter(graph, converter=_DummyConverter()) # type: ignore[arg-type] + context = SimpleNamespace(conversation_id=None, stream=False) + + with pytest.raises(LangGraphMissingConversationIdError): + await adapter.agent_run(context) From 5a21f4dbab9c9ac66f68914d3336975ec50fd2e9 Mon Sep 17 00:00:00 2001 From: Jun'an Chen Date: Thu, 29 Jan 2026 10:42:26 -0800 Subject: [PATCH 84/94] [agentserver] Create `AGENTS.md`, `PLANNING.md`, `TASK.md` (#44913) * init AGENTS.md * init AGENTS.md, PLANNING.md and TASK.md * Task.md template * Task.md template * Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * move task.md tempalte instructions to agents.md --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- sdk/agentserver/AGENTS.md | 198 ++++++++++++++++++++++++++++++++++++ sdk/agentserver/PLANNING.md | 100 ++++++++++++++++++ sdk/agentserver/TASK.md | 4 + 3 files changed, 302 insertions(+) create mode 100644 sdk/agentserver/AGENTS.md create mode 100644 sdk/agentserver/PLANNING.md create mode 100644 sdk/agentserver/TASK.md diff --git a/sdk/agentserver/AGENTS.md b/sdk/agentserver/AGENTS.md new file mode 100644 index 000000000000..c59728752408 --- /dev/null +++ b/sdk/agentserver/AGENTS.md @@ -0,0 +1,198 @@ +# AGENTS.md + +This file provides comprehensive guidance to Coding Agents(Codex, Claude Code, GitHub Copilot, etc.) when working with Python code in this repository. + + +## 🎯 (Read-first) Project Awareness & Context + +- **Always read `PLANNING.md`** at the start of a new conversation to understand the project's architecture, goals, style, and constraints. +- **Check `TASK.md`** before starting a new task. 
If the task isn't listed, add it with a brief description and today's date. + +## 🏆 Core Development Philosophy + +### KISS (Keep It Simple, Stupid) + +Simplicity should be a key goal in design. Choose straightforward solutions over complex ones whenever possible. Simple solutions are easier to understand, maintain, and debug. + +### YAGNI (You Aren't Gonna Need It) + +Avoid building functionality on speculation. Implement features only when they are needed, not when you anticipate they might be useful in the future. + +### Design Principles + +- **Dependency Inversion**: High-level modules should not depend on low-level modules. Both should depend on abstractions. +- **Open/Closed Principle**: Software entities should be open for extension but closed for modification. +- **Single Responsibility**: Each function, class, and module should have one clear purpose. +- **Fail Fast**: Check for potential errors early and raise exceptions immediately when issues occur. + +### Implementation Patterns + +- **Explicit validation at boundaries**: Validate inputs and identifiers early; reject malformed descriptors and missing required fields. +- **Separation of resolution and execution**: Keep discovery/selection separate from invocation to allow late binding and interchangeable implementations. +- **Context-aware execution**: Thread request/user context through calls via scoped providers; avoid global mutable state. +- **Cache with safety**: Use bounded TTL caching with concurrency-safe in-flight de-duplication; invalidate on errors. +- **Stable naming and deterministic mapping**: Derive stable, unique names deterministically to avoid collisions. +- **Graceful defaults, loud misconfiguration**: Provide sensible defaults when optional data is missing; raise clear errors when required configuration is absent. +- **Thin integration layers**: Use adapters/middleware to translate between layers without leaking internals. 
+ +## ✅ Work Process (required) + +- **Before coding**: confirm the task in `TASK.md` → **Now**. +- **While working**: Add new sub-tasks or TODOs discovered during development to `TASK.md` under a "Discovered During Work" section. +- **After finishing**: mark the task done immediately, and note what changed (files/areas) in `TASK.md`. +- **Update CHANGELOG.md only when required** by release policy. + +### TASK.md template +```markdown +## Now (active) +- [ ] YYYY-MM-DD — + - Scope: + - Exit criteria: + +## Next (queued) +- [ ] YYYY-MM-DD — + +## Discovered During Work +- [ ] YYYY-MM-DD — + +## Done +- [x] YYYY-MM-DD — +``` + +## 📎 Style & Conventions & Standards + +### File and Function Limits + +- **Never create a file longer than 500 lines of code**. If approaching this limit, refactor by splitting into modules. +- **Functions should be under 50 lines** with a single, clear responsibility. +- **Classes should be under 100 lines** and represent a single concept or entity. +- **Organize code into clearly separated modules**, grouped by feature or responsibility. +- **Line length should be max 120 characters**, as enforced by Ruff in `pyproject.toml`. +- **Use the standard repo workflow**: from the package root, run tests and linters via `tox` (for example, `tox -e pytest` or `tox -e pylint`) rather than relying on a custom virtual environment name. +- **Keep modules focused and cohesive**; split by feature responsibility when a file grows large or mixes concerns. +- **Avoid drive-by refactors** unless required by the task. +- **Preserve public API stability** and match existing patterns in the package you touch. 
+ +### Naming Conventions + +- **Variables and functions**: `snake_case` +- **Classes**: `PascalCase` +- **Constants**: `UPPER_SNAKE_CASE` +- **Private attributes/methods**: `_leading_underscore` +- **Type aliases**: `PascalCase` +- **Enum values**: `UPPER_SNAKE_CASE` + +### Project File Conventions + +- **Follow per-package `pyproject.toml`** and `[tool.azure-sdk-build]` settings. +- **Do not edit generated code**. +- **Do not edit files with the header** `Code generated by Microsoft (R) Python Code Generator.` + - `sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/` + - `sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py` + - `sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py` + - `sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py` +- **Do not introduce secrets or credentials.** +- **Do not disable TLS/SSL verification** without explicit approval. +- **Keep logs free of sensitive data.** + +### Python Code Standards + +- **Follow existing package patterns** and Azure SDK for Python guidelines. +- **Type hints and async patterns** should match existing code. +- **Respect Ruff settings** (line length 120, isort rules) in each package `pyproject.toml`. +- **Avoid new dependencies** unless necessary and approved. + +### Docstring Standards +Use reStructuredText (reST) / Sphinx Style docstrings for all public functions, classes, and modules: + +```python +def calculate_discount( + price: Decimal, + discount_percent: float, + min_amount: Decimal = Decimal("0.01") +) -> Decimal: + """Calculate the discounted price for a product. + + :param price: The original price of the product. + :type price: Decimal + :param discount_percent: The discount percentage to apply (0-100). + :type discount_percent: float + :param min_amount: The minimum amount after discount (default is 0.01). 
+ :type min_amount: Decimal + :return: The price after applying the discount, not less than min_amount. + :rtype: Decimal + :raises ValueError: If discount_percent is not between 0 and 100. + + Example: + Calculate a 15% discount on a $100 product: + + .. code-block:: python + + calculate_discount(Decimal("100.00"), 15.0) + """ +``` + +### Error Handling Standards + +- Prefer explicit validation at API boundaries and raise errors **as early as possible**. +- Use standard Python exceptions (`ValueError`, `TypeError`, `KeyError`, etc.) when they accurately describe the problem. +- When a domain-specific error is needed, define a clear, documented exception type and reuse it consistently. +- Do **not** silently swallow exceptions. Either handle them meaningfully (with clear recovery behavior) or let them propagate. +- Preserve the original traceback when re-raising (`raise` without arguments) so issues remain diagnosable. +- Fail fast on programmer errors (e.g., inconsistent state, impossible branches) using assertions or explicit exceptions. +- For public APIs, validate user input and return helpful, actionable messages without leaking secrets or internal implementation details. + +#### Exception Best Practices + +- Avoid `except Exception:` and **never** use bare `except:`; always catch the most specific exception type possible. +- Keep `try` blocks **small** and focused so that it is clear which statements may raise the handled exception. +- When adding context to an error, use either `raise NewError("message") from exc` or log the context and re-raise with `raise`. +- Do not use exceptions for normal control flow; reserve them for truly exceptional or error conditions. +- When a function can raise non-obvious exceptions, document them in the docstring under a `:raises:` section. +- In asynchronous code, make sure exceptions are not lost in background tasks; gather and handle them explicitly where needed. 
+ +### Logging Standards + +- Use the standard library `logging` module for all diagnostic output; **do not** use `print` in library or service code. +- Create a module-level logger via `logger = logging.getLogger(__name__)` and use it consistently within that module. +- Choose log levels appropriately: + - `logger.debug(...)` for detailed diagnostics and tracing. + - `logger.info(...)` for high-level lifecycle events (startup, shutdown, major state changes). + - `logger.warning(...)` for recoverable issues or unexpected-but-tolerated conditions. + - `logger.error(...)` for failures where the current operation cannot succeed. + - `logger.critical(...)` for unrecoverable conditions affecting process health. +- Never log secrets, credentials, access tokens, full connection strings, or sensitive customer data. +- When logging exceptions, prefer `logger.exception("message")` inside an `except` block so the traceback is included. +- Keep log messages clear and structured (include identifiers like request IDs, resource names, or correlation IDs when available). +## ⚠️ Important Notes + +- **NEVER ASSUME OR GUESS** - When in doubt, ask for clarification +- **Always verify file paths and module names** before use +- **Keep this file (`AGENTS.md`) updated** when adding new patterns or dependencies +- **Test your code** - No feature is complete without tests +- **Document your decisions** - Future developers (including yourself) will thank you + +## 📚 Documentation & Explainability + +- **Keep samples runnable** and focused on SDK usage. +- **Follow third-party dependency guidance** in `CONTRIBUTING.md`. + +## 🛠️ Development Environment + +### ✅ Testing & Quality Gates (tox) + +Run from a package root (e.g., `sdk/agentserver/azure-ai-agentserver-core`). 
+ +- `tox run -e sphinx -c ../../../eng/tox/tox.ini --root .` + - Docs output: `.tox/sphinx/tmp/dist/site/index.html` +- `tox run -e pylint -c ../../../eng/tox/tox.ini --root .` + - Uses repo `pylintrc`; `next-pylint` uses `eng/pylintrc` +- `tox run -e mypy -c ../../../eng/tox/tox.ini --root .` +- `tox run -e pyright -c ../../../eng/tox/tox.ini --root .` +- `tox run -e verifytypes -c ../../../eng/tox/tox.ini --root .` +- `tox run -e whl -c ../../../eng/tox/tox.ini --root .` +- `tox run -e sdist -c ../../../eng/tox/tox.ini --root .` +- `tox run -e samples -c ../../../eng/tox/tox.ini --root .` (runs all samples) +- `tox run -e apistub -c ../../../eng/tox/tox.ini --root .` + +Check each package `pyproject.toml` under `[tool.azure-sdk-build]` to see which checks are enabled/disabled. diff --git a/sdk/agentserver/PLANNING.md b/sdk/agentserver/PLANNING.md new file mode 100644 index 000000000000..6a65e2925a83 --- /dev/null +++ b/sdk/agentserver/PLANNING.md @@ -0,0 +1,100 @@ +# 🧭 PLANNING.md + +## 🎯 What this project is +AgentServer is a set of Python packages under `sdk/agentserver` that host agents for +Azure AI Foundry. The core package provides the runtime/server, tooling runtime, and +Responses API models, while the adapter packages wrap popular frameworks. The primary +users are SDK consumers who want to run agents locally and deploy them as Foundry-hosted +containers. Work is “done” when adapters faithfully translate framework execution into +Responses API-compatible outputs and the packages pass their expected tests and samples. + +**Behavioral/policy rules live in `AGENTS.md`.** This document is architecture + repo map + doc index. + +## 🎯 Goals / Non-goals +Goals: +- Keep a stable architecture snapshot and repo map for fast onboarding. +- Document key request/response flows, including streaming. +- Clarify the development workflow and testing expectations for AgentServer packages. 
+ +Non-goals: +- Detailed API documentation (belongs in package docs and docstrings). +- Per-initiative plans (belong in `TASK.md` or a dedicated plan file). +- Speculative refactors (align with KISS/YAGNI in `AGENTS.md`). + +## 🧩 Architecture (snapshot) +### 🏗️ Project Structure +- **azure-ai-agentserver-core**: Core library + - Runtime/context + - HTTP gateway + - Foundry integrations + - Responses API protocol (current) +- **azure-ai-agentserver-agentframework**: adapters for Agent Framework agents/workflows, + thread and checkpoint persistence. +- **azure-ai-agentserver-langgraph**: adapter and converters for LangGraph agents and + Response API events. + +### Current vs target +- Current: OpenAI Responses API protocol lives in `azure-ai-agentserver-core` alongside + core runtime and HTTP gateway code; framework adapters layer on top. +- Target (planned, not fully implemented): + - Core layer: app/runtime/context, foundry integrations (tools, checkpointing), HTTP gateway + - Protocol layer: Responses API in its own package + - Framework layer: adapters (agentframework, langgraph, other frameworks) + +### Key flows +- Request path: `/runs` or `/responses` → `AgentRunContext` → agent execution → Responses + API payload. +- Streaming path: generator/async generator → SSE event stream. +- Framework adapter path: framework input → converter → Response API output (streaming + or non-streaming). +- Tools path: Foundry tool runtime invoked via core `tools/runtime` APIs. + +## 🗺️ Repo map +- `azure-ai-agentserver-core`: Core library (runtime/context, HTTP gateway, Foundry integrations, + Responses API protocol today). +- `azure-ai-agentserver-agentframework`: Agent Framework adapter. +- `azure-ai-agentserver-langgraph`: LangGraph adapter. 
+- Core runtime and models: `azure-ai-agentserver-core/azure/ai/agentserver/core/` +- Agent Framework adapter: `azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/` +- LangGraph adapter: `azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/` +- Samples: `azure-ai-agentserver-*/samples/` +- Tests: `azure-ai-agentserver-*/tests/` +- Package docs (Sphinx inputs): `azure-ai-agentserver-*/doc/` +- Repo-wide guidance: `CONTRIBUTING.md`, `doc/dev/tests.md`, `doc/eng_sys_checks.md` + +## 📚 Doc index +### **Read repo-wide guidance**: +- `CONTRIBUTING.md` +- `doc/dev/tests.md` +- `doc/eng_sys_checks.md` + +### **Read the package READMEs**: + - `sdk/agentserver/azure-ai-agentserver-core/README.md` + - `sdk/agentserver/azure-ai-agentserver-agentframework/README.md` + - `sdk/agentserver/azure-ai-agentserver-langgraph/README.md` + +### “If you need X, look at Y” +- Enable/disable checks for a package → that package `pyproject.toml` → `[tool.azure-sdk-build]` +- How to run tests / live-recorded tests → `doc/dev/tests.md` +- Engineering system checks / gates → `doc/eng_sys_checks.md` +- Adapter conversion behavior → the relevant adapter package + its tests + samples + +## ✅ Testing strategy +- Unit/integration tests live in each package’s `tests/` directory. +- Samples are part of validation via the `samples` tox environment. +- For live/recorded testing patterns, follow `doc/dev/tests.md`. + +## 🚀 Rollout / migrations +- Preserve public API stability and follow Azure SDK release policy. +- Do not modify generated code (see paths in `AGENTS.md`). +- CI checks are controlled per package in `pyproject.toml` under + `[tool.azure-sdk-build]`. + +## ⚠️ Risks / edge cases +- Streaming event ordering and keep-alive behavior. +- Credential handling (async credentials and adapters). +- Response API schema compatibility across adapters. +- Tool invocation failures and error surfacing. + +## 📌 Progress +See `TASK.md` for active work items; no checklists here. 
diff --git a/sdk/agentserver/TASK.md b/sdk/agentserver/TASK.md new file mode 100644 index 000000000000..b5d683d4e486 --- /dev/null +++ b/sdk/agentserver/TASK.md @@ -0,0 +1,4 @@ +# TASK.md + +## Done +- [x] 2026-01-29 — Create `AGENTS.md`, `PLANNING.md`, `TASK.md`. \ No newline at end of file From b1c7a9ce47071d89efa726c97d290a444ec0b79c Mon Sep 17 00:00:00 2001 From: Jun'an Chen Date: Fri, 30 Jan 2026 14:53:56 -0800 Subject: [PATCH 85/94] [agentserver] Restore previous OTel context in streaming generator (#44930) --- sdk/agentserver/TASK.md | 7 ++++++- .../azure/ai/agentserver/core/server/base.py | 5 +++-- .../tests/unit_tests/server/test_otel_context.py | 14 ++++++++++++++ 3 files changed, 23 insertions(+), 3 deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_otel_context.py diff --git a/sdk/agentserver/TASK.md b/sdk/agentserver/TASK.md index b5d683d4e486..936bc04c71c3 100644 --- a/sdk/agentserver/TASK.md +++ b/sdk/agentserver/TASK.md @@ -1,4 +1,9 @@ # TASK.md +## Now (active) + ## Done -- [x] 2026-01-29 — Create `AGENTS.md`, `PLANNING.md`, `TASK.md`. \ No newline at end of file +- [x] 2026-01-30 — Restore previous OTel context in streaming generator + - Files: azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py, + azure-ai-agentserver-core/tests/unit_tests/server/test_otel_context.py +- [x] 2026-01-29 — Create `AGENTS.md`, `PLANNING.md`, `TASK.md`. 
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 7ec666b52741..9fd5557cd4e1 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -128,7 +128,8 @@ async def runs_endpoint(request): async def gen_async(ex): ctx = TraceContextTextMapPropagator().extract(carrier=context_carrier) - token = otel_context.attach(ctx) + prev_ctx = otel_context.get_current() + otel_context.attach(ctx) seq = 0 try: if ex: @@ -154,7 +155,7 @@ async def gen_async(ex): message=_format_error(ex), param="") yield _event_to_sse_chunk(err) - otel_context.detach(token) + otel_context.attach(prev_ctx) return StreamingResponse(gen_async(ex), media_type="text/event-stream") diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_otel_context.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_otel_context.py new file mode 100644 index 000000000000..f5ee395ff48f --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_otel_context.py @@ -0,0 +1,14 @@ +import pytest +from opentelemetry import context as otel_context +from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator + + +@pytest.mark.asyncio +async def test_streaming_context_restore_uses_previous_context() -> None: + prev_ctx = otel_context.get_current() + ctx = TraceContextTextMapPropagator().extract(carrier={}) + + otel_context.attach(ctx) + otel_context.attach(prev_ctx) + + assert otel_context.get_current() is prev_ctx From 227034bee0c27763bcd756a7d739b79b31139648 Mon Sep 17 00:00:00 2001 From: Jun'an Chen Date: Fri, 30 Jan 2026 15:48:22 -0800 Subject: [PATCH 86/94] [agentserver] Attach package metadata to OpenAIResponse.metadata + header (#44929) * use git 
worktree * [agentserver] Attach package metadata to OpenAIResponse.metadata + header * [agentserver] Attach package metadata to OpenAIResponse.metadata + header --- sdk/agentserver/AGENTS.md | 2 + sdk/agentserver/TASK.md | 7 + .../agentserver/core/application/__init__.py | 11 +- .../agentserver/core/application/_metadata.py | 100 +++++++++++++ .../core/application/_package_metadata.py | 50 ------- .../core/server/_response_metadata.py | 42 ++++++ .../azure/ai/agentserver/core/server/base.py | 21 ++- .../core/tools/client/_configuration.py | 2 +- .../server/test_response_metadata.py | 140 ++++++++++++++++++ 9 files changed, 321 insertions(+), 54 deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_metadata.py delete mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_response_metadata.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_response_metadata.py diff --git a/sdk/agentserver/AGENTS.md b/sdk/agentserver/AGENTS.md index c59728752408..5a5f20dc457c 100644 --- a/sdk/agentserver/AGENTS.md +++ b/sdk/agentserver/AGENTS.md @@ -24,9 +24,11 @@ Avoid building functionality on speculation. Implement features only when they a - **Open/Closed Principle**: Software entities should be open for extension but closed for modification. - **Single Responsibility**: Each function, class, and module should have one clear purpose. - **Fail Fast**: Check for potential errors early and raise exceptions immediately when issues occur. +- **Encapsulation**: Hide internal state and require all interaction to be performed through an object's methods. 
### Implementation Patterns +- **Type-Safe**: Prefer explicit, type‑safe structures (TypedDicts, dataclasses, enums, unions) over Dict, Any, or other untyped containers, unless there is no viable alternative. - **Explicit validation at boundaries**: Validate inputs and identifiers early; reject malformed descriptors and missing required fields. - **Separation of resolution and execution**: Keep discovery/selection separate from invocation to allow late binding and interchangeable implementations. - **Context-aware execution**: Thread request/user context through calls via scoped providers; avoid global mutable state. diff --git a/sdk/agentserver/TASK.md b/sdk/agentserver/TASK.md index 936bc04c71c3..30be9c675399 100644 --- a/sdk/agentserver/TASK.md +++ b/sdk/agentserver/TASK.md @@ -3,6 +3,13 @@ ## Now (active) ## Done + +- [x] 2026-01-30 — Attach agent server metadata to OpenAIResponse.metadata + header + - Files: azure-ai-agentserver-core/azure/ai/agentserver/core/application/_metadata.py, + azure-ai-agentserver-core/azure/ai/agentserver/core/application/__init__.py, + azure-ai-agentserver-core/azure/ai/agentserver/core/server/_response_metadata.py, + azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_configuration.py, + azure-ai-agentserver-core/tests/unit_tests/server/test_response_metadata.py - [x] 2026-01-30 — Restore previous OTel context in streaming generator - Files: azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py, azure-ai-agentserver-core/tests/unit_tests/server/test_otel_context.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/__init__.py index 6e70a718531c..052f9894497b 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/__init__.py @@ -5,8 
+5,17 @@ __path__ = __import__('pkgutil').extend_path(__path__, __name__) __all__ = [ + "AgentServerMetadata", "PackageMetadata", + "RuntimeMetadata", + "get_current_app", "set_current_app" ] -from ._package_metadata import PackageMetadata, set_current_app +from ._metadata import ( + AgentServerMetadata, + PackageMetadata, + RuntimeMetadata, + get_current_app, + set_current_app, +) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_metadata.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_metadata.py new file mode 100644 index 000000000000..1ddc3d2d2e9e --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_metadata.py @@ -0,0 +1,100 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from __future__ import annotations + +import os +import platform +from dataclasses import dataclass, field +from importlib.metadata import Distribution, PackageNotFoundError +from typing import Mapping + + +@dataclass(frozen=True, kw_only=True) +class PackageMetadata: + name: str + version: str + + @staticmethod + def from_dist(dist_name: str) -> "PackageMetadata": + try: + ver = Distribution.from_name(dist_name).version + except PackageNotFoundError: + ver = "" + + return PackageMetadata( + name=dist_name, + version=ver, + ) + + +@dataclass(frozen=True, kw_only=True) +class RuntimeMetadata: + python_version: str = field(default_factory=platform.python_version) + platform: str = field(default_factory=platform.platform) + host_name: str = "" + replica_name: str = "" + + @staticmethod + def from_aca_app_env() -> "RuntimeMetadata | None": + host_name = os.environ.get("CONTAINER_APP_REVISION_FQDN") + replica_name = os.environ.get("CONTAINER_APP_REPLICA_NAME") + + if not host_name and not replica_name: + return None + + return 
RuntimeMetadata( + host_name=host_name or "", + replica_name=replica_name or "", + ) + + @staticmethod + def resolve(host_name: str | None = None, replica_name: str | None = None) -> "RuntimeMetadata": + runtime = RuntimeMetadata.from_aca_app_env() + + override = RuntimeMetadata(host_name=host_name or "", replica_name=replica_name or "") + return runtime.merged_with(override) if runtime else override + + def merged_with(self, override: "RuntimeMetadata | None") -> "RuntimeMetadata": + if override is None: + return self + + return RuntimeMetadata( + python_version=override.python_version or self.python_version, + platform=override.platform or self.platform, + host_name=override.host_name or self.host_name, + replica_name=override.replica_name or self.replica_name, + ) + + +@dataclass(frozen=True) +class AgentServerMetadata: + package: PackageMetadata + runtime: RuntimeMetadata + + def as_user_agent(self, component: str | None = None) -> str: + component_value = f" {component}" if component else "" + return ( + f"{self.package.name}/{self.package.version} " + f"Python {self.runtime.python_version}{component_value} " + f"({self.runtime.platform})" + ) + + +_default = AgentServerMetadata( + package=PackageMetadata.from_dist("azure-ai-agentserver-core"), + runtime=RuntimeMetadata.resolve(), +) +_app: AgentServerMetadata = _default + + +def set_current_app(app: PackageMetadata, runtime: RuntimeMetadata | None = None) -> None: + global _app # pylint: disable=W0603 + resolved_runtime = RuntimeMetadata.resolve() + merged_runtime = resolved_runtime.merged_with(runtime) + _app = AgentServerMetadata(package=app, runtime=merged_runtime) + + +def get_current_app() -> AgentServerMetadata: + global _app # pylint: disable=W0602 + return _app diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py deleted file mode 100644 
index 5701110e5c7f..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/application/_package_metadata.py +++ /dev/null @@ -1,50 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# --------------------------------------------------------- -from __future__ import annotations - -import platform -from dataclasses import dataclass -from importlib.metadata import Distribution, PackageNotFoundError - - -@dataclass(frozen=True) -class PackageMetadata: - name: str - version: str - python_version: str - platform: str - - @staticmethod - def from_dist(dist_name: str): - try: - ver = Distribution.from_name(dist_name).version - except PackageNotFoundError: - ver = "" - - return PackageMetadata( - name=dist_name, - version=ver, - python_version=platform.python_version(), - platform=platform.platform(), - ) - - def as_user_agent(self, component: str | None = None) -> str: - return (f"{self.name}/{self.version} " - f"Python {self.python_version} " - f"{component} " if component else "" - f"({self.platform})") - - -_default = PackageMetadata.from_dist("azure-ai-agentserver-core") -_app: PackageMetadata = _default - - -def set_current_app(app: PackageMetadata) -> None: - global _app # pylint: disable=W0603 - _app = app - - -def get_current_app() -> PackageMetadata: - global _app # pylint: disable=W0602 - return _app diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_response_metadata.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_response_metadata.py new file mode 100644 index 000000000000..fb73c2a3fe7d --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_response_metadata.py @@ -0,0 +1,42 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +from __future__ import annotations + +import json +from dataclasses import asdict +from typing import Dict + +from ..application._metadata import get_current_app +from ..models import Response as OpenAIResponse, ResponseStreamEvent +from ..models.projects import ( + ResponseCompletedEvent, + ResponseCreatedEvent, + ResponseInProgressEvent, +) + +HEADER_NAME = "x-aml-foundry-agents-metadata" +METADATA_KEY = "foundry_agents_metadata" + + +def _metadata_json() -> str: + payload = asdict(get_current_app()) + return json.dumps(payload) + + +def build_foundry_agents_metadata_headers() -> Dict[str, str]: + """Return header dict containing the foundry metadata header.""" + return {HEADER_NAME: _metadata_json()} + + +def attach_foundry_metadata_to_response(response: OpenAIResponse) -> None: + """Attach metadata into response.metadata[METADATA_KEY].""" + meta = response.metadata or {} + meta[METADATA_KEY] = _metadata_json() + response.metadata = meta + + +def try_attach_foundry_metadata_to_event(event: ResponseStreamEvent) -> None: + """Attach metadata to supported stream events; skip others.""" + if isinstance(event, (ResponseCreatedEvent, ResponseInProgressEvent, ResponseCompletedEvent)): + attach_foundry_metadata_to_response(event.response) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 9fd5557cd4e1..4f0032b64bd3 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -28,6 +28,11 @@ from azure.identity.aio import DefaultAzureCredential as AsyncDefaultTokenCredential from ._context import AgentServerContext +from ._response_metadata import ( + attach_foundry_metadata_to_response, + build_foundry_agents_metadata_headers, + 
try_attach_foundry_metadata_to_event, +) from .common.agent_run_context import AgentRunContext from ..constants import Constants from ..logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger, get_project_endpoint, request_context @@ -124,7 +129,9 @@ async def runs_endpoint(request): result = resp if not ex else project_models.ResponseError( code=project_models.ResponseErrorCode.SERVER_ERROR, message=_format_error(ex)) - return JSONResponse(result.as_dict()) + if not ex: + attach_foundry_metadata_to_response(result) + return JSONResponse(result.as_dict(), headers=self.create_response_headers()) async def gen_async(ex): ctx = TraceContextTextMapPropagator().extract(carrier=context_carrier) @@ -141,6 +148,7 @@ async def gen_async(ex): # Keep-alive signal yield _keep_alive_comment() else: + try_attach_foundry_metadata_to_event(event) seq += 1 yield _event_to_sse_chunk(event) logger.info("End of processing CreateResponse request.") @@ -157,7 +165,11 @@ async def gen_async(ex): yield _event_to_sse_chunk(err) otel_context.attach(prev_ctx) - return StreamingResponse(gen_async(ex), media_type="text/event-stream") + return StreamingResponse( + gen_async(ex), + media_type="text/event-stream", + headers=self.create_response_headers(), + ) async def liveness_endpoint(request): result = await self.agent_liveness(request) @@ -398,6 +410,11 @@ def setup_otlp_exporter(self, endpoint, provider): provider.add_span_processor(processor) logger.info(f"Tracing setup with OTLP exporter: {endpoint}") + def create_response_headers(self) -> dict[str, str]: + headers = {} + headers.update(build_foundry_agents_metadata_headers()) + return headers + def _event_to_sse_chunk(event: ResponseStreamEvent) -> str: event_data = json.dumps(event.as_dict()) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_configuration.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_configuration.py index c496ef563216..e09c80ed83f8 
100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_configuration.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_configuration.py @@ -5,7 +5,7 @@ from azure.core.credentials_async import AsyncTokenCredential from azure.core.pipeline import policies -from ...application._package_metadata import get_current_app +from ...application._metadata import get_current_app class FoundryToolClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_response_metadata.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_response_metadata.py new file mode 100644 index 000000000000..c2e3bea53287 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_response_metadata.py @@ -0,0 +1,140 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +import json + +from azure.ai.agentserver.core.application import ( + PackageMetadata, + RuntimeMetadata, + get_current_app, + set_current_app, +) +from azure.ai.agentserver.core.models import Response as OpenAIResponse +from azure.ai.agentserver.core.models.projects import ResponseCreatedEvent, ResponseErrorEvent +from azure.ai.agentserver.core.server._response_metadata import ( + METADATA_KEY, + attach_foundry_metadata_to_response, + build_foundry_agents_metadata_headers, + try_attach_foundry_metadata_to_event, +) + + +def _set_test_app(): + previous = get_current_app() + set_current_app( + PackageMetadata( + name="test-package", + version="1.2.3", + ), + RuntimeMetadata( + python_version="3.11.0", + platform="test-platform", + host_name="test-host", + replica_name="test-replica", + ), + ) + return previous + + +def _expected_payload() -> dict[str, dict[str, str]]: + return { + "package": { + "name": "test-package", + "version": "1.2.3", + }, + "runtime": { + "python_version": "3.11.0", + "platform": "test-platform", + "host_name": "test-host", + "replica_name": "test-replica", + }, + } + + +def test_build_foundry_agents_metadata_headers_returns_json(): + previous = _set_test_app() + try: + headers = build_foundry_agents_metadata_headers() + payload = json.loads(headers["x-aml-foundry-agents-metadata"]) + assert payload == _expected_payload() + finally: + set_current_app(previous.package, previous.runtime) + + +def test_attach_foundry_metadata_to_response_sets_metadata_key(): + previous = _set_test_app() + try: + response = OpenAIResponse({"object": "response", "id": "resp", "metadata": {}}) + attach_foundry_metadata_to_response(response) + assert METADATA_KEY in response.metadata + assert json.loads(response.metadata[METADATA_KEY]) == _expected_payload() + finally: + set_current_app(previous.package, previous.runtime) + + +def test_try_attach_foundry_metadata_to_event_attaches_for_supported_events(): + 
previous = _set_test_app() + try: + response = OpenAIResponse({"object": "response", "id": "resp", "metadata": {}}) + event = ResponseCreatedEvent({"sequence_number": 0, "response": response}) + try_attach_foundry_metadata_to_event(event) + assert METADATA_KEY in response.metadata + + unsupported = ResponseErrorEvent( + {"sequence_number": 1, "code": "server_error", "message": "boom", "param": ""} + ) + try_attach_foundry_metadata_to_event(unsupported) + finally: + set_current_app(previous.package, previous.runtime) + + +def test_runtime_metadata_merge_overrides_non_empty_fields(): + base = RuntimeMetadata( + python_version="3.10.0", + platform="base-platform", + host_name="base-host", + replica_name="base-replica", + ) + override = RuntimeMetadata( + python_version="", + platform="override-platform", + host_name="", + replica_name="override-replica", + ) + + merged = base.merged_with(override) + + assert merged.python_version == "3.10.0" + assert merged.platform == "override-platform" + assert merged.host_name == "base-host" + assert merged.replica_name == "override-replica" + + +def test_runtime_metadata_resolve_falls_back_when_env_missing(monkeypatch): + monkeypatch.delenv("CONTAINER_APP_REVISION_FQDN", raising=False) + monkeypatch.delenv("CONTAINER_APP_REPLICA_NAME", raising=False) + runtime = RuntimeMetadata.resolve() + + assert runtime.host_name == "" + assert runtime.replica_name == "" + assert runtime.python_version + assert runtime.platform + + +def test_runtime_metadata_resolve_aca_env(monkeypatch): + monkeypatch.setenv("CONTAINER_APP_REVISION_FQDN", "aca-host") + monkeypatch.setenv("CONTAINER_APP_REPLICA_NAME", "aca-replica") + runtime = RuntimeMetadata.resolve() + + assert runtime.host_name == "aca-host" + assert runtime.replica_name == "aca-replica" + + +def test_runtime_metadata_resolve_explicit_overrides(monkeypatch): + monkeypatch.setenv("CONTAINER_APP_REVISION_FQDN", "aca-host") + monkeypatch.setenv("CONTAINER_APP_REPLICA_NAME", "aca-replica") + + 
runtime = RuntimeMetadata.resolve(host_name="override-host", replica_name="override-replica") + + assert runtime.host_name == "override-host" + assert runtime.replica_name == "override-replica" From d7992359d7ce762bd6a240d19dd5c996bda4a2c0 Mon Sep 17 00:00:00 2001 From: Declan Date: Tue, 3 Feb 2026 14:16:36 -0800 Subject: [PATCH 87/94] docs: add CLAUDE.md shim referencing AGENTS.md (#44984) --- sdk/agentserver/CLAUDE.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 sdk/agentserver/CLAUDE.md diff --git a/sdk/agentserver/CLAUDE.md b/sdk/agentserver/CLAUDE.md new file mode 100644 index 000000000000..43c994c2d361 --- /dev/null +++ b/sdk/agentserver/CLAUDE.md @@ -0,0 +1 @@ +@AGENTS.md From a76fbee15a09fafc77f1d271f26e51d93921c2b8 Mon Sep 17 00:00:00 2001 From: Declan Date: Wed, 4 Feb 2026 20:27:03 -0800 Subject: [PATCH 88/94] [Hosted Agents] Implement managed checkpoints feature for AgentFramework (#45004) * [Hosted Agents] Implement managed checkpoints feature for AgentFramework * misc: use project_endpoint instead of project_id and foundry_endpoint * misc: use get_project_endpoint * misc: refine checkpoint init logic * misc: Add a mutual-exclusion check * misc: move the check into workflow_agent_adapter --- sdk/agentserver/TASK.md | 6 + .../ai/agentserver/agentframework/__init__.py | 46 +++- .../agentframework/_workflow_agent_adapter.py | 42 +++- .../agentframework/persistence/__init__.py | 4 + .../_foundry_checkpoint_repository.py | 99 +++++++++ .../_foundry_checkpoint_storage.py | 182 ++++++++++++++++ .../tests/unit_tests/mocks/__init__.py | 8 + .../mocks/mock_checkpoint_client.py | 156 ++++++++++++++ .../test_foundry_checkpoint_repository.py | 111 ++++++++++ .../test_foundry_checkpoint_storage.py | 165 ++++++++++++++ .../test_from_agent_framework_managed.py | 93 ++++++++ .../agentserver/core/checkpoints/__init__.py | 18 ++ .../core/checkpoints/client/__init__.py | 18 ++ .../core/checkpoints/client/_client.py | 153 +++++++++++++ 
.../core/checkpoints/client/_configuration.py | 37 ++++ .../core/checkpoints/client/_models.py | 201 ++++++++++++++++++ .../checkpoints/client/operations/__init__.py | 12 ++ .../checkpoints/client/operations/_items.py | 199 +++++++++++++++++ .../client/operations/_sessions.py | 133 ++++++++++++ 19 files changed, 1666 insertions(+), 17 deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_repository.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_storage.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/mock_checkpoint_client.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_foundry_checkpoint_repository.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_foundry_checkpoint_storage.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_from_agent_framework_managed.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_client.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_configuration.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_models.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/operations/__init__.py create mode 100644 
sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/operations/_items.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/operations/_sessions.py diff --git a/sdk/agentserver/TASK.md b/sdk/agentserver/TASK.md index 30be9c675399..ff03e777585b 100644 --- a/sdk/agentserver/TASK.md +++ b/sdk/agentserver/TASK.md @@ -4,6 +4,12 @@ ## Done +- [x] 2026-02-04 — Implement managed checkpoints feature + - Files: core/checkpoints/ (new), agentframework/persistence/_foundry_checkpoint_*.py (new), + agentframework/__init__.py (modified) + - Added: FoundryCheckpointClient, FoundryCheckpointStorage, FoundryCheckpointRepository + - Modified: from_agent_framework() with managed_checkpoints, foundry_endpoint, project_id params + - [x] 2026-01-30 — Attach agent server metadata to OpenAIResponse.metadata + header - Files: azure-ai-agentserver-core/azure/ai/agentserver/core/application/_metadata.py, azure-ai-agentserver-core/azure/ai/agentserver/core/application/__init__.py, diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py index 76ad4f904f67..85d0c9584ea9 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py @@ -17,7 +17,7 @@ from ._ai_agent_adapter import AgentFrameworkAIAgentAdapter from ._workflow_agent_adapter import AgentFrameworkWorkflowAdapter from ._foundry_tools import FoundryToolsChatMiddleware -from .persistence import AgentThreadRepository, CheckpointRepository +from .persistence import AgentThreadRepository, CheckpointRepository, FoundryCheckpointRepository @overload @@ -49,6 +49,8 @@ def from_agent_framework( credentials: Optional[Union[AsyncTokenCredential, 
TokenCredential]] = None, thread_repository: Optional[AgentThreadRepository] = None, checkpoint_repository: Optional[CheckpointRepository] = None, + managed_checkpoints: bool = False, + project_endpoint: Optional[str] = None, ) -> "AgentFrameworkWorkflowAdapter": """ Create an Agent Framework Workflow Adapter. @@ -67,6 +69,12 @@ def from_agent_framework( :type thread_repository: Optional[AgentThreadRepository] :param checkpoint_repository: Optional checkpoint repository for workflow checkpointing. :type checkpoint_repository: Optional[CheckpointRepository] + :param managed_checkpoints: If True, use Azure AI Foundry managed checkpoint storage. + :type managed_checkpoints: bool + :param project_endpoint: The Azure AI Foundry project endpoint. If not provided, + will be read from AZURE_AI_PROJECT_ENDPOINT environment variable. + Example: "https://.services.ai.azure.com/api/projects/" + :type project_endpoint: Optional[str] :return: An instance of AgentFrameworkWorkflowAdapter. :rtype: AgentFrameworkWorkflowAdapter """ @@ -77,7 +85,9 @@ def from_agent_framework( /, credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] = None, thread_repository: Optional[AgentThreadRepository] = None, - checkpoint_repository: Optional[CheckpointRepository] = None + checkpoint_repository: Optional[CheckpointRepository] = None, + managed_checkpoints: bool = False, + project_endpoint: Optional[str] = None, ) -> "AgentFrameworkAgent": """ Create an Agent Framework Adapter from either an AgentProtocol/BaseAgent or a @@ -92,22 +102,38 @@ def from_agent_framework( :type thread_repository: Optional[AgentThreadRepository] :param checkpoint_repository: Optional checkpoint repository for workflow checkpointing. :type checkpoint_repository: Optional[CheckpointRepository] + :param managed_checkpoints: If True, use Azure AI Foundry managed checkpoint storage. + :type managed_checkpoints: bool + :param project_endpoint: The Azure AI Foundry project endpoint. 
If not provided, + will be read from AZURE_AI_PROJECT_ENDPOINT environment variable. + Example: "https://.services.ai.azure.com/api/projects/" + :type project_endpoint: Optional[str] :return: An instance of AgentFrameworkAgent. :rtype: AgentFrameworkAgent :raises TypeError: If neither or both of agent and workflow are provided, or if the provided types are incorrect. + :raises ValueError: If managed_checkpoints=True but required parameters are missing, + or if both managed_checkpoints=True and checkpoint_repository are provided. """ if isinstance(agent_or_workflow, WorkflowBuilder): - return AgentFrameworkWorkflowAdapter(workflow_factory=agent_or_workflow.build, - credentials=credentials, - thread_repository=thread_repository, - checkpoint_repository=checkpoint_repository) + return AgentFrameworkWorkflowAdapter( + workflow_factory=agent_or_workflow.build, + credentials=credentials, + thread_repository=thread_repository, + checkpoint_repository=checkpoint_repository, + managed_checkpoints=managed_checkpoints, + project_endpoint=project_endpoint, + ) if isinstance(agent_or_workflow, Callable): # type: ignore - return AgentFrameworkWorkflowAdapter(workflow_factory=agent_or_workflow, - credentials=credentials, - thread_repository=thread_repository, - checkpoint_repository=checkpoint_repository) + return AgentFrameworkWorkflowAdapter( + workflow_factory=agent_or_workflow, + credentials=credentials, + thread_repository=thread_repository, + checkpoint_repository=checkpoint_repository, + managed_checkpoints=managed_checkpoints, + project_endpoint=project_endpoint, + ) # raise TypeError("workflow must be a WorkflowBuilder or callable returning a Workflow") if isinstance(agent_or_workflow, (AgentProtocol, BaseAgent)): diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py index 
f95ae294ff2c..d5cf61ad62ae 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py @@ -16,7 +16,7 @@ from azure.core.credentials_async import AsyncTokenCredential from azure.ai.agentserver.core import AgentRunContext -from azure.ai.agentserver.core.logger import get_logger +from azure.ai.agentserver.core.logger import get_logger, get_project_endpoint from azure.ai.agentserver.core.models import ( Response as OpenAIResponse, ResponseStreamEvent, @@ -28,19 +28,47 @@ from .models.agent_framework_output_non_streaming_converter import ( AgentFrameworkOutputNonStreamingConverter, ) -from .persistence import AgentThreadRepository, CheckpointRepository +from .persistence import AgentThreadRepository, CheckpointRepository, FoundryCheckpointRepository logger = get_logger() class AgentFrameworkWorkflowAdapter(AgentFrameworkAgent): """Adapter to run WorkflowBuilder agents within the Agent Framework CBAgent structure.""" - def __init__(self, - workflow_factory: Callable[[], Workflow], - credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] = None, - thread_repository: Optional[AgentThreadRepository] = None, - checkpoint_repository: Optional[CheckpointRepository] = None) -> None: + def __init__( + self, + workflow_factory: Callable[[], Workflow], + credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] = None, + thread_repository: Optional[AgentThreadRepository] = None, + checkpoint_repository: Optional[CheckpointRepository] = None, + managed_checkpoints: bool = False, + project_endpoint: Optional[str] = None, + ) -> None: super().__init__(credentials, thread_repository) self._workflow_factory = workflow_factory + + # Validate mutual exclusion of managed_checkpoints and checkpoint_repository + if managed_checkpoints and checkpoint_repository is not None: + raise 
ValueError( + "Cannot use both managed_checkpoints=True and checkpoint_repository. " + "Use managed_checkpoints=True for Azure AI Foundry managed storage, " + "or provide your own checkpoint_repository, but not both." + ) + + # Handle managed checkpoints + if managed_checkpoints: + resolved_endpoint = get_project_endpoint() or project_endpoint + if not resolved_endpoint: + raise ValueError( + "project_endpoint is required when managed_checkpoints=True. " + "Set AZURE_AI_PROJECT_ENDPOINT environment variable or pass project_endpoint parameter." + ) + if not credentials: + raise ValueError("credentials are required when managed_checkpoints=True") + checkpoint_repository = FoundryCheckpointRepository( + project_endpoint=resolved_endpoint, + credential=credentials, + ) + self._checkpoint_repository = checkpoint_repository async def agent_run( # pylint: disable=too-many-statements diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/__init__.py index cf07cb449d00..d59d1154650c 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/__init__.py @@ -9,6 +9,8 @@ InMemoryCheckpointRepository, FileCheckpointRepository, ) +from ._foundry_checkpoint_storage import FoundryCheckpointStorage +from ._foundry_checkpoint_repository import FoundryCheckpointRepository __all__ = [ "AgentThreadRepository", @@ -18,4 +20,6 @@ "CheckpointRepository", "InMemoryCheckpointRepository", "FileCheckpointRepository", + "FoundryCheckpointStorage", + "FoundryCheckpointRepository", ] diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_repository.py 
b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_repository.py new file mode 100644 index 000000000000..96b80615445e --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_repository.py @@ -0,0 +1,99 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Foundry-backed checkpoint repository implementation.""" + +import logging +from typing import Dict, Optional, Union + +from agent_framework import CheckpointStorage +from azure.core.credentials import TokenCredential +from azure.core.credentials_async import AsyncTokenCredential + +from azure.ai.agentserver.core.checkpoints.client import ( + CheckpointSession, + FoundryCheckpointClient, +) + +from .checkpoint_repository import CheckpointRepository +from ._foundry_checkpoint_storage import FoundryCheckpointStorage + +logger = logging.getLogger(__name__) + + +class FoundryCheckpointRepository(CheckpointRepository): + """Repository that creates FoundryCheckpointStorage instances per conversation. + + Manages checkpoint sessions on the Foundry backend, creating sessions on demand + and caching storage instances for reuse within the same process. + + :param project_endpoint: The Azure AI Foundry project endpoint URL. + Example: "https://.services.ai.azure.com/api/projects/" + :type project_endpoint: str + :param credential: Credential for authentication. + :type credential: Union[AsyncTokenCredential, TokenCredential] + """ + + def __init__( + self, + project_endpoint: str, + credential: Union[AsyncTokenCredential, TokenCredential], + ) -> None: + """Initialize the Foundry checkpoint repository. + + :param project_endpoint: The Azure AI Foundry project endpoint URL. 
+ :type project_endpoint: str + :param credential: Credential for authentication. + :type credential: Union[AsyncTokenCredential, TokenCredential] + """ + # Convert sync credential to async if needed + if not isinstance(credential, AsyncTokenCredential): + # For now, we require async credentials + raise TypeError( + "FoundryCheckpointRepository requires an AsyncTokenCredential. " + "Please use an async credential like DefaultAzureCredential." + ) + + self._client = FoundryCheckpointClient(project_endpoint, credential) + self._inventory: Dict[str, CheckpointStorage] = {} + + async def get_or_create( + self, conversation_id: Optional[str] + ) -> Optional[CheckpointStorage]: + """Get or create a checkpoint storage for the given conversation. + + :param conversation_id: The conversation ID (maps to session_id on backend). + :type conversation_id: Optional[str] + :return: CheckpointStorage instance or None if no conversation_id. + :rtype: Optional[CheckpointStorage] + """ + if not conversation_id: + return None + + if conversation_id not in self._inventory: + # Ensure session exists on backend + await self._ensure_session(conversation_id) + self._inventory[conversation_id] = FoundryCheckpointStorage( + client=self._client, + session_id=conversation_id, + ) + logger.debug( + "Created FoundryCheckpointStorage for conversation %s", + conversation_id, + ) + + return self._inventory[conversation_id] + + async def _ensure_session(self, session_id: str) -> None: + """Ensure a session exists on the backend, creating if needed. + + :param session_id: The session identifier. 
+ :type session_id: str + """ + session = CheckpointSession(session_id=session_id) + await self._client.upsert_session(session) + logger.debug("Ensured session %s exists on Foundry", session_id) + + async def close(self) -> None: + """Close the underlying HTTP client.""" + await self._client.close() diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_storage.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_storage.py new file mode 100644 index 000000000000..65e5b2f7b3c4 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_storage.py @@ -0,0 +1,182 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Foundry-backed checkpoint storage implementation.""" + +import json +import logging +from typing import List, Optional + +from agent_framework import WorkflowCheckpoint + +from azure.ai.agentserver.core.checkpoints.client import ( + CheckpointItem, + CheckpointItemId, + FoundryCheckpointClient, +) + +logger = logging.getLogger(__name__) + + +class FoundryCheckpointStorage: + """CheckpointStorage implementation backed by Azure AI Foundry. + + Implements the agent_framework.CheckpointStorage protocol by delegating + to the FoundryCheckpointClient HTTP client for remote storage. + + :param client: The Foundry checkpoint client. + :type client: FoundryCheckpointClient + :param session_id: The session identifier (maps to conversation_id). + :type session_id: str + """ + + def __init__(self, client: FoundryCheckpointClient, session_id: str) -> None: + """Initialize the Foundry checkpoint storage. + + :param client: The Foundry checkpoint client. 
+ :type client: FoundryCheckpointClient + :param session_id: The session identifier. + :type session_id: str + """ + self._client = client + self._session_id = session_id + + async def save_checkpoint(self, checkpoint: WorkflowCheckpoint) -> str: + """Save a checkpoint and return its ID. + + :param checkpoint: The workflow checkpoint to save. + :type checkpoint: WorkflowCheckpoint + :return: The checkpoint ID. + :rtype: str + """ + serialized = self._serialize_checkpoint(checkpoint) + item = CheckpointItem( + session_id=self._session_id, + item_id=checkpoint.checkpoint_id, + data=serialized, + parent_id=None, + ) + result = await self._client.create_items([item]) + if result: + logger.debug( + "Saved checkpoint %s to Foundry session %s", + checkpoint.checkpoint_id, + self._session_id, + ) + return result[0].item_id + return checkpoint.checkpoint_id + + async def load_checkpoint(self, checkpoint_id: str) -> Optional[WorkflowCheckpoint]: + """Load a checkpoint by ID. + + :param checkpoint_id: The checkpoint identifier. + :type checkpoint_id: str + :return: The workflow checkpoint if found, None otherwise. + :rtype: Optional[WorkflowCheckpoint] + """ + item_id = CheckpointItemId( + session_id=self._session_id, + item_id=checkpoint_id, + ) + item = await self._client.read_item(item_id) + if item is None: + return None + checkpoint = self._deserialize_checkpoint(item.data) + logger.debug( + "Loaded checkpoint %s from Foundry session %s", + checkpoint_id, + self._session_id, + ) + return checkpoint + + async def list_checkpoint_ids( + self, workflow_id: Optional[str] = None + ) -> List[str]: + """List checkpoint IDs. + + If workflow_id is provided, filter by that workflow. + + :param workflow_id: Optional workflow identifier for filtering. + :type workflow_id: Optional[str] + :return: List of checkpoint identifiers. 
+ :rtype: List[str] + """ + item_ids = await self._client.list_item_ids(self._session_id) + ids = [item_id.item_id for item_id in item_ids] + + # Filter by workflow_id if provided + if workflow_id is not None: + filtered_ids = [] + for checkpoint_id in ids: + checkpoint = await self.load_checkpoint(checkpoint_id) + if checkpoint and checkpoint.workflow_id == workflow_id: + filtered_ids.append(checkpoint_id) + return filtered_ids + + return ids + + async def list_checkpoints( + self, workflow_id: Optional[str] = None + ) -> List[WorkflowCheckpoint]: + """List checkpoint objects. + + If workflow_id is provided, filter by that workflow. + + :param workflow_id: Optional workflow identifier for filtering. + :type workflow_id: Optional[str] + :return: List of workflow checkpoints. + :rtype: List[WorkflowCheckpoint] + """ + ids = await self.list_checkpoint_ids(workflow_id=None) + checkpoints: List[WorkflowCheckpoint] = [] + + for checkpoint_id in ids: + checkpoint = await self.load_checkpoint(checkpoint_id) + if checkpoint is not None: + if workflow_id is None or checkpoint.workflow_id == workflow_id: + checkpoints.append(checkpoint) + + return checkpoints + + async def delete_checkpoint(self, checkpoint_id: str) -> bool: + """Delete a checkpoint by ID. + + :param checkpoint_id: The checkpoint identifier. + :type checkpoint_id: str + :return: True if the checkpoint was deleted, False otherwise. + :rtype: bool + """ + item_id = CheckpointItemId( + session_id=self._session_id, + item_id=checkpoint_id, + ) + deleted = await self._client.delete_item(item_id) + if deleted: + logger.debug( + "Deleted checkpoint %s from Foundry session %s", + checkpoint_id, + self._session_id, + ) + return deleted + + def _serialize_checkpoint(self, checkpoint: WorkflowCheckpoint) -> bytes: + """Serialize a WorkflowCheckpoint to bytes. + + :param checkpoint: The workflow checkpoint. + :type checkpoint: WorkflowCheckpoint + :return: Serialized checkpoint data. 
+ :rtype: bytes + """ + checkpoint_dict = checkpoint.to_dict() + return json.dumps(checkpoint_dict, ensure_ascii=False).encode("utf-8") + + def _deserialize_checkpoint(self, data: bytes) -> WorkflowCheckpoint: + """Deserialize bytes to WorkflowCheckpoint. + + :param data: Serialized checkpoint data. + :type data: bytes + :return: The workflow checkpoint. + :rtype: WorkflowCheckpoint + """ + checkpoint_dict = json.loads(data.decode("utf-8")) + return WorkflowCheckpoint.from_dict(checkpoint_dict) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/__init__.py new file mode 100644 index 000000000000..4436d04866df --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/__init__.py @@ -0,0 +1,8 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Mock implementations for testing.""" + +from .mock_checkpoint_client import MockFoundryCheckpointClient + +__all__ = ["MockFoundryCheckpointClient"] diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/mock_checkpoint_client.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/mock_checkpoint_client.py new file mode 100644 index 000000000000..ffc1e2fcc4c1 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/mock_checkpoint_client.py @@ -0,0 +1,156 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Mock implementation of FoundryCheckpointClient for testing.""" + +from typing import Any, Dict, List, Optional + +from azure.ai.agentserver.core.checkpoints.client import ( + CheckpointItem, + CheckpointItemId, + CheckpointSession, +) + + +class MockFoundryCheckpointClient: + """In-memory mock for FoundryCheckpointClient for unit testing. + + Stores checkpoints in memory without making any HTTP calls. + """ + + def __init__(self, endpoint: str = "https://mock.endpoint") -> None: + """Initialize the mock client. + + :param endpoint: The mock endpoint URL. + :type endpoint: str + """ + self._endpoint = endpoint + self._sessions: Dict[str, CheckpointSession] = {} + self._items: Dict[str, CheckpointItem] = {} + + def _item_key(self, item_id: CheckpointItemId) -> str: + """Generate a unique key for a checkpoint item. + + :param item_id: The checkpoint item identifier. + :type item_id: CheckpointItemId + :return: The unique key. + :rtype: str + """ + return f"{item_id.session_id}:{item_id.item_id}" + + # Session operations + + async def upsert_session(self, session: CheckpointSession) -> CheckpointSession: + """Create or update a checkpoint session. + + :param session: The checkpoint session to upsert. + :type session: CheckpointSession + :return: The upserted checkpoint session. + :rtype: CheckpointSession + """ + self._sessions[session.session_id] = session + return session + + async def read_session(self, session_id: str) -> Optional[CheckpointSession]: + """Read a checkpoint session by ID. + + :param session_id: The session identifier. + :type session_id: str + :return: The checkpoint session if found, None otherwise. + :rtype: Optional[CheckpointSession] + """ + return self._sessions.get(session_id) + + async def delete_session(self, session_id: str) -> None: + """Delete a checkpoint session. + + :param session_id: The session identifier. 
+ :type session_id: str + """ + self._sessions.pop(session_id, None) + # Also delete all items in the session + keys_to_delete = [ + key for key, item in self._items.items() if item.session_id == session_id + ] + for key in keys_to_delete: + del self._items[key] + + # Item operations + + async def create_items(self, items: List[CheckpointItem]) -> List[CheckpointItem]: + """Create checkpoint items in batch. + + :param items: The checkpoint items to create. + :type items: List[CheckpointItem] + :return: The created checkpoint items. + :rtype: List[CheckpointItem] + """ + for item in items: + key = self._item_key(item.to_item_id()) + self._items[key] = item + return items + + async def read_item(self, item_id: CheckpointItemId) -> Optional[CheckpointItem]: + """Read a checkpoint item by ID. + + :param item_id: The checkpoint item identifier. + :type item_id: CheckpointItemId + :return: The checkpoint item if found, None otherwise. + :rtype: Optional[CheckpointItem] + """ + key = self._item_key(item_id) + return self._items.get(key) + + async def delete_item(self, item_id: CheckpointItemId) -> bool: + """Delete a checkpoint item. + + :param item_id: The checkpoint item identifier. + :type item_id: CheckpointItemId + :return: True if the item was deleted, False if not found. + :rtype: bool + """ + key = self._item_key(item_id) + if key in self._items: + del self._items[key] + return True + return False + + async def list_item_ids( + self, session_id: str, parent_id: Optional[str] = None + ) -> List[CheckpointItemId]: + """List checkpoint item IDs for a session. + + :param session_id: The session identifier. + :type session_id: str + :param parent_id: Optional parent item identifier for filtering. + :type parent_id: Optional[str] + :return: List of checkpoint item identifiers. 
+ :rtype: List[CheckpointItemId] + """ + result = [] + for item in self._items.values(): + if item.session_id == session_id: + if parent_id is None or item.parent_id == parent_id: + result.append(item.to_item_id()) + return result + + # Context manager methods + + async def close(self) -> None: + """Close the client (no-op for mock).""" + pass + + async def __aenter__(self) -> "MockFoundryCheckpointClient": + """Enter the async context manager. + + :return: The client instance. + :rtype: MockFoundryCheckpointClient + """ + return self + + async def __aexit__(self, *exc_details: Any) -> None: + """Exit the async context manager. + + :param exc_details: Exception details if an exception occurred. + """ + pass diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_foundry_checkpoint_repository.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_foundry_checkpoint_repository.py new file mode 100644 index 000000000000..ddbbe616b5e5 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_foundry_checkpoint_repository.py @@ -0,0 +1,111 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Unit tests for FoundryCheckpointRepository.""" + +import pytest + +from azure.ai.agentserver.agentframework.persistence import ( + FoundryCheckpointRepository, + FoundryCheckpointStorage, +) + +from .mocks import MockFoundryCheckpointClient + + +class TestableFoundryCheckpointRepository(FoundryCheckpointRepository): + """Testable version that accepts a mock client.""" + + def __init__(self, client: MockFoundryCheckpointClient) -> None: + """Initialize with a mock client (bypass credential requirements).""" + self._client = client # type: ignore[assignment] + self._inventory = {} + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_get_or_create_returns_none_without_conversation_id() -> None: + """Test that get_or_create returns None when conversation_id is None.""" + client = MockFoundryCheckpointClient() + repo = TestableFoundryCheckpointRepository(client=client) + + result = await repo.get_or_create(None) + + assert result is None + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_get_or_create_returns_none_for_empty_string() -> None: + """Test that get_or_create returns None when conversation_id is empty.""" + client = MockFoundryCheckpointClient() + repo = TestableFoundryCheckpointRepository(client=client) + + result = await repo.get_or_create("") + + assert result is None + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_get_or_create_creates_storage_on_first_access() -> None: + """Test that get_or_create creates storage on first access.""" + client = MockFoundryCheckpointClient() + repo = TestableFoundryCheckpointRepository(client=client) + + storage = await repo.get_or_create("conv-123") + + assert storage is not None + assert isinstance(storage, FoundryCheckpointStorage) + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_get_or_create_creates_session_on_first_access() -> None: + """Test that get_or_create creates session on the backend.""" + client = 
MockFoundryCheckpointClient() + repo = TestableFoundryCheckpointRepository(client=client) + + await repo.get_or_create("conv-123") + + # Verify session was created + session = await client.read_session("conv-123") + assert session is not None + assert session.session_id == "conv-123" + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_get_or_create_caches_storage_instances() -> None: + """Test that get_or_create returns cached storage on subsequent calls.""" + client = MockFoundryCheckpointClient() + repo = TestableFoundryCheckpointRepository(client=client) + + storage1 = await repo.get_or_create("conv-123") + storage2 = await repo.get_or_create("conv-123") + + assert storage1 is storage2 + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_get_or_create_creates_separate_storage_per_conversation() -> None: + """Test that get_or_create creates separate storage per conversation.""" + client = MockFoundryCheckpointClient() + repo = TestableFoundryCheckpointRepository(client=client) + + storage1 = await repo.get_or_create("conv-1") + storage2 = await repo.get_or_create("conv-2") + + assert storage1 is not storage2 + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_close_closes_client() -> None: + """Test that close closes the underlying client.""" + client = MockFoundryCheckpointClient() + repo = TestableFoundryCheckpointRepository(client=client) + + # Should not raise + await repo.close() diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_foundry_checkpoint_storage.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_foundry_checkpoint_storage.py new file mode 100644 index 000000000000..fb1b8d9c0e30 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_foundry_checkpoint_storage.py @@ -0,0 +1,165 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Unit tests for FoundryCheckpointStorage.""" + +import pytest +from agent_framework import WorkflowCheckpoint + +from azure.ai.agentserver.agentframework.persistence import FoundryCheckpointStorage + +from .mocks import MockFoundryCheckpointClient + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_save_checkpoint_returns_checkpoint_id() -> None: + """Test that save_checkpoint returns the checkpoint ID.""" + client = MockFoundryCheckpointClient() + storage = FoundryCheckpointStorage(client=client, session_id="session-1") + + checkpoint = WorkflowCheckpoint( + checkpoint_id="cp-123", + workflow_id="wf-1", + ) + + result = await storage.save_checkpoint(checkpoint) + + assert result == "cp-123" + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_load_checkpoint_returns_checkpoint() -> None: + """Test that load_checkpoint returns the checkpoint.""" + client = MockFoundryCheckpointClient() + storage = FoundryCheckpointStorage(client=client, session_id="session-1") + + checkpoint = WorkflowCheckpoint( + checkpoint_id="cp-123", + workflow_id="wf-1", + iteration_count=5, + ) + + await storage.save_checkpoint(checkpoint) + loaded = await storage.load_checkpoint("cp-123") + + assert loaded is not None + assert loaded.checkpoint_id == "cp-123" + assert loaded.workflow_id == "wf-1" + assert loaded.iteration_count == 5 + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_load_checkpoint_returns_none_for_missing() -> None: + """Test that load_checkpoint returns None for missing checkpoint.""" + client = MockFoundryCheckpointClient() + storage = FoundryCheckpointStorage(client=client, session_id="session-1") + + loaded = await storage.load_checkpoint("nonexistent") + + assert loaded is None + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_list_checkpoint_ids_returns_all_ids() -> None: + """Test that list_checkpoint_ids returns all checkpoint IDs.""" + client = 
MockFoundryCheckpointClient() + storage = FoundryCheckpointStorage(client=client, session_id="session-1") + + cp1 = WorkflowCheckpoint(checkpoint_id="cp-1", workflow_id="wf-1") + cp2 = WorkflowCheckpoint(checkpoint_id="cp-2", workflow_id="wf-1") + cp3 = WorkflowCheckpoint(checkpoint_id="cp-3", workflow_id="wf-2") + + await storage.save_checkpoint(cp1) + await storage.save_checkpoint(cp2) + await storage.save_checkpoint(cp3) + + ids = await storage.list_checkpoint_ids() + + assert set(ids) == {"cp-1", "cp-2", "cp-3"} + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_list_checkpoint_ids_filters_by_workflow() -> None: + """Test that list_checkpoint_ids filters by workflow_id.""" + client = MockFoundryCheckpointClient() + storage = FoundryCheckpointStorage(client=client, session_id="session-1") + + cp1 = WorkflowCheckpoint(checkpoint_id="cp-1", workflow_id="wf-1") + cp2 = WorkflowCheckpoint(checkpoint_id="cp-2", workflow_id="wf-1") + cp3 = WorkflowCheckpoint(checkpoint_id="cp-3", workflow_id="wf-2") + + await storage.save_checkpoint(cp1) + await storage.save_checkpoint(cp2) + await storage.save_checkpoint(cp3) + + ids = await storage.list_checkpoint_ids(workflow_id="wf-1") + + assert set(ids) == {"cp-1", "cp-2"} + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_list_checkpoints_returns_all_checkpoints() -> None: + """Test that list_checkpoints returns all checkpoints.""" + client = MockFoundryCheckpointClient() + storage = FoundryCheckpointStorage(client=client, session_id="session-1") + + cp1 = WorkflowCheckpoint(checkpoint_id="cp-1", workflow_id="wf-1") + cp2 = WorkflowCheckpoint(checkpoint_id="cp-2", workflow_id="wf-2") + + await storage.save_checkpoint(cp1) + await storage.save_checkpoint(cp2) + + checkpoints = await storage.list_checkpoints() + + assert len(checkpoints) == 2 + ids = {cp.checkpoint_id for cp in checkpoints} + assert ids == {"cp-1", "cp-2"} + + +@pytest.mark.unit +@pytest.mark.asyncio +async def 
test_delete_checkpoint_returns_true_for_existing() -> None: + """Test that delete_checkpoint returns True for existing checkpoint.""" + client = MockFoundryCheckpointClient() + storage = FoundryCheckpointStorage(client=client, session_id="session-1") + + checkpoint = WorkflowCheckpoint(checkpoint_id="cp-123", workflow_id="wf-1") + await storage.save_checkpoint(checkpoint) + + deleted = await storage.delete_checkpoint("cp-123") + + assert deleted is True + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_delete_checkpoint_returns_false_for_missing() -> None: + """Test that delete_checkpoint returns False for missing checkpoint.""" + client = MockFoundryCheckpointClient() + storage = FoundryCheckpointStorage(client=client, session_id="session-1") + + deleted = await storage.delete_checkpoint("nonexistent") + + assert deleted is False + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_delete_checkpoint_removes_from_storage() -> None: + """Test that delete_checkpoint actually removes the checkpoint.""" + client = MockFoundryCheckpointClient() + storage = FoundryCheckpointStorage(client=client, session_id="session-1") + + checkpoint = WorkflowCheckpoint(checkpoint_id="cp-123", workflow_id="wf-1") + await storage.save_checkpoint(checkpoint) + + await storage.delete_checkpoint("cp-123") + loaded = await storage.load_checkpoint("cp-123") + + assert loaded is None diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_from_agent_framework_managed.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_from_agent_framework_managed.py new file mode 100644 index 000000000000..a0cfbea51f6c --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_from_agent_framework_managed.py @@ -0,0 +1,93 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Unit tests for from_agent_framework with managed checkpoints.""" + +import os +import pytest +from unittest.mock import Mock, AsyncMock, patch + +from azure.core.credentials_async import AsyncTokenCredential + + +@pytest.mark.unit +def test_managed_checkpoints_requires_project_endpoint() -> None: + """Test that managed_checkpoints=True requires project_endpoint when env var not set.""" + from azure.ai.agentserver.agentframework import from_agent_framework + from agent_framework import WorkflowBuilder + + builder = WorkflowBuilder() + mock_credential = Mock(spec=AsyncTokenCredential) + + # Ensure environment variable is not set + with patch.dict(os.environ, {}, clear=True): + with pytest.raises(ValueError) as exc_info: + from_agent_framework( + builder, + credentials=mock_credential, + managed_checkpoints=True, + project_endpoint=None, + ) + + assert "project_endpoint" in str(exc_info.value) + + +@pytest.mark.unit +def test_managed_checkpoints_requires_credentials() -> None: + """Test that managed_checkpoints=True requires credentials.""" + from azure.ai.agentserver.agentframework import from_agent_framework + from agent_framework import WorkflowBuilder + + builder = WorkflowBuilder() + + with pytest.raises(ValueError) as exc_info: + from_agent_framework( + builder, + credentials=None, + managed_checkpoints=True, + project_endpoint="https://test.services.ai.azure.com/api/projects/test-project", + ) + + assert "credentials" in str(exc_info.value) + + +@pytest.mark.unit +def test_managed_checkpoints_false_does_not_require_parameters() -> None: + """Test that managed_checkpoints=False does not require project_endpoint.""" + from azure.ai.agentserver.agentframework import from_agent_framework + from agent_framework import WorkflowBuilder + + builder = WorkflowBuilder() + + # Should not raise + adapter = from_agent_framework( + builder, + managed_checkpoints=False, + ) + + assert adapter is not None + + 
+@pytest.mark.unit +def test_managed_checkpoints_and_checkpoint_repository_are_mutually_exclusive() -> None: + """Test that managed_checkpoints=True and checkpoint_repository cannot be used together.""" + from azure.ai.agentserver.agentframework import from_agent_framework + from azure.ai.agentserver.agentframework.persistence import InMemoryCheckpointRepository + from agent_framework import WorkflowBuilder + + builder = WorkflowBuilder() + mock_credential = Mock(spec=AsyncTokenCredential) + checkpoint_repo = InMemoryCheckpointRepository() + + with pytest.raises(ValueError) as exc_info: + from_agent_framework( + builder, + credentials=mock_credential, + managed_checkpoints=True, + checkpoint_repository=checkpoint_repo, + project_endpoint="https://test.services.ai.azure.com/api/projects/test-project", + ) + + assert "Cannot use both" in str(exc_info.value) + assert "managed_checkpoints" in str(exc_info.value) + assert "checkpoint_repository" in str(exc_info.value) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/__init__.py new file mode 100644 index 000000000000..f9d6ed3d8aa8 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/__init__.py @@ -0,0 +1,18 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Checkpoint storage module for Azure AI Agent Server.""" + +from .client import FoundryCheckpointClient +from .client._models import ( + CheckpointItem, + CheckpointItemId, + CheckpointSession, +) + +__all__ = [ + "CheckpointItem", + "CheckpointItemId", + "CheckpointSession", + "FoundryCheckpointClient", +] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/__init__.py new file mode 100644 index 000000000000..34f30f16c5d9 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/__init__.py @@ -0,0 +1,18 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Checkpoint client module for Azure AI Agent Server.""" + +from ._client import FoundryCheckpointClient +from ._models import ( + CheckpointItem, + CheckpointItemId, + CheckpointSession, +) + +__all__ = [ + "CheckpointItem", + "CheckpointItemId", + "CheckpointSession", + "FoundryCheckpointClient", +] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_client.py new file mode 100644 index 000000000000..8c16c1151b9e --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_client.py @@ -0,0 +1,153 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Asynchronous client for Azure AI Foundry checkpoint storage API.""" + +from typing import Any, AsyncContextManager, List, Optional + +from azure.core import AsyncPipelineClient +from azure.core.credentials_async import AsyncTokenCredential +from azure.core.tracing.decorator_async import distributed_trace_async + +from ._configuration import FoundryCheckpointClientConfiguration +from ._models import CheckpointItem, CheckpointItemId, CheckpointSession +from .operations import CheckpointItemOperations, CheckpointSessionOperations + + +class FoundryCheckpointClient(AsyncContextManager["FoundryCheckpointClient"]): + """Asynchronous client for Azure AI Foundry checkpoint storage API. + + This client provides access to checkpoint storage for workflow state persistence, + enabling checkpoint save, load, list, and delete operations. + + :param endpoint: The fully qualified project endpoint for the Azure AI Foundry service. + Example: "https://.services.ai.azure.com/api/projects/" + :type endpoint: str + :param credential: Credential for authenticating requests to the service. + Use credentials from azure-identity like DefaultAzureCredential. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + """ + + def __init__( + self, + endpoint: str, + credential: "AsyncTokenCredential", + ) -> None: + """Initialize the asynchronous Azure AI Checkpoint Client. + + :param endpoint: The project endpoint URL (includes project context). + :type endpoint: str + :param credential: Credentials for authenticating requests. 
+ :type credential: ~azure.core.credentials_async.AsyncTokenCredential + """ + config = FoundryCheckpointClientConfiguration(credential) + self._client: AsyncPipelineClient = AsyncPipelineClient( + base_url=endpoint, config=config + ) + self._sessions = CheckpointSessionOperations(self._client) + self._items = CheckpointItemOperations(self._client) + + # Session operations + + @distributed_trace_async + async def upsert_session(self, session: CheckpointSession) -> CheckpointSession: + """Create or update a checkpoint session. + + :param session: The checkpoint session to upsert. + :type session: CheckpointSession + :return: The upserted checkpoint session. + :rtype: CheckpointSession + """ + return await self._sessions.upsert(session) + + @distributed_trace_async + async def read_session(self, session_id: str) -> Optional[CheckpointSession]: + """Read a checkpoint session by ID. + + :param session_id: The session identifier. + :type session_id: str + :return: The checkpoint session if found, None otherwise. + :rtype: Optional[CheckpointSession] + """ + return await self._sessions.read(session_id) + + @distributed_trace_async + async def delete_session(self, session_id: str) -> None: + """Delete a checkpoint session. + + :param session_id: The session identifier. + :type session_id: str + """ + await self._sessions.delete(session_id) + + # Item operations + + @distributed_trace_async + async def create_items(self, items: List[CheckpointItem]) -> List[CheckpointItem]: + """Create checkpoint items in batch. + + :param items: The checkpoint items to create. + :type items: List[CheckpointItem] + :return: The created checkpoint items. + :rtype: List[CheckpointItem] + """ + return await self._items.create_batch(items) + + @distributed_trace_async + async def read_item(self, item_id: CheckpointItemId) -> Optional[CheckpointItem]: + """Read a checkpoint item by ID. + + :param item_id: The checkpoint item identifier. 
+ :type item_id: CheckpointItemId + :return: The checkpoint item if found, None otherwise. + :rtype: Optional[CheckpointItem] + """ + return await self._items.read(item_id) + + @distributed_trace_async + async def delete_item(self, item_id: CheckpointItemId) -> bool: + """Delete a checkpoint item. + + :param item_id: The checkpoint item identifier. + :type item_id: CheckpointItemId + :return: True if the item was deleted, False if not found. + :rtype: bool + """ + return await self._items.delete(item_id) + + @distributed_trace_async + async def list_item_ids( + self, session_id: str, parent_id: Optional[str] = None + ) -> List[CheckpointItemId]: + """List checkpoint item IDs for a session. + + :param session_id: The session identifier. + :type session_id: str + :param parent_id: Optional parent item identifier for filtering. + :type parent_id: Optional[str] + :return: List of checkpoint item identifiers. + :rtype: List[CheckpointItemId] + """ + return await self._items.list_ids(session_id, parent_id) + + # Context manager methods + + async def close(self) -> None: + """Close the underlying HTTP pipeline.""" + await self._client.close() + + async def __aenter__(self) -> "FoundryCheckpointClient": + """Enter the async context manager. + + :return: The client instance. + :rtype: FoundryCheckpointClient + """ + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + """Exit the async context manager. + + :param exc_details: Exception details if an exception occurred. 
+ """ + await self._client.__aexit__(*exc_details) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_configuration.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_configuration.py new file mode 100644 index 000000000000..cd9ed9ee7ff7 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_configuration.py @@ -0,0 +1,37 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Configuration for Azure AI Checkpoint Client.""" + +from azure.core.configuration import Configuration +from azure.core.credentials_async import AsyncTokenCredential +from azure.core.pipeline import policies + +from ...application._metadata import get_current_app + + +class FoundryCheckpointClientConfiguration(Configuration): + """Configuration for Azure AI Checkpoint Client. + + Manages authentication, endpoint configuration, and policy settings for the + Azure AI Checkpoint Client. This class is used internally by the client and should + not typically be instantiated directly. + + :param credential: Azure TokenCredential for authentication. 
+ :type credential: ~azure.core.credentials_async.AsyncTokenCredential + """ + + def __init__(self, credential: "AsyncTokenCredential") -> None: + super().__init__() + + self.retry_policy = policies.AsyncRetryPolicy() + self.logging_policy = policies.NetworkTraceLoggingPolicy() + self.request_id_policy = policies.RequestIdPolicy() + self.http_logging_policy = policies.HttpLoggingPolicy() + self.user_agent_policy = policies.UserAgentPolicy( + base_user_agent=get_current_app().as_user_agent("FoundryCheckpointClient") + ) + self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy( + credential, "https://ai.azure.com/.default" + ) + self.redirect_policy = policies.AsyncRedirectPolicy() diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_models.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_models.py new file mode 100644 index 000000000000..b6e61aa2b6f6 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_models.py @@ -0,0 +1,201 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Data models for checkpoint storage API.""" + +from dataclasses import dataclass, field +from typing import Any, Dict, List, Optional + +from pydantic import BaseModel, Field + + +@dataclass +class CheckpointSession: + """Represents a checkpoint session. + + A session maps to a conversation and groups related checkpoints together. + + :ivar session_id: The session identifier (maps to conversation_id). + :ivar metadata: Optional metadata for the session. + """ + + session_id: str + metadata: Optional[Dict[str, Any]] = None + + +@dataclass +class CheckpointItemId: + """Identifier for a checkpoint item. + + :ivar session_id: The session identifier this checkpoint belongs to. 
+ :ivar item_id: The unique checkpoint item identifier. + :ivar parent_id: Optional parent checkpoint identifier for hierarchical checkpoints. + """ + + session_id: str + item_id: str + parent_id: Optional[str] = None + + +@dataclass +class CheckpointItem: + """Represents a single checkpoint item. + + Contains the serialized checkpoint data along with identifiers. + + :ivar session_id: The session identifier this checkpoint belongs to. + :ivar item_id: The unique checkpoint item identifier. + :ivar data: Serialized checkpoint data as bytes. + :ivar parent_id: Optional parent checkpoint identifier. + """ + + session_id: str + item_id: str + data: bytes + parent_id: Optional[str] = None + + def to_item_id(self) -> CheckpointItemId: + """Convert to a CheckpointItemId. + + :return: The checkpoint item identifier. + :rtype: CheckpointItemId + """ + return CheckpointItemId( + session_id=self.session_id, + item_id=self.item_id, + parent_id=self.parent_id, + ) + + +# Pydantic models for API request/response serialization + + +class CheckpointSessionRequest(BaseModel): + """Request model for creating/updating a checkpoint session.""" + + session_id: str = Field(alias="sessionId") + metadata: Optional[Dict[str, Any]] = None + + model_config = {"populate_by_name": True} + + @classmethod + def from_session(cls, session: CheckpointSession) -> "CheckpointSessionRequest": + """Create a request from a CheckpointSession. + + :param session: The checkpoint session. + :type session: CheckpointSession + :return: The request model. 
+ :rtype: CheckpointSessionRequest + """ + return cls( + session_id=session.session_id, + metadata=session.metadata, + ) + + +class CheckpointSessionResponse(BaseModel): + """Response model for checkpoint session operations.""" + + session_id: str = Field(alias="sessionId") + metadata: Optional[Dict[str, Any]] = None + etag: Optional[str] = None + + model_config = {"populate_by_name": True} + + def to_session(self) -> CheckpointSession: + """Convert to a CheckpointSession. + + :return: The checkpoint session. + :rtype: CheckpointSession + """ + return CheckpointSession( + session_id=self.session_id, + metadata=self.metadata, + ) + + +class CheckpointItemIdResponse(BaseModel): + """Response model for checkpoint item identifiers.""" + + session_id: str = Field(alias="sessionId") + item_id: str = Field(alias="itemId") + parent_id: Optional[str] = Field(default=None, alias="parentId") + + model_config = {"populate_by_name": True} + + def to_item_id(self) -> CheckpointItemId: + """Convert to a CheckpointItemId. + + :return: The checkpoint item identifier. + :rtype: CheckpointItemId + """ + return CheckpointItemId( + session_id=self.session_id, + item_id=self.item_id, + parent_id=self.parent_id, + ) + + +class CheckpointItemRequest(BaseModel): + """Request model for creating checkpoint items.""" + + session_id: str = Field(alias="sessionId") + item_id: str = Field(alias="itemId") + data: str # Base64-encoded bytes + parent_id: Optional[str] = Field(default=None, alias="parentId") + + model_config = {"populate_by_name": True} + + @classmethod + def from_item(cls, item: CheckpointItem) -> "CheckpointItemRequest": + """Create a request from a CheckpointItem. + + :param item: The checkpoint item. + :type item: CheckpointItem + :return: The request model. 
+ :rtype: CheckpointItemRequest + """ + import base64 + + return cls( + session_id=item.session_id, + item_id=item.item_id, + data=base64.b64encode(item.data).decode("utf-8"), + parent_id=item.parent_id, + ) + + +class CheckpointItemResponse(BaseModel): + """Response model for checkpoint item operations.""" + + session_id: str = Field(alias="sessionId") + item_id: str = Field(alias="itemId") + data: str # Base64-encoded bytes + parent_id: Optional[str] = Field(default=None, alias="parentId") + etag: Optional[str] = None + + model_config = {"populate_by_name": True} + + def to_item(self) -> CheckpointItem: + """Convert to a CheckpointItem. + + :return: The checkpoint item. + :rtype: CheckpointItem + """ + import base64 + + return CheckpointItem( + session_id=self.session_id, + item_id=self.item_id, + data=base64.b64decode(self.data), + parent_id=self.parent_id, + ) + + +class ListCheckpointItemIdsResponse(BaseModel): + """Response model for listing checkpoint item identifiers.""" + + value: List[CheckpointItemIdResponse] = Field(default_factory=list) + next_link: Optional[str] = Field(default=None, alias="nextLink") + + model_config = {"populate_by_name": True} diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/operations/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/operations/__init__.py new file mode 100644 index 000000000000..42ba9bacd20b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/operations/__init__.py @@ -0,0 +1,12 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Checkpoint operations module for Azure AI Agent Server.""" + +from ._items import CheckpointItemOperations +from ._sessions import CheckpointSessionOperations + +__all__ = [ + "CheckpointItemOperations", + "CheckpointSessionOperations", +] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/operations/_items.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/operations/_items.py new file mode 100644 index 000000000000..8c3fa4331446 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/operations/_items.py @@ -0,0 +1,199 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Operations for checkpoint items.""" + +from typing import Any, ClassVar, Dict, List, Optional + +from azure.core import AsyncPipelineClient +from azure.core.exceptions import ResourceNotFoundError +from azure.core.pipeline.transport import HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async + +from ....tools.client.operations._base import BaseOperations +from .._models import ( + CheckpointItem, + CheckpointItemId, + CheckpointItemRequest, + CheckpointItemResponse, + ListCheckpointItemIdsResponse, +) + + +class CheckpointItemOperations(BaseOperations): + """Operations for managing checkpoint items.""" + + _API_VERSION: ClassVar[str] = "2025-11-15-preview" + + _HEADERS: ClassVar[Dict[str, str]] = { + "Content-Type": "application/json", + "Accept": "application/json", + } + + _QUERY_PARAMS: ClassVar[Dict[str, Any]] = {"api-version": _API_VERSION} + + def _items_path(self, item_id: Optional[str] = None) -> str: + """Get the API path for item operations. + + :param item_id: Optional item identifier. 
+ :type item_id: Optional[str] + :return: The API path. + :rtype: str + """ + base = "/checkpoints/items" + return f"{base}/{item_id}" if item_id else base + + def _build_create_batch_request(self, items: List[CheckpointItem]) -> HttpRequest: + """Build the HTTP request for creating items in batch. + + :param items: The checkpoint items to create. + :type items: List[CheckpointItem] + :return: The HTTP request. + :rtype: HttpRequest + """ + request_models = [CheckpointItemRequest.from_item(item) for item in items] + return self._client.post( + self._items_path(), + params=self._QUERY_PARAMS, + headers=self._HEADERS, + content=[model.model_dump(by_alias=True) for model in request_models], + ) + + def _build_read_request(self, item_id: CheckpointItemId) -> HttpRequest: + """Build the HTTP request for reading an item. + + :param item_id: The checkpoint item identifier. + :type item_id: CheckpointItemId + :return: The HTTP request. + :rtype: HttpRequest + """ + params = dict(self._QUERY_PARAMS) + params["sessionId"] = item_id.session_id + if item_id.parent_id: + params["parentId"] = item_id.parent_id + return self._client.get( + self._items_path(item_id.item_id), + params=params, + headers=self._HEADERS, + ) + + def _build_delete_request(self, item_id: CheckpointItemId) -> HttpRequest: + """Build the HTTP request for deleting an item. + + :param item_id: The checkpoint item identifier. + :type item_id: CheckpointItemId + :return: The HTTP request. + :rtype: HttpRequest + """ + params = dict(self._QUERY_PARAMS) + params["sessionId"] = item_id.session_id + if item_id.parent_id: + params["parentId"] = item_id.parent_id + return self._client.delete( + self._items_path(item_id.item_id), + params=params, + headers=self._HEADERS, + ) + + def _build_list_ids_request( + self, session_id: str, parent_id: Optional[str] = None + ) -> HttpRequest: + """Build the HTTP request for listing item IDs. + + :param session_id: The session identifier. 
+ :type session_id: str + :param parent_id: Optional parent item identifier. + :type parent_id: Optional[str] + :return: The HTTP request. + :rtype: HttpRequest + """ + params = dict(self._QUERY_PARAMS) + params["sessionId"] = session_id + if parent_id: + params["parentId"] = parent_id + return self._client.get( + self._items_path(), + params=params, + headers=self._HEADERS, + ) + + @distributed_trace_async + async def create_batch(self, items: List[CheckpointItem]) -> List[CheckpointItem]: + """Create checkpoint items in batch. + + :param items: The checkpoint items to create. + :type items: List[CheckpointItem] + :return: The created checkpoint items. + :rtype: List[CheckpointItem] + """ + if not items: + return [] + + request = self._build_create_batch_request(items) + response = await self._send_request(request) + async with response: + json_response = self._extract_response_json(response) + if isinstance(json_response, list): + return [ + CheckpointItemResponse.model_validate(item).to_item() + for item in json_response + ] + # Single item response + return [CheckpointItemResponse.model_validate(json_response).to_item()] + + @distributed_trace_async + async def read(self, item_id: CheckpointItemId) -> Optional[CheckpointItem]: + """Read a checkpoint item by ID. + + :param item_id: The checkpoint item identifier. + :type item_id: CheckpointItemId + :return: The checkpoint item if found, None otherwise. + :rtype: Optional[CheckpointItem] + """ + request = self._build_read_request(item_id) + try: + response = await self._send_request(request) + async with response: + json_response = self._extract_response_json(response) + item_response = CheckpointItemResponse.model_validate(json_response) + return item_response.to_item() + except ResourceNotFoundError: + return None + + @distributed_trace_async + async def delete(self, item_id: CheckpointItemId) -> bool: + """Delete a checkpoint item. + + :param item_id: The checkpoint item identifier. 
+ :type item_id: CheckpointItemId + :return: True if the item was deleted, False if not found. + :rtype: bool + """ + request = self._build_delete_request(item_id) + try: + response = await self._send_request(request) + async with response: + pass # No response body expected + return True + except ResourceNotFoundError: + return False + + @distributed_trace_async + async def list_ids( + self, session_id: str, parent_id: Optional[str] = None + ) -> List[CheckpointItemId]: + """List checkpoint item IDs for a session. + + :param session_id: The session identifier. + :type session_id: str + :param parent_id: Optional parent item identifier for filtering. + :type parent_id: Optional[str] + :return: List of checkpoint item identifiers. + :rtype: List[CheckpointItemId] + """ + request = self._build_list_ids_request(session_id, parent_id) + response = await self._send_request(request) + async with response: + json_response = self._extract_response_json(response) + list_response = ListCheckpointItemIdsResponse.model_validate(json_response) + return [item.to_item_id() for item in list_response.value] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/operations/_sessions.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/operations/_sessions.py new file mode 100644 index 000000000000..b3587f8baf72 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/operations/_sessions.py @@ -0,0 +1,133 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Operations for checkpoint sessions.""" + +from typing import Any, ClassVar, Dict, Optional + +from azure.core import AsyncPipelineClient +from azure.core.exceptions import ResourceNotFoundError +from azure.core.pipeline.transport import HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async + +from ....tools.client.operations._base import BaseOperations +from .._models import ( + CheckpointSession, + CheckpointSessionRequest, + CheckpointSessionResponse, +) + + +class CheckpointSessionOperations(BaseOperations): + """Operations for managing checkpoint sessions.""" + + _API_VERSION: ClassVar[str] = "2025-11-15-preview" + + _HEADERS: ClassVar[Dict[str, str]] = { + "Content-Type": "application/json", + "Accept": "application/json", + } + + _QUERY_PARAMS: ClassVar[Dict[str, Any]] = {"api-version": _API_VERSION} + + def _session_path(self, session_id: Optional[str] = None) -> str: + """Get the API path for session operations. + + :param session_id: Optional session identifier. + :type session_id: Optional[str] + :return: The API path. + :rtype: str + """ + base = "/checkpoints/sessions" + return f"{base}/{session_id}" if session_id else base + + def _build_upsert_request(self, session: CheckpointSession) -> HttpRequest: + """Build the HTTP request for upserting a session. + + :param session: The checkpoint session. + :type session: CheckpointSession + :return: The HTTP request. + :rtype: HttpRequest + """ + request_model = CheckpointSessionRequest.from_session(session) + return self._client.put( + self._session_path(session.session_id), + params=self._QUERY_PARAMS, + headers=self._HEADERS, + content=request_model.model_dump(by_alias=True), + ) + + def _build_read_request(self, session_id: str) -> HttpRequest: + """Build the HTTP request for reading a session. + + :param session_id: The session identifier. + :type session_id: str + :return: The HTTP request. 
+ :rtype: HttpRequest + """ + return self._client.get( + self._session_path(session_id), + params=self._QUERY_PARAMS, + headers=self._HEADERS, + ) + + def _build_delete_request(self, session_id: str) -> HttpRequest: + """Build the HTTP request for deleting a session. + + :param session_id: The session identifier. + :type session_id: str + :return: The HTTP request. + :rtype: HttpRequest + """ + return self._client.delete( + self._session_path(session_id), + params=self._QUERY_PARAMS, + headers=self._HEADERS, + ) + + @distributed_trace_async + async def upsert(self, session: CheckpointSession) -> CheckpointSession: + """Create or update a checkpoint session. + + :param session: The checkpoint session to upsert. + :type session: CheckpointSession + :return: The upserted checkpoint session. + :rtype: CheckpointSession + """ + request = self._build_upsert_request(session) + response = await self._send_request(request) + async with response: + json_response = self._extract_response_json(response) + session_response = CheckpointSessionResponse.model_validate(json_response) + return session_response.to_session() + + @distributed_trace_async + async def read(self, session_id: str) -> Optional[CheckpointSession]: + """Read a checkpoint session by ID. + + :param session_id: The session identifier. + :type session_id: str + :return: The checkpoint session if found, None otherwise. + :rtype: Optional[CheckpointSession] + """ + request = self._build_read_request(session_id) + try: + response = await self._send_request(request) + async with response: + json_response = self._extract_response_json(response) + session_response = CheckpointSessionResponse.model_validate(json_response) + return session_response.to_session() + except ResourceNotFoundError: + return None + + @distributed_trace_async + async def delete(self, session_id: str) -> None: + """Delete a checkpoint session. + + :param session_id: The session identifier. 
+ :type session_id: str + """ + request = self._build_delete_request(session_id) + response = await self._send_request(request) + async with response: + pass # No response body expected From 6ff3669fb1873c0c339708487bce86aeb0c4673b Mon Sep 17 00:00:00 2001 From: melionel Date: Fri, 6 Feb 2026 09:40:11 -0800 Subject: [PATCH 89/94] never leave output as null in response complete event (#45053) --- .../models/agent_framework_output_streaming_converter.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 805f3fc79ead..d506476a4cc9 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -459,6 +459,8 @@ def _build_response(self, status: str) -> OpenAIResponse: "created_at": self._response_created_at, "conversation": self._context.get_conversation_object(), } - if status == "completed" and self._completed_output_items: + + # set output even if _completed_output_items is empty, never leave the output as null + if status == "completed": response_data["output"] = self._completed_output_items return OpenAIResponse(response_data) From 2b1ddaba70189bc043470630d91a1c8693b16b06 Mon Sep 17 00:00:00 2001 From: Ganesh Bheemarasetty <1634042+ganeshyb@users.noreply.github.com> Date: Fri, 6 Feb 2026 09:40:18 -0800 Subject: [PATCH 90/94] Fix: Handle empty schema type in MCP tool manifests (#45051) Some MCP tool servers (e.g. HuggingFace) return tool manifests where certain parameters have an empty string for their JSON Schema 'type' field. 
This causes a Pydantic ValidationError when the SDK tries to deserialise the ListFoundryConnectedToolsResponse, because SchemaType only accepts the six standard JSON Schema types. Changes: - SchemaProperty.type is now Optional[SchemaType] with a model_validator that coerces empty type strings to None. - FoundryToolClient._create_ai_function skips parameters whose type is None with a warning log, instead of crashing with an AttributeError. --- .../agentserver/agentframework/_foundry_tools.py | 4 ++++ .../ai/agentserver/core/tools/client/_models.py | 14 +++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py index 64120308a872..f936d32e4ec1 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_foundry_tools.py @@ -71,6 +71,10 @@ def _to_aifunction(self, foundry_tool: "ResolvedFoundryTool") -> AIFunction: # Build field definitions for the Pydantic model field_definitions: Dict[str, Any] = {} for field_name, field_info in properties.items(): + if field_info.type is None: + logger.warning("Skipping field '%s' in tool '%s': unknown or empty schema type.", + field_name, foundry_tool.name) + continue field_type = field_info.type.py_type field_description = field_info.description or "" is_required = field_name in required_fields diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_models.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_models.py index bcfc2f5d7e96..6302c6de5ade 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_models.py +++ 
b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_models.py @@ -272,8 +272,20 @@ class SchemaProperty(BaseModel): keyword; it is *not* “this property is required in a parent object”.) """ - type: SchemaType + type: Optional[SchemaType] = None + """The schema node type (e.g., ``string``, ``object``, ``array``). May be ``None`` + if the upstream tool manifest supplies an empty or unrecognised type string.""" description: Optional[str] = None + + @model_validator(mode="before") + @classmethod + def _coerce_empty_type(cls, data: Any) -> Any: + """Coerce an empty ``type`` string to ``None`` so that properties with + invalid or missing type information are still deserialised instead of + raising a validation error.""" + if isinstance(data, dict) and data.get("type") == "": + data = {**data, "type": None} + return data items: Optional["SchemaProperty"] = None properties: Optional[Mapping[str, "SchemaProperty"]] = None default: Any = None From 8a9d66ccf86097a5e4359eadce3ff118f15f9917 Mon Sep 17 00:00:00 2001 From: Declan Date: Fri, 6 Feb 2026 11:49:17 -0800 Subject: [PATCH 91/94] [Hosted Agents] Implement managed checkpoints feature for Langgraph (#45041) * [Hosted Agents] Implement managed checkpoints feature for Langgraph * misc: preserves all other compile parameters * chore: refactor AgentFramework to remove managed_checkpoints flag * chore: refactor langgraph to remove managed_checkpoints flag --- .../ai/agentserver/agentframework/__init__.py | 26 +- .../agentframework/_workflow_agent_adapter.py | 30 +- .../test_from_agent_framework_managed.py | 80 +-- .../ai/agentserver/langgraph/__init__.py | 23 +- .../langgraph/checkpointer/__init__.py | 8 + .../checkpointer/_foundry_checkpoint_saver.py | 606 ++++++++++++++++++ .../langgraph/checkpointer/_item_id.py | 96 +++ .../tests/unit_tests/checkpointer/__init__.py | 4 + .../test_foundry_checkpoint_saver.py | 441 +++++++++++++ .../unit_tests/checkpointer/test_item_id.py | 125 ++++ 
.../tests/unit_tests/mocks/__init__.py | 8 + .../mocks/mock_checkpoint_client.py | 156 +++++ .../unit_tests/test_from_langgraph_managed.py | 72 +++ 13 files changed, 1557 insertions(+), 118 deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_foundry_checkpoint_saver.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_item_id.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/checkpointer/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/checkpointer/test_foundry_checkpoint_saver.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/checkpointer/test_item_id.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/mock_checkpoint_client.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_from_langgraph_managed.py diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py index 85d0c9584ea9..c4d938ada8d9 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py @@ -49,8 +49,6 @@ def from_agent_framework( credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] = None, thread_repository: Optional[AgentThreadRepository] = None, checkpoint_repository: Optional[CheckpointRepository] = None, - managed_checkpoints: 
bool = False, - project_endpoint: Optional[str] = None, ) -> "AgentFrameworkWorkflowAdapter": """ Create an Agent Framework Workflow Adapter. @@ -68,13 +66,9 @@ def from_agent_framework( :param thread_repository: Optional thread repository for agent thread management. :type thread_repository: Optional[AgentThreadRepository] :param checkpoint_repository: Optional checkpoint repository for workflow checkpointing. + Use ``InMemoryCheckpointRepository``, ``FileCheckpointRepository``, or + ``FoundryCheckpointRepository`` for Azure AI Foundry managed storage. :type checkpoint_repository: Optional[CheckpointRepository] - :param managed_checkpoints: If True, use Azure AI Foundry managed checkpoint storage. - :type managed_checkpoints: bool - :param project_endpoint: The Azure AI Foundry project endpoint. If not provided, - will be read from AZURE_AI_PROJECT_ENDPOINT environment variable. - Example: "https://.services.ai.azure.com/api/projects/" - :type project_endpoint: Optional[str] :return: An instance of AgentFrameworkWorkflowAdapter. :rtype: AgentFrameworkWorkflowAdapter """ @@ -86,8 +80,6 @@ def from_agent_framework( credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] = None, thread_repository: Optional[AgentThreadRepository] = None, checkpoint_repository: Optional[CheckpointRepository] = None, - managed_checkpoints: bool = False, - project_endpoint: Optional[str] = None, ) -> "AgentFrameworkAgent": """ Create an Agent Framework Adapter from either an AgentProtocol/BaseAgent or a @@ -101,19 +93,13 @@ def from_agent_framework( :param thread_repository: Optional thread repository for agent thread management. :type thread_repository: Optional[AgentThreadRepository] :param checkpoint_repository: Optional checkpoint repository for workflow checkpointing. + Use ``InMemoryCheckpointRepository``, ``FileCheckpointRepository``, or + ``FoundryCheckpointRepository`` for Azure AI Foundry managed storage. 
:type checkpoint_repository: Optional[CheckpointRepository] - :param managed_checkpoints: If True, use Azure AI Foundry managed checkpoint storage. - :type managed_checkpoints: bool - :param project_endpoint: The Azure AI Foundry project endpoint. If not provided, - will be read from AZURE_AI_PROJECT_ENDPOINT environment variable. - Example: "https://.services.ai.azure.com/api/projects/" - :type project_endpoint: Optional[str] :return: An instance of AgentFrameworkAgent. :rtype: AgentFrameworkAgent :raises TypeError: If neither or both of agent and workflow are provided, or if the provided types are incorrect. - :raises ValueError: If managed_checkpoints=True but required parameters are missing, - or if both managed_checkpoints=True and checkpoint_repository are provided. """ if isinstance(agent_or_workflow, WorkflowBuilder): @@ -122,8 +108,6 @@ def from_agent_framework( credentials=credentials, thread_repository=thread_repository, checkpoint_repository=checkpoint_repository, - managed_checkpoints=managed_checkpoints, - project_endpoint=project_endpoint, ) if isinstance(agent_or_workflow, Callable): # type: ignore return AgentFrameworkWorkflowAdapter( @@ -131,8 +115,6 @@ def from_agent_framework( credentials=credentials, thread_repository=thread_repository, checkpoint_repository=checkpoint_repository, - managed_checkpoints=managed_checkpoints, - project_endpoint=project_endpoint, ) # raise TypeError("workflow must be a WorkflowBuilder or callable returning a Workflow") diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py index d5cf61ad62ae..a119d697a377 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py +++ 
b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_workflow_agent_adapter.py @@ -16,7 +16,7 @@ from azure.core.credentials_async import AsyncTokenCredential from azure.ai.agentserver.core import AgentRunContext -from azure.ai.agentserver.core.logger import get_logger, get_project_endpoint +from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.models import ( Response as OpenAIResponse, ResponseStreamEvent, @@ -28,7 +28,7 @@ from .models.agent_framework_output_non_streaming_converter import ( AgentFrameworkOutputNonStreamingConverter, ) -from .persistence import AgentThreadRepository, CheckpointRepository, FoundryCheckpointRepository +from .persistence import AgentThreadRepository, CheckpointRepository logger = get_logger() @@ -40,35 +40,9 @@ def __init__( credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] = None, thread_repository: Optional[AgentThreadRepository] = None, checkpoint_repository: Optional[CheckpointRepository] = None, - managed_checkpoints: bool = False, - project_endpoint: Optional[str] = None, ) -> None: super().__init__(credentials, thread_repository) self._workflow_factory = workflow_factory - - # Validate mutual exclusion of managed_checkpoints and checkpoint_repository - if managed_checkpoints and checkpoint_repository is not None: - raise ValueError( - "Cannot use both managed_checkpoints=True and checkpoint_repository. " - "Use managed_checkpoints=True for Azure AI Foundry managed storage, " - "or provide your own checkpoint_repository, but not both." - ) - - # Handle managed checkpoints - if managed_checkpoints: - resolved_endpoint = get_project_endpoint() or project_endpoint - if not resolved_endpoint: - raise ValueError( - "project_endpoint is required when managed_checkpoints=True. " - "Set AZURE_AI_PROJECT_ENDPOINT environment variable or pass project_endpoint parameter." 
- ) - if not credentials: - raise ValueError("credentials are required when managed_checkpoints=True") - checkpoint_repository = FoundryCheckpointRepository( - project_endpoint=resolved_endpoint, - credential=credentials, - ) - self._checkpoint_repository = checkpoint_repository async def agent_run( # pylint: disable=too-many-statements diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_from_agent_framework_managed.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_from_agent_framework_managed.py index a0cfbea51f6c..c1616c475cb4 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_from_agent_framework_managed.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_from_agent_framework_managed.py @@ -1,93 +1,47 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -"""Unit tests for from_agent_framework with managed checkpoints.""" +"""Unit tests for from_agent_framework with checkpoint repository.""" -import os import pytest -from unittest.mock import Mock, AsyncMock, patch +from unittest.mock import Mock from azure.core.credentials_async import AsyncTokenCredential @pytest.mark.unit -def test_managed_checkpoints_requires_project_endpoint() -> None: - """Test that managed_checkpoints=True requires project_endpoint when env var not set.""" +def test_checkpoint_repository_is_optional() -> None: + """Test that checkpoint_repository is optional and defaults to None.""" from azure.ai.agentserver.agentframework import from_agent_framework from agent_framework import WorkflowBuilder builder = WorkflowBuilder() - mock_credential = Mock(spec=AsyncTokenCredential) - # Ensure environment variable is not set - with patch.dict(os.environ, {}, clear=True): - with pytest.raises(ValueError) as exc_info: - from_agent_framework( - 
builder, - credentials=mock_credential, - managed_checkpoints=True, - project_endpoint=None, - ) + # Should not raise + adapter = from_agent_framework(builder) - assert "project_endpoint" in str(exc_info.value) + assert adapter is not None @pytest.mark.unit -def test_managed_checkpoints_requires_credentials() -> None: - """Test that managed_checkpoints=True requires credentials.""" +def test_foundry_checkpoint_repository_passed_directly() -> None: + """Test that FoundryCheckpointRepository can be passed via checkpoint_repository.""" from azure.ai.agentserver.agentframework import from_agent_framework + from azure.ai.agentserver.agentframework.persistence import FoundryCheckpointRepository from agent_framework import WorkflowBuilder builder = WorkflowBuilder() + mock_credential = Mock(spec=AsyncTokenCredential) - with pytest.raises(ValueError) as exc_info: - from_agent_framework( - builder, - credentials=None, - managed_checkpoints=True, - project_endpoint="https://test.services.ai.azure.com/api/projects/test-project", - ) - - assert "credentials" in str(exc_info.value) - - -@pytest.mark.unit -def test_managed_checkpoints_false_does_not_require_parameters() -> None: - """Test that managed_checkpoints=False does not require project_endpoint.""" - from azure.ai.agentserver.agentframework import from_agent_framework - from agent_framework import WorkflowBuilder - - builder = WorkflowBuilder() + repo = FoundryCheckpointRepository( + project_endpoint="https://test.services.ai.azure.com/api/projects/test-project", + credential=mock_credential, + ) - # Should not raise adapter = from_agent_framework( builder, - managed_checkpoints=False, + checkpoint_repository=repo, ) assert adapter is not None - - -@pytest.mark.unit -def test_managed_checkpoints_and_checkpoint_repository_are_mutually_exclusive() -> None: - """Test that managed_checkpoints=True and checkpoint_repository cannot be used together.""" - from azure.ai.agentserver.agentframework import from_agent_framework - 
from azure.ai.agentserver.agentframework.persistence import InMemoryCheckpointRepository - from agent_framework import WorkflowBuilder - - builder = WorkflowBuilder() - mock_credential = Mock(spec=AsyncTokenCredential) - checkpoint_repo = InMemoryCheckpointRepository() - - with pytest.raises(ValueError) as exc_info: - from_agent_framework( - builder, - credentials=mock_credential, - managed_checkpoints=True, - checkpoint_repository=checkpoint_repo, - project_endpoint="https://test.services.ai.azure.com/api/projects/test-project", - ) - - assert "Cannot use both" in str(exc_info.value) - assert "managed_checkpoints" in str(exc_info.value) - assert "checkpoint_repository" in str(exc_info.value) + assert adapter._checkpoint_repository is repo diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py index f63eaa05ca0c..a3eef4358564 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py @@ -3,7 +3,7 @@ # --------------------------------------------------------- __path__ = __import__("pkgutil").extend_path(__path__, __name__) -from typing import Optional, TYPE_CHECKING +from typing import Optional, Union, TYPE_CHECKING from azure.ai.agentserver.core.application import PackageMetadata, set_current_app @@ -12,17 +12,30 @@ from .langgraph import LangGraphAdapter if TYPE_CHECKING: # pragma: no cover + from langgraph.graph.state import CompiledStateGraph from .models.response_api_converter import ResponseAPIConverter from azure.core.credentials_async import AsyncTokenCredential + from azure.core.credentials import TokenCredential def from_langgraph( - agent, + agent: "CompiledStateGraph", /, - credentials: Optional["AsyncTokenCredential"] = None, - converter: Optional["ResponseAPIConverter"] = None 
+ credentials: Optional[Union["AsyncTokenCredential", "TokenCredential"]] = None, + converter: Optional["ResponseAPIConverter"] = None, ) -> "LangGraphAdapter": - + """Create a LangGraph adapter for Azure AI Agent Server. + + :param agent: The compiled LangGraph state graph. To use persistent checkpointing, + compile the graph with a checkpointer via ``builder.compile(checkpointer=saver)``. + :type agent: CompiledStateGraph + :param credentials: Azure credentials for authentication. + :type credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] + :param converter: Custom response converter. + :type converter: Optional[ResponseAPIConverter] + :return: A LangGraphAdapter instance. + :rtype: LangGraphAdapter + """ return LangGraphAdapter(agent, credentials=credentials, converter=converter) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/__init__.py new file mode 100644 index 000000000000..9e91582733d3 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/__init__.py @@ -0,0 +1,8 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Checkpoint saver implementations for LangGraph with Azure AI Foundry.""" + +from ._foundry_checkpoint_saver import FoundryCheckpointSaver + +__all__ = ["FoundryCheckpointSaver"] diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_foundry_checkpoint_saver.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_foundry_checkpoint_saver.py new file mode 100644 index 000000000000..abfeb4bf3fa8 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_foundry_checkpoint_saver.py @@ -0,0 +1,606 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Foundry-backed checkpoint saver for LangGraph.""" + +import logging +from contextlib import AbstractAsyncContextManager +from types import TracebackType +from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Sequence, Tuple, Union + +from azure.core.credentials import TokenCredential +from azure.core.credentials_async import AsyncTokenCredential +from langchain_core.runnables import RunnableConfig +from langgraph.checkpoint.base import ( + BaseCheckpointSaver, + ChannelVersions, + Checkpoint, + CheckpointMetadata, + CheckpointTuple, + SerializerProtocol, + get_checkpoint_id, +) + +from azure.ai.agentserver.core.checkpoints.client import ( + CheckpointItem, + CheckpointItemId, + CheckpointSession, + FoundryCheckpointClient, +) + +from ._item_id import ItemType, ParsedItemId, make_item_id, parse_item_id + +logger = logging.getLogger(__name__) + + +class FoundryCheckpointSaver( + BaseCheckpointSaver[str], AbstractAsyncContextManager["FoundryCheckpointSaver"] +): + """Checkpoint saver backed by Azure AI Foundry checkpoint storage. 
+ + Implements LangGraph's BaseCheckpointSaver interface using the + FoundryCheckpointClient for remote storage. + + This saver only supports async operations. Sync methods will raise + NotImplementedError. + + :param project_endpoint: The Azure AI Foundry project endpoint URL. + Example: "https://.services.ai.azure.com/api/projects/" + :type project_endpoint: str + :param credential: Credential for authentication. Must be an async credential. + :type credential: Union[AsyncTokenCredential, TokenCredential] + :param serde: Optional serializer protocol. Defaults to JsonPlusSerializer. + :type serde: Optional[SerializerProtocol] + + Example:: + + from azure.ai.agentserver.langgraph.checkpointer import FoundryCheckpointSaver + from azure.identity.aio import DefaultAzureCredential + + saver = FoundryCheckpointSaver( + project_endpoint="https://myresource.services.ai.azure.com/api/projects/my-project", + credential=DefaultAzureCredential(), + ) + + # Use with LangGraph + graph = builder.compile(checkpointer=saver) + """ + + def __init__( + self, + project_endpoint: str, + credential: Union[AsyncTokenCredential, TokenCredential], + *, + serde: Optional[SerializerProtocol] = None, + ) -> None: + """Initialize the Foundry checkpoint saver. + + :param project_endpoint: The Azure AI Foundry project endpoint URL. + :type project_endpoint: str + :param credential: Credential for authentication. Must be an async credential. + :type credential: Union[AsyncTokenCredential, TokenCredential] + :param serde: Optional serializer protocol. + :type serde: Optional[SerializerProtocol] + :raises TypeError: If credential is not an AsyncTokenCredential. + """ + super().__init__(serde=serde) + if not isinstance(credential, AsyncTokenCredential): + raise TypeError( + "FoundryCheckpointSaver requires an AsyncTokenCredential. " + "Please use an async credential like DefaultAzureCredential from azure.identity.aio." 
+ ) + self._client = FoundryCheckpointClient(project_endpoint, credential) + self._session_cache: set[str] = set() + + async def __aenter__(self) -> "FoundryCheckpointSaver": + """Enter the async context manager. + + :return: The saver instance. + :rtype: FoundryCheckpointSaver + """ + await self._client.__aenter__() + return self + + async def __aexit__( + self, + exc_type: Optional[type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + """Exit the async context manager. + + :param exc_type: Exception type if an exception occurred. + :param exc_val: Exception value if an exception occurred. + :param exc_tb: Exception traceback if an exception occurred. + """ + await self._client.__aexit__(exc_type, exc_val, exc_tb) + + async def _ensure_session(self, thread_id: str) -> None: + """Ensure a session exists for the thread. + + :param thread_id: The thread identifier. + :type thread_id: str + """ + if thread_id not in self._session_cache: + session = CheckpointSession(session_id=thread_id) + await self._client.upsert_session(session) + self._session_cache.add(thread_id) + + async def _get_latest_checkpoint_id( + self, thread_id: str, checkpoint_ns: str + ) -> Optional[str]: + """Find the latest checkpoint ID for a thread and namespace. + + :param thread_id: The thread identifier. + :type thread_id: str + :param checkpoint_ns: The checkpoint namespace. + :type checkpoint_ns: str + :return: The latest checkpoint ID, or None if not found. 
+ :rtype: Optional[str] + """ + item_ids = await self._client.list_item_ids(thread_id) + + # Filter to checkpoint items in this namespace + checkpoint_ids: List[str] = [] + for item_id in item_ids: + try: + parsed = parse_item_id(item_id.item_id) + if parsed.item_type == "checkpoint" and parsed.checkpoint_ns == checkpoint_ns: + checkpoint_ids.append(parsed.checkpoint_id) + except ValueError: + continue + + if not checkpoint_ids: + return None + + # Return the latest (max) checkpoint ID + return max(checkpoint_ids) + + async def _load_pending_writes( + self, thread_id: str, checkpoint_ns: str, checkpoint_id: str + ) -> List[Tuple[str, str, Any]]: + """Load pending writes for a checkpoint. + + :param thread_id: The thread identifier. + :type thread_id: str + :param checkpoint_ns: The checkpoint namespace. + :type checkpoint_ns: str + :param checkpoint_id: The checkpoint identifier. + :type checkpoint_id: str + :return: List of pending writes as (task_id, channel, value) tuples. + :rtype: List[Tuple[str, str, Any]] + """ + item_ids = await self._client.list_item_ids(thread_id) + writes: List[Tuple[str, str, Any]] = [] + + for item_id in item_ids: + try: + parsed = parse_item_id(item_id.item_id) + if ( + parsed.item_type == "writes" + and parsed.checkpoint_ns == checkpoint_ns + and parsed.checkpoint_id == checkpoint_id + ): + item = await self._client.read_item(item_id) + if item: + task_id, channel, value, _ = self.serde.loads_typed(item.data) + writes.append((task_id, channel, value)) + except (ValueError, TypeError): + continue + + return writes + + async def _load_blobs( + self, thread_id: str, checkpoint_ns: str, checkpoint_id: str, versions: ChannelVersions + ) -> Dict[str, Any]: + """Load channel blobs for a checkpoint. + + :param thread_id: The thread identifier. + :type thread_id: str + :param checkpoint_ns: The checkpoint namespace. + :type checkpoint_ns: str + :param checkpoint_id: The checkpoint identifier. 
+ :type checkpoint_id: str + :param versions: The channel versions to load. + :type versions: ChannelVersions + :return: Dictionary of channel values. + :rtype: Dict[str, Any] + """ + channel_values: Dict[str, Any] = {} + + for channel, version in versions.items(): + blob_item_id = make_item_id( + checkpoint_ns, checkpoint_id, "blob", f"{channel}:{version}" + ) + item_id = CheckpointItemId(session_id=thread_id, item_id=blob_item_id) + item = await self._client.read_item(item_id) + if item: + type_tag, data = self.serde.loads_typed(item.data) + if type_tag != "empty": + channel_values[channel] = data + + return channel_values + + # Async methods (primary implementation) + + async def aget_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]: + """Asynchronously get a checkpoint tuple by config. + + :param config: Configuration specifying which checkpoint to retrieve. + :type config: RunnableConfig + :return: The checkpoint tuple, or None if not found. + :rtype: Optional[CheckpointTuple] + """ + thread_id: str = config["configurable"]["thread_id"] + checkpoint_ns: str = config["configurable"].get("checkpoint_ns", "") + checkpoint_id = get_checkpoint_id(config) + + # Ensure session exists + await self._ensure_session(thread_id) + + # If no checkpoint_id, find the latest + if not checkpoint_id: + checkpoint_id = await self._get_latest_checkpoint_id(thread_id, checkpoint_ns) + if not checkpoint_id: + return None + + # Load the checkpoint item + item_id_str = make_item_id(checkpoint_ns, checkpoint_id, "checkpoint") + item = await self._client.read_item( + CheckpointItemId(session_id=thread_id, item_id=item_id_str) + ) + if not item: + return None + + # Deserialize checkpoint data + checkpoint_data = self.serde.loads_typed(item.data) + checkpoint: Checkpoint = checkpoint_data["checkpoint"] + metadata: CheckpointMetadata = checkpoint_data["metadata"] + + # Load channel values (blobs) + channel_values = await self._load_blobs( + thread_id, checkpoint_ns, 
checkpoint_id, checkpoint.get("channel_versions", {}) + ) + checkpoint = {**checkpoint, "channel_values": channel_values} + + # Load pending writes + pending_writes = await self._load_pending_writes(thread_id, checkpoint_ns, checkpoint_id) + + # Build parent config if parent exists + parent_config: Optional[RunnableConfig] = None + if item.parent_id: + try: + parent_parsed = parse_item_id(item.parent_id) + parent_config = { + "configurable": { + "thread_id": thread_id, + "checkpoint_ns": parent_parsed.checkpoint_ns, + "checkpoint_id": parent_parsed.checkpoint_id, + } + } + except ValueError: + pass + + return CheckpointTuple( + config={ + "configurable": { + "thread_id": thread_id, + "checkpoint_ns": checkpoint_ns, + "checkpoint_id": checkpoint_id, + } + }, + checkpoint=checkpoint, + metadata=metadata, + parent_config=parent_config, + pending_writes=pending_writes, + ) + + async def aput( + self, + config: RunnableConfig, + checkpoint: Checkpoint, + metadata: CheckpointMetadata, + new_versions: ChannelVersions, + ) -> RunnableConfig: + """Asynchronously store a checkpoint. + + :param config: Configuration for the checkpoint. + :type config: RunnableConfig + :param checkpoint: The checkpoint to store. + :type checkpoint: Checkpoint + :param metadata: Additional metadata for the checkpoint. + :type metadata: CheckpointMetadata + :param new_versions: New channel versions as of this write. + :type new_versions: ChannelVersions + :return: Updated configuration with the checkpoint ID. 
+ :rtype: RunnableConfig + """ + thread_id = config["configurable"]["thread_id"] + checkpoint_ns = config["configurable"].get("checkpoint_ns", "") + checkpoint_id = checkpoint["id"] + + # Ensure session exists + await self._ensure_session(thread_id) + + # Determine parent + parent_checkpoint_id = config["configurable"].get("checkpoint_id") + parent_item_id: Optional[str] = None + if parent_checkpoint_id: + parent_item_id = make_item_id(checkpoint_ns, parent_checkpoint_id, "checkpoint") + + # Prepare checkpoint data (without channel_values - stored as blobs) + checkpoint_copy = checkpoint.copy() + channel_values: Dict[str, Any] = checkpoint_copy.pop("channel_values", {}) # type: ignore[misc] + + checkpoint_data = self.serde.dumps_typed({ + "checkpoint": checkpoint_copy, + "metadata": metadata, + }) + + # Create checkpoint item + item_id_str = make_item_id(checkpoint_ns, checkpoint_id, "checkpoint") + items: List[CheckpointItem] = [ + CheckpointItem( + session_id=thread_id, + item_id=item_id_str, + data=checkpoint_data, + parent_id=parent_item_id, + ) + ] + + # Create blob items for channel values with new versions + for channel, version in new_versions.items(): + if channel in channel_values: + blob_data = self.serde.dumps_typed(channel_values[channel]) + else: + blob_data = self.serde.dumps_typed(("empty", b"")) + + blob_item_id = make_item_id( + checkpoint_ns, checkpoint_id, "blob", f"{channel}:{version}" + ) + items.append( + CheckpointItem( + session_id=thread_id, + item_id=blob_item_id, + data=blob_data, + parent_id=item_id_str, + ) + ) + + await self._client.create_items(items) + + logger.debug( + "Saved checkpoint %s to Foundry session %s", + checkpoint_id, + thread_id, + ) + + return { + "configurable": { + "thread_id": thread_id, + "checkpoint_ns": checkpoint_ns, + "checkpoint_id": checkpoint_id, + } + } + + async def aput_writes( + self, + config: RunnableConfig, + writes: Sequence[Tuple[str, Any]], + task_id: str, + task_path: str = "", + ) -> None: + 
"""Asynchronously store intermediate writes for a checkpoint. + + :param config: Configuration of the related checkpoint. + :type config: RunnableConfig + :param writes: List of writes to store as (channel, value) pairs. + :type writes: Sequence[Tuple[str, Any]] + :param task_id: Identifier for the task creating the writes. + :type task_id: str + :param task_path: Path of the task creating the writes. + :type task_path: str + """ + thread_id = config["configurable"]["thread_id"] + checkpoint_ns = config["configurable"].get("checkpoint_ns", "") + checkpoint_id = config["configurable"]["checkpoint_id"] + + checkpoint_item_id = make_item_id(checkpoint_ns, checkpoint_id, "checkpoint") + + items: List[CheckpointItem] = [] + for idx, (channel, value) in enumerate(writes): + write_data = self.serde.dumps_typed((task_id, channel, value, task_path)) + write_item_id = make_item_id( + checkpoint_ns, checkpoint_id, "writes", f"{task_id}:{idx}" + ) + items.append( + CheckpointItem( + session_id=thread_id, + item_id=write_item_id, + data=write_data, + parent_id=checkpoint_item_id, + ) + ) + + if items: + await self._client.create_items(items) + logger.debug( + "Saved %d writes for checkpoint %s", + len(items), + checkpoint_id, + ) + + async def alist( + self, + config: Optional[RunnableConfig], + *, + filter: Optional[Dict[str, Any]] = None, + before: Optional[RunnableConfig] = None, + limit: Optional[int] = None, + ) -> AsyncIterator[CheckpointTuple]: + """Asynchronously list checkpoints matching filter criteria. + + :param config: Base configuration for filtering checkpoints. + :type config: Optional[RunnableConfig] + :param filter: Additional filtering criteria for metadata. + :type filter: Optional[Dict[str, Any]] + :param before: List checkpoints created before this configuration. + :type before: Optional[RunnableConfig] + :param limit: Maximum number of checkpoints to return. + :type limit: Optional[int] + :return: Async iterator of matching checkpoint tuples. 
+ :rtype: AsyncIterator[CheckpointTuple] + """ + if not config: + return + + thread_id = config["configurable"]["thread_id"] + checkpoint_ns = config["configurable"].get("checkpoint_ns") + + # Get all items for this session + item_ids = await self._client.list_item_ids(thread_id) + + # Filter to checkpoint items only + checkpoint_items: List[Tuple[ParsedItemId, CheckpointItemId]] = [] + for item_id in item_ids: + try: + parsed = parse_item_id(item_id.item_id) + if parsed.item_type == "checkpoint": + # Filter by namespace if specified + if checkpoint_ns is None or parsed.checkpoint_ns == checkpoint_ns: + checkpoint_items.append((parsed, item_id)) + except ValueError: + continue + + # Sort by checkpoint_id in reverse order (newest first) + checkpoint_items.sort(key=lambda x: x[0].checkpoint_id, reverse=True) + + # Apply before cursor + if before: + before_id = get_checkpoint_id(before) + if before_id: + checkpoint_items = [ + (p, i) for p, i in checkpoint_items if p.checkpoint_id < before_id + ] + + # Apply limit + if limit: + checkpoint_items = checkpoint_items[:limit] + + # Load and yield each checkpoint + for parsed, _ in checkpoint_items: + tuple_config: RunnableConfig = { + "configurable": { + "thread_id": thread_id, + "checkpoint_ns": parsed.checkpoint_ns, + "checkpoint_id": parsed.checkpoint_id, + } + } + checkpoint_tuple = await self.aget_tuple(tuple_config) + if checkpoint_tuple: + # Apply metadata filter if provided + if filter: + if not all( + checkpoint_tuple.metadata.get(k) == v for k, v in filter.items() + ): + continue + yield checkpoint_tuple + + async def adelete_thread(self, thread_id: str) -> None: + """Delete all checkpoints and writes for a thread. + + :param thread_id: The thread ID whose checkpoints should be deleted. 
+ :type thread_id: str + """ + await self._client.delete_session(thread_id) + self._session_cache.discard(thread_id) + logger.debug("Deleted session %s", thread_id) + + # Sync methods (raise NotImplementedError) + + def get_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]: + """Sync version not supported - use aget_tuple instead. + + :raises NotImplementedError: Always raised. + """ + raise NotImplementedError( + "FoundryCheckpointSaver requires async usage. Use aget_tuple() instead." + ) + + def list( + self, + config: Optional[RunnableConfig], + *, + filter: Optional[Dict[str, Any]] = None, + before: Optional[RunnableConfig] = None, + limit: Optional[int] = None, + ) -> Iterator[CheckpointTuple]: + """Sync version not supported - use alist instead. + + :raises NotImplementedError: Always raised. + """ + raise NotImplementedError( + "FoundryCheckpointSaver requires async usage. Use alist() instead." + ) + + def put( + self, + config: RunnableConfig, + checkpoint: Checkpoint, + metadata: CheckpointMetadata, + new_versions: ChannelVersions, + ) -> RunnableConfig: + """Sync version not supported - use aput instead. + + :raises NotImplementedError: Always raised. + """ + raise NotImplementedError( + "FoundryCheckpointSaver requires async usage. Use aput() instead." + ) + + def put_writes( + self, + config: RunnableConfig, + writes: Sequence[Tuple[str, Any]], + task_id: str, + task_path: str = "", + ) -> None: + """Sync version not supported - use aput_writes instead. + + :raises NotImplementedError: Always raised. + """ + raise NotImplementedError( + "FoundryCheckpointSaver requires async usage. Use aput_writes() instead." + ) + + def delete_thread(self, thread_id: str) -> None: + """Sync version not supported - use adelete_thread instead. + + :raises NotImplementedError: Always raised. + """ + raise NotImplementedError( + "FoundryCheckpointSaver requires async usage. Use adelete_thread() instead." 
+ ) + + def get_next_version(self, current: Optional[str], channel: None) -> str: + """Generate the next version ID for a channel. + + Uses string versions with format "{counter}.{random}". + + :param current: The current version identifier. + :type current: Optional[str] + :param channel: Deprecated argument, kept for backwards compatibility. + :return: The next version identifier. + :rtype: str + """ + import random as rand + + if current is None: + current_v = 0 + elif isinstance(current, int): + current_v = current + else: + current_v = int(current.split(".")[0]) + next_v = current_v + 1 + next_h = rand.random() + return f"{next_v:032}.{next_h:016}" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_item_id.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_item_id.py new file mode 100644 index 000000000000..8758181ce2f2 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_item_id.py @@ -0,0 +1,96 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Item ID utilities for composite checkpoint item identifiers.""" + +from dataclasses import dataclass +from typing import Literal + +ItemType = Literal["checkpoint", "writes", "blob"] + + +@dataclass +class ParsedItemId: + """Parsed components of a checkpoint item ID. + + :ivar checkpoint_ns: The checkpoint namespace. + :ivar checkpoint_id: The checkpoint identifier. + :ivar item_type: The type of item (checkpoint, writes, or blob). + :ivar sub_key: Additional key for writes or blobs. + """ + + checkpoint_ns: str + checkpoint_id: str + item_type: ItemType + sub_key: str + + +def _encode(s: str) -> str: + """URL-safe encode a string (escape colons and percent signs). + + :param s: The string to encode. 
+ :type s: str + :return: The encoded string. + :rtype: str + """ + return s.replace("%", "%25").replace(":", "%3A") + + +def _decode(s: str) -> str: + """Decode a URL-safe encoded string. + + :param s: The encoded string. + :type s: str + :return: The decoded string. + :rtype: str + """ + return s.replace("%3A", ":").replace("%25", "%") + + +def make_item_id( + checkpoint_ns: str, + checkpoint_id: str, + item_type: ItemType, + sub_key: str = "", +) -> str: + """Create a composite item ID. + + Format: {checkpoint_ns}:{checkpoint_id}:{type}:{sub_key} + + :param checkpoint_ns: The checkpoint namespace. + :type checkpoint_ns: str + :param checkpoint_id: The checkpoint identifier. + :type checkpoint_id: str + :param item_type: The type of item (checkpoint, writes, or blob). + :type item_type: ItemType + :param sub_key: Additional key for writes or blobs. + :type sub_key: str + :return: The composite item ID. + :rtype: str + """ + return f"{_encode(checkpoint_ns)}:{_encode(checkpoint_id)}:{item_type}:{_encode(sub_key)}" + + +def parse_item_id(item_id: str) -> ParsedItemId: + """Parse a composite item ID back to components. + + :param item_id: The composite item ID to parse. + :type item_id: str + :return: The parsed item ID components. + :rtype: ParsedItemId + :raises ValueError: If the item ID format is invalid. 
+ """ + parts = item_id.split(":", 3) + if len(parts) != 4: + raise ValueError(f"Invalid item_id format: {item_id}") + + item_type = parts[2] + if item_type not in ("checkpoint", "writes", "blob"): + raise ValueError(f"Invalid item_type in item_id: {item_type}") + + return ParsedItemId( + checkpoint_ns=_decode(parts[0]), + checkpoint_id=_decode(parts[1]), + item_type=item_type, # type: ignore[arg-type] + sub_key=_decode(parts[3]), + ) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/checkpointer/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/checkpointer/__init__.py new file mode 100644 index 000000000000..315126869940 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/checkpointer/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Unit tests for the checkpointer module.""" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/checkpointer/test_foundry_checkpoint_saver.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/checkpointer/test_foundry_checkpoint_saver.py new file mode 100644 index 000000000000..d25a34f6289d --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/checkpointer/test_foundry_checkpoint_saver.py @@ -0,0 +1,441 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Unit tests for FoundryCheckpointSaver.""" + +import pytest +from unittest.mock import Mock + +from azure.core.credentials import TokenCredential +from azure.core.credentials_async import AsyncTokenCredential + +from azure.ai.agentserver.langgraph.checkpointer import FoundryCheckpointSaver +from azure.ai.agentserver.langgraph.checkpointer._foundry_checkpoint_saver import ( + BaseCheckpointSaver, +) +from azure.ai.agentserver.langgraph.checkpointer._item_id import make_item_id + +from ..mocks import MockFoundryCheckpointClient + + +class TestableFoundryCheckpointSaver(FoundryCheckpointSaver): + """Testable version that accepts a mock client directly (bypasses credential check).""" + + def __init__(self, client: MockFoundryCheckpointClient) -> None: + """Initialize with a mock client.""" + # Skip FoundryCheckpointSaver.__init__ and call BaseCheckpointSaver directly + BaseCheckpointSaver.__init__(self, serde=None) + self._client = client # type: ignore[assignment] + self._session_cache: set[str] = set() + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_aget_tuple_returns_none_for_missing_checkpoint() -> None: + """Test that aget_tuple returns None when checkpoint doesn't exist.""" + client = MockFoundryCheckpointClient() + saver = TestableFoundryCheckpointSaver(client=client) + + config = {"configurable": {"thread_id": "thread-1"}} + result = await saver.aget_tuple(config) + + assert result is None + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_aput_creates_checkpoint_item() -> None: + """Test that aput creates a checkpoint item.""" + client = MockFoundryCheckpointClient() + saver = TestableFoundryCheckpointSaver(client=client) + + config = {"configurable": {"thread_id": "thread-1", "checkpoint_ns": ""}} + checkpoint = { + "id": "cp-001", + "channel_values": {}, + "channel_versions": {}, + "versions_seen": {}, + "pending_sends": [], + } + metadata = {"source": "test"} + + result = 
await saver.aput(config, checkpoint, metadata, {}) + + assert result["configurable"]["checkpoint_id"] == "cp-001" + assert result["configurable"]["thread_id"] == "thread-1" + + # Verify item was created + item_ids = await client.list_item_ids("thread-1") + assert len(item_ids) == 1 + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_aput_creates_blob_items_for_new_versions() -> None: + """Test that aput creates blob items for channel values.""" + client = MockFoundryCheckpointClient() + saver = TestableFoundryCheckpointSaver(client=client) + + config = {"configurable": {"thread_id": "thread-1", "checkpoint_ns": ""}} + checkpoint = { + "id": "cp-001", + "channel_values": {"messages": ["hello", "world"]}, + "channel_versions": {"messages": "1"}, + "versions_seen": {}, + "pending_sends": [], + } + metadata = {"source": "test"} + new_versions = {"messages": "1"} + + await saver.aput(config, checkpoint, metadata, new_versions) + + # Should have 2 items: checkpoint + 1 blob + item_ids = await client.list_item_ids("thread-1") + assert len(item_ids) == 2 + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_aput_returns_config_with_checkpoint_id() -> None: + """Test that aput returns config with the correct checkpoint ID.""" + client = MockFoundryCheckpointClient() + saver = TestableFoundryCheckpointSaver(client=client) + + config = {"configurable": {"thread_id": "thread-1", "checkpoint_ns": "ns1"}} + checkpoint = { + "id": "my-checkpoint-id", + "channel_values": {}, + "channel_versions": {}, + "versions_seen": {}, + "pending_sends": [], + } + + result = await saver.aput(config, checkpoint, {}, {}) + + assert result["configurable"]["checkpoint_id"] == "my-checkpoint-id" + assert result["configurable"]["thread_id"] == "thread-1" + assert result["configurable"]["checkpoint_ns"] == "ns1" + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_aget_tuple_returns_checkpoint_with_data() -> None: + """Test that aget_tuple returns checkpoint data correctly.""" 
+ client = MockFoundryCheckpointClient() + saver = TestableFoundryCheckpointSaver(client=client) + + # Save a checkpoint first + config = {"configurable": {"thread_id": "thread-1", "checkpoint_ns": ""}} + checkpoint = { + "id": "cp-001", + "channel_values": {}, + "channel_versions": {}, + "versions_seen": {}, + "pending_sends": [], + } + metadata = {"source": "test", "step": 1} + + await saver.aput(config, checkpoint, metadata, {}) + + # Now retrieve it + get_config = {"configurable": {"thread_id": "thread-1", "checkpoint_id": "cp-001"}} + result = await saver.aget_tuple(get_config) + + assert result is not None + assert result.checkpoint["id"] == "cp-001" + assert result.metadata["source"] == "test" + assert result.metadata["step"] == 1 + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_aget_tuple_returns_latest_without_checkpoint_id() -> None: + """Test that aget_tuple returns the latest checkpoint when no ID specified.""" + client = MockFoundryCheckpointClient() + saver = TestableFoundryCheckpointSaver(client=client) + + # Save multiple checkpoints + config = {"configurable": {"thread_id": "thread-1", "checkpoint_ns": ""}} + + for i in range(3): + checkpoint = { + "id": f"cp-00{i}", + "channel_values": {}, + "channel_versions": {}, + "versions_seen": {}, + "pending_sends": [], + } + config = await saver.aput(config, checkpoint, {"step": i}, {}) + + # Retrieve without specifying checkpoint_id + get_config = {"configurable": {"thread_id": "thread-1"}} + result = await saver.aget_tuple(get_config) + + assert result is not None + # Should get the latest (max checkpoint_id) + assert result.checkpoint["id"] == "cp-002" + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_aput_writes_creates_write_items() -> None: + """Test that aput_writes creates write items.""" + client = MockFoundryCheckpointClient() + saver = TestableFoundryCheckpointSaver(client=client) + + # First create a checkpoint + config = {"configurable": {"thread_id": "thread-1", 
"checkpoint_ns": ""}} + checkpoint = { + "id": "cp-001", + "channel_values": {}, + "channel_versions": {}, + "versions_seen": {}, + "pending_sends": [], + } + config = await saver.aput(config, checkpoint, {}, {}) + + # Now add writes + writes = [("channel1", "value1"), ("channel2", "value2")] + await saver.aput_writes(config, writes, task_id="task-1") + + # Should have 3 items: checkpoint + 2 writes + item_ids = await client.list_item_ids("thread-1") + assert len(item_ids) == 3 + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_aget_tuple_returns_pending_writes() -> None: + """Test that aget_tuple includes pending writes.""" + client = MockFoundryCheckpointClient() + saver = TestableFoundryCheckpointSaver(client=client) + + # Create checkpoint and add writes + config = {"configurable": {"thread_id": "thread-1", "checkpoint_ns": ""}} + checkpoint = { + "id": "cp-001", + "channel_values": {}, + "channel_versions": {}, + "versions_seen": {}, + "pending_sends": [], + } + config = await saver.aput(config, checkpoint, {}, {}) + + writes = [("channel1", "value1")] + await saver.aput_writes(config, writes, task_id="task-1") + + # Retrieve and check pending writes + result = await saver.aget_tuple(config) + + assert result is not None + assert result.pending_writes is not None + assert len(result.pending_writes) == 1 + assert result.pending_writes[0][1] == "channel1" + assert result.pending_writes[0][2] == "value1" + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_alist_returns_checkpoints_in_order() -> None: + """Test that alist returns checkpoints in reverse order.""" + client = MockFoundryCheckpointClient() + saver = TestableFoundryCheckpointSaver(client=client) + + # Save multiple checkpoints + config = {"configurable": {"thread_id": "thread-1", "checkpoint_ns": ""}} + for i in range(3): + checkpoint = { + "id": f"cp-00{i}", + "channel_values": {}, + "channel_versions": {}, + "versions_seen": {}, + "pending_sends": [], + } + config = await 
saver.aput(config, checkpoint, {"step": i}, {}) + + # List checkpoints + list_config = {"configurable": {"thread_id": "thread-1"}} + results = [] + async for cp in saver.alist(list_config): + results.append(cp) + + assert len(results) == 3 + # Should be in reverse order (newest first) + assert results[0].checkpoint["id"] == "cp-002" + assert results[1].checkpoint["id"] == "cp-001" + assert results[2].checkpoint["id"] == "cp-000" + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_alist_filters_by_namespace() -> None: + """Test that alist filters by checkpoint namespace.""" + client = MockFoundryCheckpointClient() + saver = TestableFoundryCheckpointSaver(client=client) + + # Save checkpoints in different namespaces + for ns in ["ns1", "ns2"]: + config = {"configurable": {"thread_id": "thread-1", "checkpoint_ns": ns}} + checkpoint = { + "id": f"cp-{ns}", + "channel_values": {}, + "channel_versions": {}, + "versions_seen": {}, + "pending_sends": [], + } + await saver.aput(config, checkpoint, {}, {}) + + # List only ns1 + list_config = {"configurable": {"thread_id": "thread-1", "checkpoint_ns": "ns1"}} + results = [] + async for cp in saver.alist(list_config): + results.append(cp) + + assert len(results) == 1 + assert results[0].checkpoint["id"] == "cp-ns1" + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_alist_applies_limit() -> None: + """Test that alist respects the limit parameter.""" + client = MockFoundryCheckpointClient() + saver = TestableFoundryCheckpointSaver(client=client) + + # Save multiple checkpoints + config = {"configurable": {"thread_id": "thread-1", "checkpoint_ns": ""}} + for i in range(5): + checkpoint = { + "id": f"cp-00{i}", + "channel_values": {}, + "channel_versions": {}, + "versions_seen": {}, + "pending_sends": [], + } + config = await saver.aput(config, checkpoint, {}, {}) + + # List with limit + list_config = {"configurable": {"thread_id": "thread-1"}} + results = [] + async for cp in saver.alist(list_config, limit=2): 
+ results.append(cp) + + assert len(results) == 2 + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_adelete_thread_deletes_session() -> None: + """Test that adelete_thread removes all checkpoints for a thread.""" + client = MockFoundryCheckpointClient() + saver = TestableFoundryCheckpointSaver(client=client) + + # Create a checkpoint + config = {"configurable": {"thread_id": "thread-1", "checkpoint_ns": ""}} + checkpoint = { + "id": "cp-001", + "channel_values": {}, + "channel_versions": {}, + "versions_seen": {}, + "pending_sends": [], + } + await saver.aput(config, checkpoint, {}, {}) + + # Verify it exists + item_ids = await client.list_item_ids("thread-1") + assert len(item_ids) == 1 + + # Delete the thread + await saver.adelete_thread("thread-1") + + # Verify it's gone + item_ids = await client.list_item_ids("thread-1") + assert len(item_ids) == 0 + + +@pytest.mark.unit +def test_sync_methods_raise_not_implemented() -> None: + """Test that sync methods raise NotImplementedError.""" + client = MockFoundryCheckpointClient() + saver = TestableFoundryCheckpointSaver(client=client) + + config = {"configurable": {"thread_id": "thread-1"}} + checkpoint = {"id": "cp-001", "channel_values": {}, "channel_versions": {}} + + with pytest.raises(NotImplementedError, match="aget_tuple"): + saver.get_tuple(config) + + with pytest.raises(NotImplementedError, match="aput"): + saver.put(config, checkpoint, {}, {}) + + with pytest.raises(NotImplementedError, match="aput_writes"): + saver.put_writes(config, [], "task-1") + + with pytest.raises(NotImplementedError, match="alist"): + list(saver.list(config)) + + with pytest.raises(NotImplementedError, match="adelete_thread"): + saver.delete_thread("thread-1") + + +@pytest.mark.unit +@pytest.mark.asyncio +async def test_aget_tuple_returns_parent_config() -> None: + """Test that aget_tuple includes parent config when checkpoint has parent.""" + client = MockFoundryCheckpointClient() + saver = 
TestableFoundryCheckpointSaver(client=client) + + # Save parent checkpoint + config = {"configurable": {"thread_id": "thread-1", "checkpoint_ns": ""}} + parent_checkpoint = { + "id": "cp-001", + "channel_values": {}, + "channel_versions": {}, + "versions_seen": {}, + "pending_sends": [], + } + config = await saver.aput(config, parent_checkpoint, {}, {}) + + # Save child checkpoint + child_checkpoint = { + "id": "cp-002", + "channel_values": {}, + "channel_versions": {}, + "versions_seen": {}, + "pending_sends": [], + } + await saver.aput(config, child_checkpoint, {}, {}) + + # Retrieve child + get_config = {"configurable": {"thread_id": "thread-1", "checkpoint_id": "cp-002"}} + result = await saver.aget_tuple(get_config) + + assert result is not None + assert result.parent_config is not None + assert result.parent_config["configurable"]["checkpoint_id"] == "cp-001" + + +@pytest.mark.unit +def test_constructor_requires_async_credential() -> None: + """Test that FoundryCheckpointSaver raises TypeError for sync credentials.""" + mock_credential = Mock(spec=TokenCredential) + + with pytest.raises(TypeError, match="AsyncTokenCredential"): + FoundryCheckpointSaver( + project_endpoint="https://test.services.ai.azure.com/api/projects/test", + credential=mock_credential, + ) + + +@pytest.mark.unit +def test_constructor_accepts_async_credential() -> None: + """Test that FoundryCheckpointSaver accepts AsyncTokenCredential.""" + mock_credential = Mock(spec=AsyncTokenCredential) + + saver = FoundryCheckpointSaver( + project_endpoint="https://test.services.ai.azure.com/api/projects/test", + credential=mock_credential, + ) + + assert saver is not None + assert saver._client is not None diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/checkpointer/test_item_id.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/checkpointer/test_item_id.py new file mode 100644 index 000000000000..ddb11ffa62f0 --- /dev/null +++ 
b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/checkpointer/test_item_id.py @@ -0,0 +1,125 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Unit tests for item ID utilities.""" + +import pytest + +from azure.ai.agentserver.langgraph.checkpointer._item_id import ( + ParsedItemId, + make_item_id, + parse_item_id, +) + + +@pytest.mark.unit +def test_make_item_id_formats_correctly() -> None: + """Test that make_item_id creates correct composite IDs.""" + item_id = make_item_id("ns1", "cp-001", "checkpoint") + assert item_id == "ns1:cp-001:checkpoint:" + + +@pytest.mark.unit +def test_make_item_id_with_sub_key() -> None: + """Test that make_item_id includes sub_key correctly.""" + item_id = make_item_id("ns1", "cp-001", "writes", "task1:0") + assert item_id == "ns1:cp-001:writes:task1%3A0" + + +@pytest.mark.unit +def test_make_item_id_with_blob() -> None: + """Test blob item ID format.""" + item_id = make_item_id("", "cp-001", "blob", "messages:v2") + assert item_id == ":cp-001:blob:messages%3Av2" + + +@pytest.mark.unit +def test_parse_item_id_extracts_components() -> None: + """Test that parse_item_id extracts all components correctly.""" + item_id = "ns1:cp-001:checkpoint:" + parsed = parse_item_id(item_id) + + assert parsed.checkpoint_ns == "ns1" + assert parsed.checkpoint_id == "cp-001" + assert parsed.item_type == "checkpoint" + assert parsed.sub_key == "" + + +@pytest.mark.unit +def test_parse_item_id_extracts_sub_key() -> None: + """Test that parse_item_id extracts sub_key correctly.""" + item_id = "ns1:cp-001:writes:task1%3A0" + parsed = parse_item_id(item_id) + + assert parsed.checkpoint_ns == "ns1" + assert parsed.checkpoint_id == "cp-001" + assert parsed.item_type == "writes" + assert parsed.sub_key == "task1:0" + + +@pytest.mark.unit +def test_roundtrip_simple() -> None: + """Test roundtrip 
encoding/decoding of simple IDs.""" + original_ns = "namespace" + original_id = "checkpoint-123" + original_type = "checkpoint" + original_key = "" + + item_id = make_item_id(original_ns, original_id, original_type, original_key) + parsed = parse_item_id(item_id) + + assert parsed.checkpoint_ns == original_ns + assert parsed.checkpoint_id == original_id + assert parsed.item_type == original_type + assert parsed.sub_key == original_key + + +@pytest.mark.unit +def test_roundtrip_with_special_characters() -> None: + """Test roundtrip encoding/decoding with special characters (colons).""" + original_ns = "ns:with:colons" + original_id = "cp:123:abc" + original_type = "blob" + original_key = "channel:v1:extra" + + item_id = make_item_id(original_ns, original_id, original_type, original_key) + parsed = parse_item_id(item_id) + + assert parsed.checkpoint_ns == original_ns + assert parsed.checkpoint_id == original_id + assert parsed.item_type == original_type + assert parsed.sub_key == original_key + + +@pytest.mark.unit +def test_roundtrip_with_percent_signs() -> None: + """Test roundtrip encoding/decoding with percent signs.""" + original_ns = "ns%test" + original_id = "cp%123" + original_type = "checkpoint" + original_key = "key%value" + + item_id = make_item_id(original_ns, original_id, original_type, original_key) + parsed = parse_item_id(item_id) + + assert parsed.checkpoint_ns == original_ns + assert parsed.checkpoint_id == original_id + assert parsed.item_type == original_type + assert parsed.sub_key == original_key + + +@pytest.mark.unit +def test_parse_item_id_raises_on_invalid_format() -> None: + """Test that parse_item_id raises ValueError for invalid format.""" + with pytest.raises(ValueError, match="Invalid item_id format"): + parse_item_id("invalid:format") + + with pytest.raises(ValueError, match="Invalid item_id format"): + parse_item_id("only:two:parts") + + +@pytest.mark.unit +def test_parse_item_id_raises_on_invalid_type() -> None: + """Test that 
parse_item_id raises ValueError for invalid item type.""" + with pytest.raises(ValueError, match="Invalid item_type"): + parse_item_id("ns:cp:invalid:key") diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/__init__.py new file mode 100644 index 000000000000..4436d04866df --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/__init__.py @@ -0,0 +1,8 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Mock implementations for testing.""" + +from .mock_checkpoint_client import MockFoundryCheckpointClient + +__all__ = ["MockFoundryCheckpointClient"] diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/mock_checkpoint_client.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/mock_checkpoint_client.py new file mode 100644 index 000000000000..ffc1e2fcc4c1 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/mock_checkpoint_client.py @@ -0,0 +1,156 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Mock implementation of FoundryCheckpointClient for testing.""" + +from typing import Any, Dict, List, Optional + +from azure.ai.agentserver.core.checkpoints.client import ( + CheckpointItem, + CheckpointItemId, + CheckpointSession, +) + + +class MockFoundryCheckpointClient: + """In-memory mock for FoundryCheckpointClient for unit testing. + + Stores checkpoints in memory without making any HTTP calls. + """ + + def __init__(self, endpoint: str = "https://mock.endpoint") -> None: + """Initialize the mock client. + + :param endpoint: The mock endpoint URL. 
+ :type endpoint: str + """ + self._endpoint = endpoint + self._sessions: Dict[str, CheckpointSession] = {} + self._items: Dict[str, CheckpointItem] = {} + + def _item_key(self, item_id: CheckpointItemId) -> str: + """Generate a unique key for a checkpoint item. + + :param item_id: The checkpoint item identifier. + :type item_id: CheckpointItemId + :return: The unique key. + :rtype: str + """ + return f"{item_id.session_id}:{item_id.item_id}" + + # Session operations + + async def upsert_session(self, session: CheckpointSession) -> CheckpointSession: + """Create or update a checkpoint session. + + :param session: The checkpoint session to upsert. + :type session: CheckpointSession + :return: The upserted checkpoint session. + :rtype: CheckpointSession + """ + self._sessions[session.session_id] = session + return session + + async def read_session(self, session_id: str) -> Optional[CheckpointSession]: + """Read a checkpoint session by ID. + + :param session_id: The session identifier. + :type session_id: str + :return: The checkpoint session if found, None otherwise. + :rtype: Optional[CheckpointSession] + """ + return self._sessions.get(session_id) + + async def delete_session(self, session_id: str) -> None: + """Delete a checkpoint session. + + :param session_id: The session identifier. + :type session_id: str + """ + self._sessions.pop(session_id, None) + # Also delete all items in the session + keys_to_delete = [ + key for key, item in self._items.items() if item.session_id == session_id + ] + for key in keys_to_delete: + del self._items[key] + + # Item operations + + async def create_items(self, items: List[CheckpointItem]) -> List[CheckpointItem]: + """Create checkpoint items in batch. + + :param items: The checkpoint items to create. + :type items: List[CheckpointItem] + :return: The created checkpoint items. 
+ :rtype: List[CheckpointItem] + """ + for item in items: + key = self._item_key(item.to_item_id()) + self._items[key] = item + return items + + async def read_item(self, item_id: CheckpointItemId) -> Optional[CheckpointItem]: + """Read a checkpoint item by ID. + + :param item_id: The checkpoint item identifier. + :type item_id: CheckpointItemId + :return: The checkpoint item if found, None otherwise. + :rtype: Optional[CheckpointItem] + """ + key = self._item_key(item_id) + return self._items.get(key) + + async def delete_item(self, item_id: CheckpointItemId) -> bool: + """Delete a checkpoint item. + + :param item_id: The checkpoint item identifier. + :type item_id: CheckpointItemId + :return: True if the item was deleted, False if not found. + :rtype: bool + """ + key = self._item_key(item_id) + if key in self._items: + del self._items[key] + return True + return False + + async def list_item_ids( + self, session_id: str, parent_id: Optional[str] = None + ) -> List[CheckpointItemId]: + """List checkpoint item IDs for a session. + + :param session_id: The session identifier. + :type session_id: str + :param parent_id: Optional parent item identifier for filtering. + :type parent_id: Optional[str] + :return: List of checkpoint item identifiers. + :rtype: List[CheckpointItemId] + """ + result = [] + for item in self._items.values(): + if item.session_id == session_id: + if parent_id is None or item.parent_id == parent_id: + result.append(item.to_item_id()) + return result + + # Context manager methods + + async def close(self) -> None: + """Close the client (no-op for mock).""" + pass + + async def __aenter__(self) -> "MockFoundryCheckpointClient": + """Enter the async context manager. + + :return: The client instance. + :rtype: MockFoundryCheckpointClient + """ + return self + + async def __aexit__(self, *exc_details: Any) -> None: + """Exit the async context manager. + + :param exc_details: Exception details if an exception occurred. 
+ """ + pass diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_from_langgraph_managed.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_from_langgraph_managed.py new file mode 100644 index 000000000000..5a8b5cf2a1f4 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_from_langgraph_managed.py @@ -0,0 +1,72 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Unit tests for from_langgraph with checkpointer via compile.""" + +import pytest +from unittest.mock import Mock + +from azure.core.credentials_async import AsyncTokenCredential + + +@pytest.mark.unit +def test_from_langgraph_basic() -> None: + """Test that from_langgraph works without a checkpointer.""" + from azure.ai.agentserver.langgraph import from_langgraph + from langgraph.graph import StateGraph + from typing_extensions import TypedDict + + class State(TypedDict): + messages: list + + builder = StateGraph(State) + builder.add_node("node1", lambda x: x) + builder.set_entry_point("node1") + builder.set_finish_point("node1") + graph = builder.compile() + + adapter = from_langgraph(graph) + + assert adapter is not None + + +@pytest.mark.unit +def test_graph_with_foundry_checkpointer_via_compile() -> None: + """Test that FoundryCheckpointSaver can be set via builder.compile().""" + from azure.ai.agentserver.langgraph import from_langgraph + from azure.ai.agentserver.langgraph.checkpointer import FoundryCheckpointSaver + from langgraph.graph import StateGraph + from typing_extensions import TypedDict + + class State(TypedDict): + messages: list + + builder = StateGraph(State) + builder.add_node("node1", lambda x: x) + builder.add_node("node2", lambda x: x) + builder.add_edge("node1", "node2") + builder.set_entry_point("node1") + builder.set_finish_point("node2") + + mock_credential = 
Mock(spec=AsyncTokenCredential) + saver = FoundryCheckpointSaver( + project_endpoint="https://test.services.ai.azure.com/api/projects/test-project", + credential=mock_credential, + ) + + # User sets checkpointer via LangGraph's native compile() + graph = builder.compile( + checkpointer=saver, + interrupt_before=["node1"], + interrupt_after=["node2"], + debug=True, + ) + + adapter = from_langgraph(graph) + + # Verify checkpointer and compile parameters are preserved + assert adapter is not None + assert isinstance(adapter._graph.checkpointer, FoundryCheckpointSaver) + assert adapter._graph.interrupt_before_nodes == ["node1"] + assert adapter._graph.interrupt_after_nodes == ["node2"] + assert adapter._graph.debug is True From cb74c2c4e7099f0cf92c55fab4a1cf8487f58f3b Mon Sep 17 00:00:00 2001 From: Declan Date: Fri, 6 Feb 2026 14:53:07 -0800 Subject: [PATCH 92/94] [Hosted Agents] Add foundry checkpoints samples for agentframework and langgraph (#45064) * [Hosted Agents] Add foundry checkpoints samples for agentframework and langgraph * docs: Add README files for Foundry checkpoint samples --- sdk/agentserver/TASK.md | 5 ++ .../README.md | 89 +++++++++++++++++++ .../workflow_with_foundry_checkpoints/main.py | 84 +++++++++++++++++ .../README.md | 80 +++++++++++++++++ .../main.py | 82 +++++++++++++++++ 5 files changed, 340 insertions(+) create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_with_foundry_checkpoints/README.md create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_with_foundry_checkpoints/main.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/samples/simple_agent_with_foundry_checkpointer/README.md create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/samples/simple_agent_with_foundry_checkpointer/main.py diff --git a/sdk/agentserver/TASK.md b/sdk/agentserver/TASK.md index ff03e777585b..aecd759e81ce 100644 --- a/sdk/agentserver/TASK.md +++ 
b/sdk/agentserver/TASK.md @@ -4,6 +4,11 @@ ## Done +- [x] 2026-02-06 — Add README files for Foundry checkpoint samples + - Files: `azure-ai-agentserver-agentframework/samples/workflow_with_foundry_checkpoints/README.md`, + `azure-ai-agentserver-langgraph/samples/simple_agent_with_foundry_checkpointer/README.md` + - Updated setup/run/request docs, added missing LangGraph sample README, and corrected `.env` setup guidance. + - [x] 2026-02-04 — Implement managed checkpoints feature - Files: core/checkpoints/ (new), agentframework/persistence/_foundry_checkpoint_*.py (new), agentframework/__init__.py (modified) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_with_foundry_checkpoints/README.md b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_with_foundry_checkpoints/README.md new file mode 100644 index 000000000000..3034a4e7de6b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_with_foundry_checkpoints/README.md @@ -0,0 +1,89 @@ +# Workflow Agent with Foundry Managed Checkpoints + +This sample hosts a two-step Agent Framework workflow—`Writer` followed by `Reviewer`—and uses +`FoundryCheckpointRepository` to persist workflow checkpoints in Azure AI Foundry managed storage. + +With Foundry managed checkpoints, workflow state is stored remotely so long-running conversations can +resume even after the host process restarts, without managing your own storage backend. 
+ +### What `main.py` does + +- Builds a workflow with `WorkflowBuilder` (writer + reviewer) +- Creates a `FoundryCheckpointRepository` pointed at your Azure AI Foundry project +- Passes both to `from_agent_framework(..., checkpoint_repository=...)` so the adapter spins up an + HTTP server (defaults to `0.0.0.0:8088`) + +--- + +## Prerequisites + +- Python 3.10+ +- Azure CLI authenticated with `az login` (required for `AzureCliCredential`) +- An Azure AI Foundry project with a chat model deployment + +--- + +## Setup + +1. Create a `.env` file in this folder: + ``` + AZURE_AI_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + AZURE_AI_MODEL_DEPLOYMENT_NAME= + ``` + +2. Install dependencies: + ```bash + pip install azure-ai-agentserver-agentframework agent-framework-azure-ai azure-identity python-dotenv + ``` + +--- + +## Run the Workflow Agent + +From this folder: + +```bash +python main.py +``` + +The adapter starts the server on `http://0.0.0.0:8088` by default. + +--- + +## Send Requests + +**Non-streaming:** + +```bash +curl -sS \ + -H "Content-Type: application/json" \ + -X POST http://localhost:8088/responses \ + -d '{ + "agent": {"name": "local_agent", "type": "agent_reference"}, + "stream": false, + "input": "Write a short blog post about cloud-native AI applications", + "conversation": {"id": "test-conversation-1"} + }' +``` + +The `conversation.id` ties requests to the same checkpoint session. Subsequent requests with the same +ID will resume the workflow from its last checkpoint. 
+ +--- + +## Checkpoint Repository Options + +The `checkpoint_repository` parameter in `from_agent_framework` accepts any `CheckpointRepository` implementation: + +| Repository | Use case | +|---|---| +| `InMemoryCheckpointRepository()` | Quick demos; checkpoints vanish when the process exits | +| `FileCheckpointRepository("")` | Local file-based persistence | +| `FoundryCheckpointRepository(project_endpoint, credential)` | Azure AI Foundry managed remote storage (this sample) | + +--- + +## Related Resources + +- Agent Framework repo: https://github.com/microsoft/agent-framework +- Adapter package docs: `azure.ai.agentserver.agentframework` in this SDK diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_with_foundry_checkpoints/main.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_with_foundry_checkpoints/main.py new file mode 100644 index 000000000000..586c31b8d4b7 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/workflow_with_foundry_checkpoints/main.py @@ -0,0 +1,84 @@ +# Copyright (c) Microsoft. All rights reserved. + +""" +Workflow Agent with Foundry Managed Checkpoints + +This sample demonstrates how to use FoundryCheckpointRepository with +a WorkflowBuilder agent to persist workflow checkpoints in Azure AI Foundry. + +Foundry managed checkpoints enable workflow state to be persisted across +requests, allowing workflows to be paused, resumed, and replayed. + +Prerequisites: + - Set AZURE_AI_PROJECT_ENDPOINT to your Azure AI Foundry project endpoint + e.g. "https://.services.ai.azure.com/api/projects/" + - Azure credentials configured (e.g. 
az login) +""" + +import asyncio +import os + +from dotenv import load_dotenv + +from agent_framework import ChatAgent, WorkflowBuilder +from agent_framework.azure import AzureAIAgentClient +from azure.identity.aio import AzureCliCredential + +from azure.ai.agentserver.agentframework import from_agent_framework +from azure.ai.agentserver.agentframework.persistence import FoundryCheckpointRepository + +load_dotenv() + + +def create_writer_agent(client: AzureAIAgentClient) -> ChatAgent: + """Create a writer agent that generates content.""" + return client.create_agent( + name="Writer", + instructions=( + "You are an excellent content writer. " + "You create new content and edit contents based on the feedback." + ), + ) + + +def create_reviewer_agent(client: AzureAIAgentClient) -> ChatAgent: + """Create a reviewer agent that provides feedback.""" + return client.create_agent( + name="Reviewer", + instructions=( + "You are an excellent content reviewer. " + "Provide actionable feedback to the writer about the provided content. " + "Provide the feedback in the most concise manner possible." + ), + ) + + +async def main() -> None: + """Run the workflow agent with Foundry managed checkpoints.""" + project_endpoint = os.getenv("AZURE_AI_PROJECT_ENDPOINT", "") + + async with AzureCliCredential() as cred, AzureAIAgentClient(credential=cred) as client: + builder = ( + WorkflowBuilder() + .register_agent(lambda: create_writer_agent(client), name="writer") + .register_agent(lambda: create_reviewer_agent(client), name="reviewer", output_response=True) + .set_start_executor("writer") + .add_edge("writer", "reviewer") + ) + + # Use FoundryCheckpointRepository for Azure AI Foundry managed storage. + # This persists workflow checkpoints remotely, enabling pause/resume + # across requests and server restarts. 
+ checkpoint_repository = FoundryCheckpointRepository( + project_endpoint=project_endpoint, + credential=cred, + ) + + await from_agent_framework( + builder, + checkpoint_repository=checkpoint_repository, + ).run_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/simple_agent_with_foundry_checkpointer/README.md b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/simple_agent_with_foundry_checkpointer/README.md new file mode 100644 index 000000000000..b56576c45fc0 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/simple_agent_with_foundry_checkpointer/README.md @@ -0,0 +1,80 @@ +# Simple LangGraph Agent with Foundry Managed Checkpointer + +This sample hosts a LangGraph ReAct-style agent and uses `FoundryCheckpointSaver` to persist +checkpoints in Azure AI Foundry managed storage. + +With Foundry managed checkpoints, graph state is stored remotely so conversations can resume across +requests and server restarts without self-managed storage. + +### What `main.py` does + +- Creates an `AzureChatOpenAI` model and two tools (`get_word_length`, `calculator`) +- Builds a LangGraph agent with `create_react_agent(..., checkpointer=saver)` +- Creates `FoundryCheckpointSaver(project_endpoint, credential)` and runs the server via + `from_langgraph(...).run_async()` + +--- + +## Prerequisites + +- Python 3.10+ +- Azure CLI authenticated with `az login` (required for `AzureCliCredential`) +- An Azure AI Foundry project endpoint +- An Azure OpenAI chat deployment (for example `gpt-4o`) + +--- + +## Setup + +1. Create a `.env` file in this folder: + ```env + AZURE_AI_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ + AZURE_OPENAI_ENDPOINT=https://.openai.azure.com/ + AZURE_OPENAI_API_KEY= + OPENAI_API_VERSION=2025-03-01-preview + AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=gpt-4o + ``` + +2. 
Install dependencies: + ```bash + pip install azure-ai-agentserver-langgraph python-dotenv azure-identity langgraph + ``` + +--- + +## Run the Agent + +From this folder: + +```bash +python main.py +``` + +The adapter starts the server on `http://0.0.0.0:8088` by default. + +--- + +## Send Requests + +Non-streaming example: + +```bash +curl -sS \ + -H "Content-Type: application/json" \ + -X POST http://localhost:8088/responses \ + -d '{ + "agent": {"name": "local_agent", "type": "agent_reference"}, + "stream": false, + "input": "What is (15 * 4) + 6?", + "conversation": {"id": "test-conversation-1"} + }' +``` + +Use the same `conversation.id` on follow-up requests to continue the checkpointed conversation state. + +--- + +## Related Resources + +- LangGraph docs: https://langchain-ai.github.io/langgraph/ +- Adapter package docs: `azure.ai.agentserver.langgraph` in this SDK diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/simple_agent_with_foundry_checkpointer/main.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/simple_agent_with_foundry_checkpointer/main.py new file mode 100644 index 000000000000..10bbb3d22712 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/simple_agent_with_foundry_checkpointer/main.py @@ -0,0 +1,82 @@ +# Copyright (c) Microsoft. All rights reserved. + +""" +Simple Agent with Foundry Managed Checkpointer + +This sample demonstrates how to use FoundryCheckpointSaver with a LangGraph +agent to persist checkpoints in Azure AI Foundry. + +Foundry managed checkpoints enable graph state to be persisted across +requests, allowing conversations to be paused, resumed, and replayed. + +Prerequisites: + - Set AZURE_AI_PROJECT_ENDPOINT to your Azure AI Foundry project endpoint + e.g. "https://.services.ai.azure.com/api/projects/" + - Set AZURE_OPENAI_CHAT_DEPLOYMENT_NAME (defaults to "gpt-4o") + - Azure credentials configured (e.g. 
az login) +""" + +import asyncio +import os + +from dotenv import load_dotenv +from langchain_core.tools import tool +from langchain_openai import AzureChatOpenAI +from azure.identity.aio import AzureCliCredential + +from azure.ai.agentserver.langgraph import from_langgraph +from azure.ai.agentserver.langgraph.checkpointer import FoundryCheckpointSaver + +load_dotenv() + +deployment_name = os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", "gpt-4o") +model = AzureChatOpenAI(model=deployment_name) + + +@tool +def get_word_length(word: str) -> int: + """Returns the length of a word.""" + return len(word) + + +@tool +def calculator(expression: str) -> str: + """Evaluates a mathematical expression.""" + try: + result = eval(expression) # noqa: S307 + return str(result) + except Exception as e: + return f"Error: {str(e)}" + + +tools = [get_word_length, calculator] + + +def create_agent(checkpointer): + """Create a react agent with the given checkpointer.""" + from langgraph.prebuilt import create_react_agent + + return create_react_agent(model, tools, checkpointer=checkpointer) + + +async def main() -> None: + """Run the agent with Foundry managed checkpoints.""" + project_endpoint = os.getenv("AZURE_AI_PROJECT_ENDPOINT", "") + + async with AzureCliCredential() as cred: + # Use FoundryCheckpointSaver for Azure AI Foundry managed storage. + # This persists graph checkpoints remotely, enabling pause/resume + # across requests and server restarts. 
+ saver = FoundryCheckpointSaver( + project_endpoint=project_endpoint, + credential=cred, + ) + + # Pass the checkpointer via LangGraph's native compile/create API + executor = create_agent(checkpointer=saver) + + await from_langgraph(executor).run_async() + + +if __name__ == "__main__": + asyncio.run(main()) From 3a5e0451b491b8a129877de7b43f26788d549b13 Mon Sep 17 00:00:00 2001 From: melionel Date: Fri, 6 Feb 2026 15:40:52 -0800 Subject: [PATCH 93/94] minor improve logging (#45063) Co-authored-by: Declan --- .../azure/ai/agentserver/core/logger.py | 34 +++++++++++-------- .../azure/ai/agentserver/core/server/base.py | 11 ++++-- 2 files changed, 29 insertions(+), 16 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py index e15ccd86f9cc..b9b0153f281a 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py @@ -42,9 +42,11 @@ def get_dimensions(): return res -def get_project_endpoint(): +def get_project_endpoint(logger=None): project_endpoint = os.environ.get(Constants.AZURE_AI_PROJECT_ENDPOINT) if project_endpoint: + if logger: + logger.info(f"Using project endpoint from {Constants.AZURE_AI_PROJECT_ENDPOINT}: {project_endpoint}") return project_endpoint project_resource_id = os.environ.get(Constants.AGENT_PROJECT_RESOURCE_ID) if project_resource_id: @@ -52,35 +54,39 @@ def get_project_endpoint(): parts = last_part.split("@") if len(parts) < 2: - print(f"invalid project resource id: {project_resource_id}") + if logger: + logger.warning(f"Invalid project resource id format: {project_resource_id}") return None account = parts[0] project = parts[1] - return f"https://{account}.services.ai.azure.com/api/projects/{project}" - print("environment variable AGENT_PROJECT_RESOURCE_ID not set.") + endpoint = 
f"https://{account}.services.ai.azure.com/api/projects/{project}" + if logger: + logger.info(f"Using project endpoint derived from {Constants.AGENT_PROJECT_RESOURCE_ID}: {endpoint}") + return endpoint return None -def get_application_insights_connstr(): +def get_application_insights_connstr(logger=None): try: conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME) if not conn_str: - print(f"environment variable {APPINSIGHT_CONNSTR_ENV_NAME} not set.") - project_endpoint = get_project_endpoint() + project_endpoint = get_project_endpoint(logger=logger) if project_endpoint: # try to get the project connected application insights from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential - project_client = AIProjectClient(credential=DefaultAzureCredential(), endpoint=project_endpoint) conn_str = project_client.telemetry.get_application_insights_connection_string() - if not conn_str: - print(f"no connected application insights found for project:{project_endpoint}") - else: + if not conn_str and logger: + logger.info(f"No Application Insights connection found for project: {project_endpoint}") + elif conn_str: os.environ[APPINSIGHT_CONNSTR_ENV_NAME] = conn_str + elif logger: + logger.info("Application Insights not configured, telemetry export disabled.") return conn_str except Exception as e: - print(f"failed to get application insights with error: {e}") + if logger: + logger.warning(f"Failed to get Application Insights connection string, telemetry export disabled: {e}") return None @@ -107,8 +113,9 @@ def configure(log_config: dict = default_log_config): """ try: config.dictConfig(log_config) + app_logger = logging.getLogger("azure.ai.agentserver") - application_insights_connection_string = get_application_insights_connstr() + application_insights_connection_string = get_application_insights_connstr(logger=app_logger) enable_application_insights_logger = ( os.environ.get(Constants.ENABLE_APPLICATION_INSIGHTS_LOGGER, "true").lower() == 
"true" ) @@ -137,7 +144,6 @@ def configure(log_config: dict = default_log_config): handler.addFilter(custom_filter) # Only add to azure.ai.agentserver namespace to avoid infrastructure logs - app_logger = logging.getLogger("azure.ai.agentserver") app_logger.setLevel(get_log_level()) app_logger.addHandler(handler) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 4f0032b64bd3..74688d7772f1 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -99,7 +99,7 @@ def __init__(self, credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] = None, project_endpoint: Optional[str] = None) -> None: self.credentials = AsyncTokenCredentialAdapter(credentials) if credentials else AsyncDefaultTokenCredential() - project_endpoint = get_project_endpoint() or project_endpoint + project_endpoint = get_project_endpoint(logger=logger) or project_endpoint AgentServerContext(create_tool_runtime(project_endpoint, self.credentials)) async def runs_endpoint(request): @@ -198,9 +198,14 @@ async def readiness_endpoint(request): self.app.add_middleware(AgentRunContextMiddleware, agent=self) @self.app.on_event("startup") - async def attach_appinsights_logger(): + async def on_startup(): import logging + # Log server started successfully + port = getattr(self, '_port', 'unknown') + logger.info(f"FoundryCBAgent server started successfully on port {port}") + + # Attach App Insights handler to uvicorn loggers for handler in logger.handlers: if handler.name == "appinsights_handler": for logger_name in ["uvicorn", "uvicorn.error", "uvicorn.access"]: @@ -348,6 +353,7 @@ async def run_async( self.init_tracing() config = uvicorn.Config(self.app, host="0.0.0.0", port=port, loop="asyncio") server = uvicorn.Server(config) + 
self._port = port logger.info(f"Starting FoundryCBAgent server async on port {port}") await server.serve() @@ -363,6 +369,7 @@ def run(self, port: int = int(os.environ.get("DEFAULT_AD_PORT", 8088))) -> None: :type port: int """ self.init_tracing() + self._port = port logger.info(f"Starting FoundryCBAgent server on port {port}") uvicorn.run(self.app, host="0.0.0.0", port=port) From 4cb2c9882fa8c1afe5a2f84b33ab8fcde0b49344 Mon Sep 17 00:00:00 2001 From: Ganesh Bheemarasetty <1634042+ganeshyb@users.noreply.github.com> Date: Fri, 6 Feb 2026 18:08:07 -0800 Subject: [PATCH 94/94] Fix: Handle empty schema type in LangGraph tool resolver (#45068) Applies the same guard as the agentframework fix (PR #45051) to the LangGraph tool resolver. When an MCP tool manifest contains a property with an empty or unrecognised schema type, the resolver now skips that field with a warning instead of crashing with AttributeError. --- .../azure/ai/agentserver/langgraph/tools/_resolver.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py index 1e5b5d4e351f..5c77b1339132 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py @@ -4,6 +4,8 @@ from collections import defaultdict from typing import Any, Dict, Iterable, List, Optional, Tuple, Union, overload +import logging + from langchain_core.tools import BaseTool, StructuredTool from pydantic import BaseModel, Field, create_model @@ -128,6 +130,10 @@ def _create_pydantic_model(cls, tool_name: str, input_schema: SchemaDefinition) field_definitions: Dict[str, Any] = {} required_fields = input_schema.required or set() for prop_name, prop in input_schema.properties.items(): + if prop.type is 
None: + logging.getLogger(__name__).warning( + "Skipping field '%s' in tool '%s': unknown or empty schema type.", prop_name, tool_name) + continue py_type = prop.type.py_type default = ... if prop_name in required_fields else None field_definitions[prop_name] = (py_type, Field(default, description=prop.description))