Skip to content
Open
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
44 changes: 42 additions & 2 deletions src/google/adk/cli/cli_tools_click.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,12 @@
import os
from pathlib import Path
import tempfile
import textwrap
from typing import Optional
from typing import Optional, TYPE_CHECKING

if TYPE_CHECKING:
from ..apps.app import App

import textwrap
import click
from click.core import ParameterSource
from fastapi import FastAPI
Expand Down Expand Up @@ -515,6 +518,34 @@ def cli_run(
)
)

def _load_app_from_module(module_path: str) -> Optional['App']:
  """Try to load an App instance from the agent module.

  Looks for a module-level attribute named ``app`` (the most common
  convention) and returns it only if it is an ``App`` instance.

  Args:
    module_path: Python module path (e.g., 'my_package.my_agent').

  Returns:
    The module's ``app`` attribute if it exists and is an ``App`` instance,
    otherwise None. Import/attribute failures are swallowed (logged at
    debug level) so callers can fall back to using root_agent directly.
  """
  try:
    import importlib

    module = importlib.import_module(module_path)

    # Single lookup instead of hasattr + getattr; None means "not present".
    candidate = getattr(module, 'app', None)
    if candidate is not None:
      # Imported lazily to avoid a hard dependency at CLI import time.
      from ..apps.app import App

      if isinstance(candidate, App):
        # Lazy %-style args: the message is only formatted when emitted.
        logger.info('Loaded App instance from %s', module_path)
        return candidate

    logger.debug('No App instance found in %s', module_path)

  except (ImportError, AttributeError) as e:
    # Best-effort: a missing/broken module is not fatal here.
    logger.debug('Could not load App from module %s: %s', module_path, e)

  return None


def eval_options():
"""Decorator to add common eval options to click commands."""
Expand Down Expand Up @@ -733,10 +764,19 @@ def cli_eval(
)

try:
# Try to load App if available (for plugin support like ReflectAndRetryToolPlugin)
app = _load_app_from_module(agent_module_file_path)

if app:
logger.info("Using App instance for evaluation (plugins will be applied)")
else:
logger.info("No App found, using root_agent directly")

eval_service = LocalEvalService(
root_agent=root_agent,
eval_sets_manager=eval_sets_manager,
eval_set_results_manager=eval_set_results_manager,
app=app, # NEW: Pass app if available
user_simulator_provider=user_simulator_provider,
)

Expand Down
89 changes: 86 additions & 3 deletions src/google/adk/evaluation/evaluation_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,11 @@

import copy
import importlib
from typing import Any
from typing import AsyncGenerator
from typing import Optional
from typing import Any, AsyncGenerator, Optional, TYPE_CHECKING

if TYPE_CHECKING:
from ..apps.app import App

import uuid

from google.genai.types import Content
Expand All @@ -39,6 +41,7 @@
from .app_details import AgentDetails
from .app_details import AppDetails
from .eval_case import EvalCase
from .eval_case import IntermediateData
from .eval_case import Invocation
from .eval_case import InvocationEvent
from .eval_case import InvocationEvents
Expand Down Expand Up @@ -325,6 +328,85 @@ def convert_events_to_eval_invocations(
)

return invocations

@staticmethod
async def _generate_inferences_from_app(
    invocations: list['Invocation'],
    app: 'App',
    initial_session: Optional['SessionInput'],
    session_id: str,
    session_service: 'BaseSessionService',
    artifact_service: 'BaseArtifactService',
    memory_service: 'BaseMemoryService',
) -> list['Invocation']:
  """Generate inferences by running each invocation through an App.

  Routing through the App (via Runner) preserves any configured plugins,
  unlike invoking the root agent directly.

  Args:
    invocations: Expected invocations whose user content is replayed.
    app: The App whose agent (and plugins) produce the responses.
    initial_session: Optional session seed (user id, app name, state).
    session_id: Id of the session to create/use for all invocations.
    session_service: Backing session service for the Runner.
    artifact_service: Backing artifact service for the Runner.
    memory_service: Backing memory service for the Runner.

  Returns:
    One actual Invocation per input invocation, capturing the final
    response and intermediate tool uses/responses.
  """
  from ..runners import Runner

  # Resolve the user id once so session creation and runs agree.
  user_id = 'test_user_id'
  if initial_session and initial_session.user_id is not None:
    user_id = initial_session.user_id

  # Seed the session when a seed was supplied.
  # NOTE(review): when initial_session is None no session is created here —
  # presumably Runner/session_service handles that; confirm against callers.
  if initial_session:
    await session_service.create_session(
        app_name=initial_session.app_name or app.name,
        user_id=user_id,
        session_id=session_id,
        state=initial_session.state or {},
    )

  # Runner built from the App so all plugins are applied on each run.
  runner = Runner(
      app=app,
      session_service=session_service,
      artifact_service=artifact_service,
      memory_service=memory_service,
  )

  results = []
  for expected in invocations:
    user_content = expected.user_content

    event_stream = runner.run_async(
        user_id=user_id,
        session_id=session_id,
        new_message=user_content,
    )

    # Fold the event stream into a final response plus tool activity,
    # mirroring the root-agent inference path.
    final_response = None
    tool_uses = []
    tool_responses = []
    invocation_id = ""

    async for event in event_stream:
      if not invocation_id:
        invocation_id = event.invocation_id

      if event.is_final_response() and event.content and event.content.parts:
        final_response = event.content
      elif function_calls := event.get_function_calls():
        tool_uses.extend(function_calls)
      elif function_responses := event.get_function_responses():
        tool_responses.extend(function_responses)

    results.append(
        Invocation(
            invocation_id=invocation_id,
            user_content=user_content,
            final_response=final_response,
            intermediate_data=IntermediateData(
                tool_uses=tool_uses, tool_responses=tool_responses
            ),
        )
    )

  return results


@staticmethod
def _get_app_details_by_invocation_id(
Expand Down Expand Up @@ -413,3 +495,4 @@ def _process_query_with_session(session_data, data):
responses[index]["actual_tool_use"] = actual_tool_uses
responses[index]["response"] = response
return responses

50 changes: 36 additions & 14 deletions src/google/adk/evaluation/local_eval_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,10 @@
from typing import AsyncGenerator
from typing import Callable
from typing import Optional
from typing import TYPE_CHECKING

if TYPE_CHECKING:
from ..apps.app import App
import uuid

from typing_extensions import override
Expand All @@ -41,6 +45,7 @@
from .base_eval_service import InferenceResult
from .base_eval_service import InferenceStatus
from .eval_case import Invocation
from .eval_case import SessionInput
from .eval_metrics import EvalMetric
from .eval_metrics import EvalMetricResult
from .eval_metrics import EvalMetricResultDetails
Expand Down Expand Up @@ -79,11 +84,13 @@ def __init__(
artifact_service: Optional[BaseArtifactService] = None,
eval_set_results_manager: Optional[EvalSetResultsManager] = None,
session_id_supplier: Callable[[], str] = _get_session_id,
app: Optional['App'] = None,
user_simulator_provider: UserSimulatorProvider = UserSimulatorProvider(),
memory_service: Optional[BaseMemoryService] = None,
):
self._root_agent = root_agent
self._eval_sets_manager = eval_sets_manager
self._app = app
metric_evaluator_registry = (
metric_evaluator_registry or DEFAULT_METRIC_EVALUATOR_REGISTRY
)
Expand Down Expand Up @@ -406,25 +413,40 @@ async def _perform_inference_single_eval_item(
)

try:
with client_label_context(EVAL_CLIENT_LABEL):
inferences = (
await EvaluationGenerator._generate_inferences_from_root_agent(
root_agent=root_agent,
user_simulator=self._user_simulator_provider.provide(eval_case),
initial_session=initial_session,
session_id=session_id,
session_service=self._session_service,
artifact_service=self._artifact_service,
memory_service=self._memory_service,
)
)
# Use App if available (so plugins like ReflectAndRetryToolPlugin run)
if self._app is not None:
inferences = (
await EvaluationGenerator._generate_inferences_from_app(
invocations=eval_case.conversation,
app=self._app,
initial_session=initial_session,
session_id=session_id,
session_service=self._session_service,
artifact_service=self._artifact_service,
memory_service=self._memory_service,
)
)
else:
# Fallback to direct root_agent usage (existing behavior)
with client_label_context(EVAL_CLIENT_LABEL):
inferences = (
await EvaluationGenerator._generate_inferences_from_root_agent(
root_agent=root_agent,
user_simulator=self._user_simulator_provider.provide(eval_case),
initial_session=initial_session,
session_id=session_id,
session_service=self._session_service,
artifact_service=self._artifact_service,
memory_service=self._memory_service,
)
)

inference_result.inferences = inferences
inference_result.status = InferenceStatus.SUCCESS

return inference_result
except Exception as e:
# We intentionally catch the Exception as we don't failures to affect
# We intentionally catch the Exception as we don't want failures to affect
# other inferences.
logger.error(
'Inference failed for eval case `%s` with error %s.',
Expand All @@ -434,4 +456,4 @@ async def _perform_inference_single_eval_item(
)
inference_result.status = InferenceStatus.FAILURE
inference_result.error_message = str(e)
return inference_result
return inference_result