From e663375cc08127d2864301f82facad0cf0ac7eff Mon Sep 17 00:00:00 2001 From: fenilfaldu Date: Fri, 6 Jun 2025 04:24:53 +0530 Subject: [PATCH 01/14] base instrumentation --- agentops/instrumentation/__init__.py | 5 + agentops/instrumentation/agno/__init__.py | 75 ++ .../agno/attributes/__init__.py | 17 + .../instrumentation/agno/attributes/agent.py | 271 ++++++ .../agno/attributes/metrics.py | 236 ++++++ .../instrumentation/agno/attributes/model.py | 371 ++++++++ .../instrumentation/agno/attributes/team.py | 235 ++++++ .../instrumentation/agno/attributes/tool.py | 649 ++++++++++++++ .../agno/attributes/workflow.py | 232 +++++ agentops/instrumentation/agno/instrumentor.py | 794 ++++++++++++++++++ 10 files changed, 2885 insertions(+) create mode 100644 agentops/instrumentation/agno/__init__.py create mode 100644 agentops/instrumentation/agno/attributes/__init__.py create mode 100644 agentops/instrumentation/agno/attributes/agent.py create mode 100644 agentops/instrumentation/agno/attributes/metrics.py create mode 100644 agentops/instrumentation/agno/attributes/model.py create mode 100644 agentops/instrumentation/agno/attributes/team.py create mode 100644 agentops/instrumentation/agno/attributes/tool.py create mode 100644 agentops/instrumentation/agno/attributes/workflow.py create mode 100644 agentops/instrumentation/agno/instrumentor.py diff --git a/agentops/instrumentation/__init__.py b/agentops/instrumentation/__init__.py index d4e271f3d..87a53cf19 100644 --- a/agentops/instrumentation/__init__.py +++ b/agentops/instrumentation/__init__.py @@ -208,6 +208,11 @@ class InstrumentorConfig(TypedDict): "class_name": "GoogleADKInstrumentor", "min_version": "0.1.0", }, + "agno": { + "module_name": "agentops.instrumentation.agno", + "class_name": "AgnoInstrumentor", + "min_version": "0.1.0", + }, } # Combine all target packages for monitoring diff --git a/agentops/instrumentation/agno/__init__.py b/agentops/instrumentation/agno/__init__.py new file mode 100644 index 
"""Agno Agent instrumentation package."""

import logging

logger = logging.getLogger(__name__)

__version__ = "1.0.0"

LIBRARY_NAME = "agno"
LIBRARY_VERSION = __version__

from .instrumentor import AgnoInstrumentor


def get_current_agno_context():
    """
    Get the current Agno agent or workflow context for use by other instrumentations.

    This function allows other instrumentations (like LLM providers) to find and use
    the current agent or team context for proper parent-child span relationships.

    Returns:
        tuple: (context, span) if found, (None, None) otherwise
    """
    try:
        from opentelemetry import context as otel_context, trace

        active_ctx = otel_context.get_current()
        active_span = trace.get_current_span(active_ctx)

        # An agno-owned span is recognised purely by its name prefix.
        if not (active_span and hasattr(active_span, 'name')):
            return None, None

        span_name = getattr(active_span, 'name', '')
        markers = ('agno.agent.run', 'agno.team.run', 'agno.workflow')
        if not any(marker in span_name for marker in markers):
            return None, None

        logger.debug(f"Found active agno span: {span_name}")
        return active_ctx, active_span

    except Exception as e:
        # Best-effort lookup: context discovery must never break the caller.
        logger.debug(f"Error getting agno context: {e}")
        return None, None


def get_agno_context_by_session(session_id: str):
    """
    Legacy function for backward compatibility.

    Args:
        session_id: Session identifier

    Returns:
        tuple: (None, None) - not supported in new implementation
    """
    logger.debug("get_agno_context_by_session is deprecated - context is managed automatically")
    return None, None


# Export attribute handlers for external use
from .attributes.agent import get_agent_run_attributes
from .attributes.team import get_team_run_attributes, get_team_public_run_attributes
from .attributes.tool import get_tool_execution_attributes
from .attributes.metrics import get_metrics_attributes

__all__ = [
    "AgnoInstrumentor",
    "LIBRARY_NAME",
    "LIBRARY_VERSION",
    "get_current_agno_context",
    "get_agno_context_by_session",
    "get_agent_run_attributes",
    "get_team_run_attributes",
    "get_team_public_run_attributes",
    "get_tool_execution_attributes",
    "get_metrics_attributes",
]
def get_agent_run_attributes(
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    return_value: Optional[Any] = None,
) -> AttributeMap:
    """Extract span attributes for Agent.run/arun calls.

    Args:
        args: Positional arguments passed to the run method (self, message, ...)
        kwargs: Keyword arguments passed to the run method
        return_value: The return value from the run method (RunResponse)

    Returns:
        A dictionary of span attributes to be set on the agent span
    """
    import json  # function-local: agent.py has no module-level json import

    attributes: AttributeMap = {}

    # Base attributes
    attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = AgentOpsSpanKind.WORKFLOW
    attributes[SpanAttributes.LLM_SYSTEM] = "agno"
    attributes[SpanAttributes.LLM_REQUEST_STREAMING] = "False"

    # AgentOps entity attributes (matching CrewAI pattern)
    attributes[SpanAttributes.AGENTOPS_ENTITY_NAME] = "Agent"

    # --- Agent (self) details -------------------------------------------------
    if args and len(args) >= 1:
        agent = args[0]

        # Core agent identification
        if getattr(agent, 'agent_id', None):
            agent_id = str(agent.agent_id)
            attributes[AgentAttributes.AGENT_ID] = agent_id
            attributes["agno.agent.id"] = agent_id

        if getattr(agent, 'name', None):
            agent_name = str(agent.name)
            attributes[AgentAttributes.AGENT_NAME] = agent_name
            attributes["agno.agent.name"] = agent_name

        if getattr(agent, 'role', None):
            agent_role = str(agent.role)
            attributes[AgentAttributes.AGENT_ROLE] = agent_role
            attributes["agno.agent.role"] = agent_role

        # Model information
        model = getattr(agent, 'model', None)
        if model:
            if hasattr(model, 'id'):
                model_id = str(model.id)
                attributes[AgentAttributes.AGENT_MODELS] = model_id
                attributes["agno.agent.model_id"] = model_id
                attributes[SpanAttributes.LLM_RESPONSE_MODEL] = model_id

            if hasattr(model, 'provider'):
                attributes["agno.agent.model_provider"] = str(model.provider)
                # Request model mirrors the id when known (matches original logic).
                attributes[SpanAttributes.LLM_REQUEST_MODEL] = str(model.id) if hasattr(model, 'id') else 'unknown'

        # Agent configuration details (string values, length-limited)
        agent_config = {}

        if getattr(agent, 'description', None):
            agent_config["description"] = str(agent.description)[:500]  # limit length

        if getattr(agent, 'goal', None):
            agent_config["goal"] = str(agent.goal)[:500]  # limit length

        instructions = getattr(agent, 'instructions', None)
        if instructions:
            if isinstance(instructions, list):
                agent_config["instructions"] = " | ".join(str(i) for i in instructions[:3])  # first 3
            else:
                agent_config["instructions"] = str(instructions)[:500]

        if getattr(agent, 'expected_output', None):
            agent_config["expected_output"] = str(agent.expected_output)[:300]

        if hasattr(agent, 'markdown'):
            agent_config["markdown"] = str(agent.markdown)

        if hasattr(agent, 'reasoning'):
            agent_config["reasoning"] = str(agent.reasoning)

        if hasattr(agent, 'stream'):
            agent_config["stream"] = str(agent.stream)

        if hasattr(agent, 'retries'):
            agent_config["max_retry_limit"] = str(agent.retries)

        if getattr(agent, 'response_model', None):
            # FIX: response_model may be an instance rather than a class; a direct
            # `.__name__` access raised AttributeError in that case.
            agent_config["response_model"] = str(
                getattr(agent.response_model, '__name__', type(agent.response_model).__name__)
            )

        if hasattr(agent, 'show_tool_calls'):
            agent_config["show_tool_calls"] = str(agent.show_tool_calls)

        if getattr(agent, 'tool_call_limit', None):
            agent_config["tool_call_limit"] = str(agent.tool_call_limit)

        for key, value in agent_config.items():
            attributes[f"agno.agent.{key}"] = value

        # Tools information
        tools = getattr(agent, 'tools', None)
        if tools:
            tools_info = []
            tool_names = []

            for tool in tools:
                tool_dict = {}

                # Name resolution: explicit .name, then __name__, then generic callable.
                if hasattr(tool, 'name'):
                    tool_name = str(tool.name)
                elif hasattr(tool, '__name__'):
                    tool_name = str(tool.__name__)
                elif callable(tool):
                    tool_name = getattr(tool, '__name__', 'unknown_tool')
                else:
                    tool_name = None

                if tool_name is not None:
                    tool_dict["name"] = tool_name
                    tool_names.append(tool_name)

                if hasattr(tool, 'description'):
                    description = str(tool.description)
                    if len(description) > 200:
                        description = description[:197] + "..."
                    tool_dict["description"] = description

                if tool_dict:  # Only add if we have some info
                    tools_info.append(tool_dict)

            if tool_names:
                attributes["agent.tools_names"] = ",".join(tool_names[:5])  # limit to first 5
                attributes["agno.agent.tools_count"] = str(len(tool_names))

            if tools_info:
                # Limit to first 3 tools to avoid overly long attributes
                limited_tools = tools_info[:3]
                try:
                    attributes[AgentAttributes.AGENT_TOOLS] = json.dumps(limited_tools)
                except (TypeError, ValueError):
                    # FIX: was a bare `except:`; only JSON errors warrant the fallback.
                    attributes[AgentAttributes.AGENT_TOOLS] = str(tools_info)

        # Memory / knowledge / storage backends (type names only)
        if getattr(agent, 'memory', None):
            attributes["agno.agent.memory_type"] = type(agent.memory).__name__

        if getattr(agent, 'knowledge', None):
            attributes["agno.agent.knowledge_type"] = type(agent.knowledge).__name__

        if getattr(agent, 'storage', None):
            attributes["agno.agent.storage_type"] = type(agent.storage).__name__

        # Session information
        if getattr(agent, 'session_id', None):
            attributes["agno.agent.session_id"] = str(agent.session_id)

        if getattr(agent, 'user_id', None):
            attributes["agno.agent.user_id"] = str(agent.user_id)

    # --- Run input (the message argument) -------------------------------------
    if args and len(args) >= 2 and args[1]:
        message_str = str(args[1])
        if len(message_str) > 500:
            message_str = message_str[:497] + "..."
        attributes[WorkflowAttributes.WORKFLOW_INPUT] = message_str
        attributes["agno.agent.input"] = message_str
        # AgentOps entity input (matching CrewAI pattern)
        attributes[SpanAttributes.AGENTOPS_ENTITY_INPUT] = message_str

    # --- Keyword-argument overrides -------------------------------------------
    if kwargs:
        if kwargs.get('stream') is not None:
            attributes[SpanAttributes.LLM_REQUEST_STREAMING] = str(kwargs['stream'])

        if kwargs.get('session_id'):
            attributes["agno.agent.run_session_id"] = str(kwargs['session_id'])

        if kwargs.get('user_id'):
            attributes["agno.agent.run_user_id"] = str(kwargs['user_id'])

    # --- Response (RunResponse) details ---------------------------------------
    if return_value:
        if getattr(return_value, 'run_id', None):
            attributes["agno.agent.run_id"] = str(return_value.run_id)

        if getattr(return_value, 'session_id', None):
            attributes["agno.agent.response_session_id"] = str(return_value.session_id)

        if getattr(return_value, 'agent_id', None):
            attributes["agno.agent.response_agent_id"] = str(return_value.agent_id)

        if getattr(return_value, 'content', None):
            content = str(return_value.content)
            if len(content) > 500:
                content = content[:497] + "..."
            attributes[WorkflowAttributes.WORKFLOW_OUTPUT] = content
            attributes["agno.agent.output"] = content

        if getattr(return_value, 'event', None):
            attributes["agno.agent.event"] = str(return_value.event)

        # Tool executions from the response
        if getattr(return_value, 'tools', None):
            tool_executions = []
            for tool_exec in return_value.tools:
                tool_exec_dict = {}

                if getattr(tool_exec, 'tool_name', None):
                    tool_exec_dict["name"] = str(tool_exec.tool_name)

                if getattr(tool_exec, 'tool_args', None):
                    try:
                        args_str = json.dumps(tool_exec.tool_args)
                    except (TypeError, ValueError):
                        # FIX: bare `except:` narrowed to JSON serialization errors.
                        args_str = str(tool_exec.tool_args)
                    # FIX: truncation now applies to the fallback branch too.
                    if len(args_str) > 200:
                        args_str = args_str[:197] + "..."
                    tool_exec_dict["parameters"] = args_str

                if getattr(tool_exec, 'result', None):
                    result_str = str(tool_exec.result)
                    if len(result_str) > 200:
                        result_str = result_str[:197] + "..."
                    tool_exec_dict["result"] = result_str

                if getattr(tool_exec, 'tool_call_error', None):
                    tool_exec_dict["error"] = str(tool_exec.tool_call_error)

                tool_exec_dict["status"] = "success"  # default to success
                tool_executions.append(tool_exec_dict)

            # Flatten the first 3 executions into indexed attributes.
            for i, tool_exec in enumerate(tool_executions[:3]):
                for key, value in tool_exec.items():
                    attributes[f"agno.agent.tool_execution.{i}.{key}"] = value

    # Workflow type
    attributes[WorkflowAttributes.WORKFLOW_TYPE] = "agent_run"

    return attributes
def get_metrics_attributes(
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    return_value: Optional[Any] = None,
) -> AttributeMap:
    """Extract span attributes for Agent._set_session_metrics calls.

    Args:
        args: Positional arguments passed to the _set_session_metrics method (self, run_messages)
        kwargs: Keyword arguments passed to the _set_session_metrics method
        return_value: The return value from the _set_session_metrics method

    Returns:
        A dictionary of span attributes to be set on the metrics span
    """

    def _num(obj: Any, name: str):
        """Read a numeric attribute; missing/None/non-numeric reads as 0.

        FIX: the previous `hasattr(o, n) and o.n > 0` pattern raised TypeError
        whenever a provider reported the metric as None.
        """
        value = getattr(obj, name, 0)
        return value if isinstance(value, (int, float)) else 0

    attributes: AttributeMap = {}

    # Base attributes
    attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = "llm"
    attributes[SpanAttributes.LLM_SYSTEM] = "agno"
    attributes[SpanAttributes.AGENTOPS_ENTITY_NAME] = "LLM"

    # Initialize default gen_ai.usage attributes so they are always present.
    usage_attrs = {
        "prompt_tokens": 0,
        "completion_tokens": 0,
        "total_tokens": 0,
        "cache_read_input_tokens": 0,
        "reasoning_tokens": 0,
        "success_tokens": 0,
        "fail_tokens": 0,
        "indeterminate_tokens": 0,
    }

    # Counters for indexed gen_ai.prompt.{i} / gen_ai.completion.{i} attributes.
    prompt_count = 0
    completion_count = 0

    if args and len(args) >= 2:
        agent = args[0]         # self (Agent instance)
        run_messages = args[1]  # RunMessages object

        # --- Model request parameters -----------------------------------------
        model = getattr(agent, 'model', None)
        if model:
            if hasattr(model, 'id'):
                attributes[SpanAttributes.LLM_REQUEST_MODEL] = str(model.id)
                attributes[SpanAttributes.LLM_RESPONSE_MODEL] = str(model.id)
            if getattr(model, 'temperature', None) is not None:
                attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] = str(model.temperature)
            if getattr(model, 'max_tokens', None) is not None:
                attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] = str(model.max_tokens)
            if getattr(model, 'top_p', None) is not None:
                attributes[SpanAttributes.LLM_REQUEST_TOP_P] = str(model.top_p)
            if hasattr(model, 'provider'):
                attributes['agno.model.provider'] = str(model.provider)

        # --- Conversation structure & per-message token aggregation -----------
        # FIX: totals are initialized unconditionally; previously they were only
        # bound inside the `if messages:` branch, so the fallback aggregation
        # below could hit a NameError when the message list was empty.
        total_prompt_tokens = 0
        total_completion_tokens = 0
        total_output_tokens = 0
        total_input_tokens = 0
        total_tokens = 0
        total_time = 0.0

        messages = getattr(run_messages, 'messages', None)
        if messages:
            for msg in messages:
                if hasattr(msg, 'role') and hasattr(msg, 'content'):
                    raw = msg.content
                    has_content = raw is not None and str(raw).strip() != "" and str(raw) != "None"
                    if has_content:
                        content = str(raw)
                        # Truncate very long content to avoid oversized attributes.
                        if len(content) > 1000:
                            content = content[:997] + "..."

                    # 'user' and 'system' messages index as prompts; 'assistant'
                    # messages index as completions. None-content messages still
                    # record their role.
                    if msg.role == 'user':
                        attributes[f'gen_ai.prompt.{prompt_count}.role'] = 'user'
                        if has_content:
                            attributes[f'gen_ai.prompt.{prompt_count}.content'] = content
                        prompt_count += 1
                    elif msg.role == 'assistant':
                        attributes[f'gen_ai.completion.{completion_count}.role'] = 'assistant'
                        if has_content:
                            attributes[f'gen_ai.completion.{completion_count}.content'] = content
                        completion_count += 1
                    elif msg.role == 'system':
                        attributes[f'gen_ai.prompt.{prompt_count}.role'] = 'system'
                        if has_content:
                            attributes[f'gen_ai.prompt.{prompt_count}.content'] = content
                        prompt_count += 1

                # Token metrics vary by provider: OpenAI-style prompt/completion
                # counters vs Anthropic-style input/output counters.
                metrics = getattr(msg, 'metrics', None)
                if metrics:
                    total_prompt_tokens += _num(metrics, 'prompt_tokens')
                    total_completion_tokens += _num(metrics, 'completion_tokens')
                    total_tokens += _num(metrics, 'total_tokens')
                    total_output_tokens += _num(metrics, 'output_tokens')
                    total_input_tokens += _num(metrics, 'input_tokens')
                    total_time += _num(metrics, 'time')

        # --- Token metrics from agent session metrics (preferred source) ------
        session_metrics = getattr(agent, 'session_metrics', None)
        if session_metrics:
            # Try to get the model name from session metrics if not already set.
            if SpanAttributes.LLM_REQUEST_MODEL not in attributes and getattr(session_metrics, 'model', None):
                model_id = str(session_metrics.model)
                attributes[SpanAttributes.LLM_REQUEST_MODEL] = model_id
                attributes[SpanAttributes.LLM_RESPONSE_MODEL] = model_id

            session_prompt_tokens = _num(session_metrics, 'prompt_tokens')
            session_completion_tokens = _num(session_metrics, 'completion_tokens')
            session_output_tokens = _num(session_metrics, 'output_tokens')
            session_input_tokens = _num(session_metrics, 'input_tokens')
            session_total_tokens = _num(session_metrics, 'total_tokens')

            # Anthropic-style: output/input tokens stand in for completion/prompt.
            if session_output_tokens > 0 and session_completion_tokens == 0:
                session_completion_tokens = session_output_tokens
            if session_input_tokens > 0 and session_prompt_tokens == 0:
                session_prompt_tokens = session_input_tokens

            # Only override defaults when session metrics provide better data.
            if session_total_tokens > 0:
                usage_attrs["total_tokens"] = session_total_tokens
            if session_prompt_tokens > 0:
                usage_attrs["prompt_tokens"] = session_prompt_tokens
            if session_completion_tokens > 0:
                usage_attrs["completion_tokens"] = session_completion_tokens

            cached_tokens = _num(session_metrics, 'cached_tokens')
            if cached_tokens > 0:
                usage_attrs["cache_read_input_tokens"] = cached_tokens
            reasoning_tokens = _num(session_metrics, 'reasoning_tokens')
            if reasoning_tokens > 0:
                usage_attrs["reasoning_tokens"] = reasoning_tokens

            if session_total_tokens > 0:
                usage_attrs["success_tokens"] = session_total_tokens
                usage_attrs["fail_tokens"] = 0
                usage_attrs["indeterminate_tokens"] = 0

        # --- Fallback: aggregate from messages when session metrics are empty --
        if usage_attrs["total_tokens"] == 0:
            if total_prompt_tokens > 0 or total_input_tokens > 0:
                usage_attrs["prompt_tokens"] = total_prompt_tokens or total_input_tokens
            if total_completion_tokens > 0 or total_output_tokens > 0:
                usage_attrs["completion_tokens"] = total_completion_tokens or total_output_tokens
            if total_tokens > 0:
                usage_attrs["total_tokens"] = total_tokens

            # Total present but no breakdown (common with Anthropic).
            if usage_attrs["prompt_tokens"] == 0 and usage_attrs["completion_tokens"] == 0:
                if total_output_tokens > 0:
                    usage_attrs["completion_tokens"] = total_output_tokens
                    usage_attrs["prompt_tokens"] = max(0, total_tokens - total_output_tokens)
                elif total_tokens > 0:
                    # No breakdown at all: rough 70/30 completion/prompt split.
                    estimated_completion = int(total_tokens * 0.7)
                    usage_attrs["completion_tokens"] = estimated_completion
                    usage_attrs["prompt_tokens"] = total_tokens - estimated_completion

            if total_tokens > 0:
                usage_attrs["success_tokens"] = total_tokens
                usage_attrs["fail_tokens"] = 0
                usage_attrs["indeterminate_tokens"] = 0

        # --- User message ------------------------------------------------------
        user_message = getattr(run_messages, 'user_message', None)
        if user_message and hasattr(user_message, 'content'):
            content = str(user_message.content)
            if len(content) > 1000:
                content = content[:997] + "..."
            attributes['agno.metrics.user_input'] = content

    # Individual LLM usage attributes that AgentOps expects.
    attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = usage_attrs["prompt_tokens"]
    attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = usage_attrs["completion_tokens"]
    attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = usage_attrs["total_tokens"]
    attributes[SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS] = usage_attrs["cache_read_input_tokens"]
    attributes[SpanAttributes.LLM_USAGE_REASONING_TOKENS] = usage_attrs["reasoning_tokens"]

    # Nested and flattened gen_ai.usage.* copies for compatibility.
    attributes["gen_ai.usage"] = usage_attrs
    for key, value in usage_attrs.items():
        attributes[f"gen_ai.usage.{key}"] = value

    return attributes
def get_model_response_attributes(
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    return_value: Optional[Any] = None,
) -> AttributeMap:
    """Extract span attributes for Model.response method calls.

    Args:
        args: Positional arguments passed to the Model.response method
        kwargs: Keyword arguments passed to the Model.response method
        return_value: The return value from the Model.response method

    Returns:
        A dictionary of span attributes to be set on the LLM span
    """

    def _clip(value: Any, limit: int) -> str:
        """Stringify `value` and truncate to `limit` characters (with ellipsis)."""
        text = str(value)
        if len(text) > limit:
            text = text[: limit - 3] + "..."
        return text

    def _record_prompts(messages) -> None:
        """Index messages (dict- or attribute-style) as gen_ai.prompt.{i}.*."""
        for i, msg in enumerate(messages):
            if isinstance(msg, dict):
                if 'role' in msg:
                    attributes[f'gen_ai.prompt.{i}.role'] = msg['role']
                if 'content' in msg:
                    attributes[f'gen_ai.prompt.{i}.content'] = _clip(msg['content'], 1000)
            elif hasattr(msg, 'role') and hasattr(msg, 'content'):
                attributes[f'gen_ai.prompt.{i}.role'] = msg.role
                attributes[f'gen_ai.prompt.{i}.content'] = _clip(msg.content, 1000)

    attributes: AttributeMap = {}

    # Base attributes
    # FIX: removed stray `print("[DEBUG] ...")` left over from development.
    attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = AgentOpsSpanKind.LLM_CALL
    attributes[SpanAttributes.LLM_SYSTEM] = "agno"

    # --- Request-side data -----------------------------------------------------
    if kwargs:
        messages = kwargs.get('messages')
        if messages:
            _record_prompts(messages)

        if kwargs.get('response_format'):
            attributes['agno.model.response_format'] = str(kwargs['response_format'])

        tools = kwargs.get('tools')
        if tools:
            attributes['agno.model.tools_count'] = str(len(tools))
            for i, tool in enumerate(tools):
                if hasattr(tool, 'name'):
                    attributes[f'agno.model.tools.{i}.name'] = tool.name
                if hasattr(tool, 'description'):
                    attributes[f'agno.model.tools.{i}.description'] = _clip(tool.description, 200)

        functions = kwargs.get('functions')
        if functions:
            attributes['agno.model.functions_count'] = str(len(functions))
            for i, func in enumerate(functions):
                if hasattr(func, 'name'):
                    attributes[f'agno.model.functions.{i}.name'] = func.name

        if kwargs.get('tool_choice'):
            attributes['agno.model.tool_choice'] = str(kwargs['tool_choice'])

        if kwargs.get('tool_call_limit'):
            attributes['agno.model.tool_call_limit'] = str(kwargs['tool_call_limit'])

    # Positional form: the first argument is typically the message list.
    # FIX: guarded the kwargs lookup — previously `kwargs.get('messages')` raised
    # AttributeError when kwargs was None and args were supplied.
    if args and args[0] and not (kwargs and kwargs.get('messages')):
        if isinstance(args[0], list):
            _record_prompts(args[0])

    # --- Response-side data ----------------------------------------------------
    if return_value:
        # Completion content
        if hasattr(return_value, 'content'):
            attributes['gen_ai.completion.0.content'] = _clip(return_value.content, 1000)
            attributes['gen_ai.completion.0.role'] = 'assistant'

        # Usage metrics — capture all token types the provider exposes.
        if hasattr(return_value, 'usage'):
            usage = return_value.usage
            usage_fields = {
                'prompt_tokens': SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
                'completion_tokens': SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
                'total_tokens': SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
                'reasoning_tokens': SpanAttributes.LLM_USAGE_REASONING_TOKENS,
                'cached_tokens': SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS,
                'cache_creation_input_tokens': SpanAttributes.LLM_USAGE_CACHE_CREATION_INPUT_TOKENS,
            }
            for field, attr_key in usage_fields.items():
                if hasattr(usage, field):
                    attributes[attr_key] = getattr(usage, field)

        # response_usage, when present, overrides the basic counters.
        response_usage = getattr(return_value, 'response_usage', None)
        if response_usage:
            for field, attr_key in (
                ('prompt_tokens', SpanAttributes.LLM_USAGE_PROMPT_TOKENS),
                ('completion_tokens', SpanAttributes.LLM_USAGE_COMPLETION_TOKENS),
                ('total_tokens', SpanAttributes.LLM_USAGE_TOTAL_TOKENS),
            ):
                if hasattr(response_usage, field):
                    attributes[attr_key] = getattr(response_usage, field)

        if hasattr(return_value, 'finish_reason'):
            attributes[SpanAttributes.LLM_RESPONSE_FINISH_REASON] = return_value.finish_reason

        if hasattr(return_value, 'id'):
            attributes[SpanAttributes.LLM_RESPONSE_ID] = str(return_value.id)

        # Tool calls issued by the model
        tool_calls = getattr(return_value, 'tool_calls', None)
        if tool_calls:
            for i, tool_call in enumerate(tool_calls):
                function = getattr(tool_call, 'function', None)
                if function is None:
                    continue
                if hasattr(function, 'name'):
                    attributes[f'agno.model.response.tool_calls.{i}.name'] = function.name
                if hasattr(function, 'arguments'):
                    attributes[f'agno.model.response.tool_calls.{i}.arguments'] = _clip(function.arguments, 500)

        # Raw response for debugging (heavily truncated)
        if hasattr(return_value, 'raw'):
            attributes['agno.model.raw_response'] = _clip(return_value.raw, 2000)

    return attributes
run_messages.messages: + messages = run_messages.messages + print(f"[DEBUG] Found {len(messages)} messages") + + total_prompt_tokens = 0 + total_completion_tokens = 0 + total_tokens = 0 + total_time = 0 + + prompt_count = 0 + completion_count = 0 + + # Process each message + for i, msg in enumerate(messages): + print(f"[DEBUG] Message {i}: role={getattr(msg, 'role', 'unknown')}") + + # Extract message content for prompts/completions + if hasattr(msg, 'role') and hasattr(msg, 'content'): + # Only set content if it's not None/empty + if msg.content is not None and str(msg.content).strip() != "" and str(msg.content) != "None": + content = str(msg.content) + if len(content) > 1000: + content = content[:997] + "..." + + if msg.role == 'user': + attributes[f'gen_ai.prompt.{prompt_count}.role'] = 'user' + attributes[f'gen_ai.prompt.{prompt_count}.content'] = content + prompt_count += 1 + elif msg.role == 'assistant': + attributes[f'gen_ai.completion.{completion_count}.role'] = 'assistant' + attributes[f'gen_ai.completion.{completion_count}.content'] = content + completion_count += 1 + else: + # For messages with None content, still set the role but skip content + if msg.role == 'user': + attributes[f'gen_ai.prompt.{prompt_count}.role'] = 'user' + prompt_count += 1 + elif msg.role == 'assistant': + attributes[f'gen_ai.completion.{completion_count}.role'] = 'assistant' + completion_count += 1 + + # Extract token metrics from message + if hasattr(msg, 'metrics') and msg.metrics: + metrics = msg.metrics + print(f"[DEBUG] Message {i} metrics: {metrics}") + + # Handle different token metric patterns + if hasattr(metrics, 'prompt_tokens') and metrics.prompt_tokens > 0: + total_prompt_tokens += metrics.prompt_tokens + if hasattr(metrics, 'completion_tokens') and metrics.completion_tokens > 0: + total_completion_tokens += metrics.completion_tokens + if hasattr(metrics, 'total_tokens') and metrics.total_tokens > 0: + total_tokens += metrics.total_tokens + # For messages that only 
have output_tokens (like Anthropic) + if hasattr(metrics, 'output_tokens') and metrics.output_tokens > 0: + total_completion_tokens += metrics.output_tokens + if hasattr(metrics, 'time') and metrics.time: + total_time += metrics.time + + # Set aggregated token usage + if total_prompt_tokens > 0: + attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = total_prompt_tokens + attributes['agno.metrics.prompt_tokens'] = total_prompt_tokens + if total_completion_tokens > 0: + attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = total_completion_tokens + attributes['agno.metrics.completion_tokens'] = total_completion_tokens + if total_tokens > 0: + attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = total_tokens + + # Handle case where we have total but no breakdown (common with Anthropic) + if total_prompt_tokens == 0 and total_completion_tokens == 0: + # We'll try to get the breakdown from session_metrics later + print(f"[DEBUG] Total tokens ({total_tokens}) available but no breakdown - will try session_metrics") + elif total_prompt_tokens > 0 or total_completion_tokens > 0: + # Ensure totals are consistent + calculated_total = total_prompt_tokens + total_completion_tokens + if calculated_total != total_tokens: + print(f"[DEBUG] Token mismatch: calculated={calculated_total}, reported={total_tokens}") + # Use the more reliable total + attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = max(total_tokens, calculated_total) + + if total_time > 0: + attributes['agno.metrics.total_time'] = total_time + + print(f"[DEBUG] Aggregated tokens: prompt={total_prompt_tokens}, completion={total_completion_tokens}, total={total_tokens}, time={total_time}") + + # Extract user message info + if hasattr(run_messages, 'user_message') and run_messages.user_message: + user_msg = run_messages.user_message + if hasattr(user_msg, 'content'): + content = str(user_msg.content) + if len(content) > 1000: + content = content[:997] + "..." 
+ attributes['agno.metrics.user_input'] = content + + # Try to get agent instance from the call stack for additional data + import inspect + try: + for frame in inspect.stack(): + frame_locals = frame.frame.f_locals + # Look for agent instance in the call stack + for var_name, var_value in frame_locals.items(): + if (hasattr(var_value, 'session_metrics') and + hasattr(var_value, 'run') and + var_name in ['self', 'agent', 'instance']): + agent_instance = var_value + print(f"[DEBUG] Found agent instance in call stack: {type(agent_instance)}") + + # === MODEL INFO FROM AGENT === + if hasattr(agent_instance, 'model') and agent_instance.model: + model = agent_instance.model + if hasattr(model, 'id'): + attributes[SpanAttributes.LLM_REQUEST_MODEL] = str(model.id) + attributes[SpanAttributes.LLM_RESPONSE_MODEL] = str(model.id) + if hasattr(model, 'provider'): + attributes['agno.model.provider'] = str(model.provider) + + # === TOOLS INFO FROM AGENT === + if hasattr(agent_instance, 'tools') and agent_instance.tools: + tools = agent_instance.tools + attributes['agno.model.tools_count'] = str(len(tools)) + for i, tool in enumerate(tools): + if hasattr(tool, 'name'): + attributes[f'agno.model.tools.{i}.name'] = tool.name + + # === SESSION METRICS FROM AGENT (if available) === + if hasattr(agent_instance, 'session_metrics') and agent_instance.session_metrics: + session_metrics = agent_instance.session_metrics + print(f"[DEBUG] Found session_metrics on agent: {session_metrics}") + + # Use session metrics for more accurate token counts + session_prompt_tokens = getattr(session_metrics, 'prompt_tokens', 0) + session_completion_tokens = getattr(session_metrics, 'completion_tokens', 0) + session_output_tokens = getattr(session_metrics, 'output_tokens', 0) + session_total_tokens = getattr(session_metrics, 'total_tokens', 0) + + # For Anthropic, output_tokens represents completion tokens + if session_output_tokens > 0 and session_completion_tokens == 0: + session_completion_tokens = 
session_output_tokens + + # Only override if session metrics provide better breakdown + if session_total_tokens > 0: + attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = session_total_tokens + + # Set breakdown if available + if session_prompt_tokens > 0: + attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = session_prompt_tokens + attributes['agno.metrics.prompt_tokens'] = session_prompt_tokens + if session_completion_tokens > 0: + attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = session_completion_tokens + attributes['agno.metrics.completion_tokens'] = session_completion_tokens + + # If we have total but still no breakdown, estimate it + if session_prompt_tokens == 0 and session_completion_tokens > 0: + # All tokens are completion tokens (common for generative responses) + print(f"[DEBUG] Using all {session_total_tokens} tokens as completion tokens") + elif session_prompt_tokens > 0 and session_completion_tokens == 0: + # All tokens are prompt tokens (rare case) + print(f"[DEBUG] Using all {session_total_tokens} tokens as prompt tokens") + + if hasattr(session_metrics, 'time') and session_metrics.time: + attributes['agno.metrics.total_time'] = session_metrics.time + + break + except Exception as e: + print(f"[DEBUG] Error accessing call stack: {e}") + + print(f"[DEBUG] Final attributes keys: {list(attributes.keys())}") + return attributes \ No newline at end of file diff --git a/agentops/instrumentation/agno/attributes/team.py b/agentops/instrumentation/agno/attributes/team.py new file mode 100644 index 000000000..e327e633a --- /dev/null +++ b/agentops/instrumentation/agno/attributes/team.py @@ -0,0 +1,235 @@ +"""Agno Team run attributes handler.""" + +from typing import Optional, Tuple, Dict, Any + +from agentops.instrumentation.common.attributes import AttributeMap +from agentops.semconv import SpanAttributes, WorkflowAttributes +from agentops.semconv.span_kinds import SpanKind as AgentOpsSpanKind + + +def get_team_run_attributes( + args: 
def get_team_run_attributes(
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    return_value: Optional[Any] = None,
) -> AttributeMap:
    """Build span attributes for Team._run invocations.

    Args:
        args: Positional arguments of Team._run (args[0] is the run_response,
            args[1] the RunMessages bundle).
        kwargs: Keyword arguments of Team._run.
        return_value: The TeamRunResponse produced by Team._run.

    Returns:
        Attribute map to attach to the team workflow span.
    """
    out: AttributeMap = {
        SpanAttributes.AGENTOPS_SPAN_KIND: AgentOpsSpanKind.WORKFLOW,
        SpanAttributes.LLM_SYSTEM: "agno",
        WorkflowAttributes.WORKFLOW_TYPE: "team_run",
    }

    # The last user message in the bundle becomes the workflow input.
    if args and len(args) >= 2:
        bundle = args[1]
        if getattr(bundle, 'messages', None):
            user_msgs = [m for m in bundle.messages if getattr(m, 'role', None) == 'user']
            if user_msgs and hasattr(user_msgs[-1], 'content'):
                out[WorkflowAttributes.WORKFLOW_INPUT] = str(user_msgs[-1].content)
                out[WorkflowAttributes.WORKFLOW_INPUT_TYPE] = "message"
            out['agno.team.messages_count'] = str(len(bundle.messages))

    if kwargs:
        if kwargs.get('user_id'):
            out[SpanAttributes.LLM_USER] = kwargs['user_id']
        if kwargs.get('session_id'):
            out['agno.team.session_id'] = kwargs['session_id']
        if kwargs.get('response_format'):
            out['agno.team.response_format'] = str(type(kwargs['response_format']).__name__)

    if return_value:
        # Workflow output: prefer the response content, fall back to repr.
        if hasattr(return_value, 'content'):
            text = str(return_value.content)
            out[WorkflowAttributes.WORKFLOW_OUTPUT] = text if len(text) <= 1000 else text[:997] + "..."
            out[WorkflowAttributes.WORKFLOW_OUTPUT_TYPE] = "team_run_response"
        else:
            text = str(return_value)
            out[WorkflowAttributes.WORKFLOW_OUTPUT] = text if len(text) <= 1000 else text[:997] + "..."
            out[WorkflowAttributes.WORKFLOW_OUTPUT_TYPE] = type(return_value).__name__

        # Simple pass-through response attributes (stringified when present).
        response_fields = (
            ('run_id', 'agno.team.run_id'),
            ('session_id', 'agno.team.response_session_id'),
            ('team_id', 'agno.team.response_team_id'),
            ('model', SpanAttributes.LLM_RESPONSE_MODEL),
            ('model_provider', 'agno.team.model_provider'),
            ('event', 'agno.team.event'),
            ('content_type', 'agno.team.response_content_type'),
        )
        for field, key in response_fields:
            if hasattr(return_value, field):
                out[key] = str(getattr(return_value, field))

    return out
def get_team_public_run_attributes(
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    return_value: Optional[Any] = None,
) -> AttributeMap:
    """Build span attributes for the public Team.run API.

    Args:
        args: Positional arguments of Team.run (args[0] is the Team instance,
            args[1] the message).
        kwargs: Keyword arguments of Team.run.
        return_value: TeamRunResponse, or an iterator when streaming.

    Returns:
        Attribute map to attach to the team workflow span.
    """
    out: AttributeMap = {
        SpanAttributes.AGENTOPS_SPAN_KIND: AgentOpsSpanKind.WORKFLOW,
        SpanAttributes.LLM_SYSTEM: "agno",
        WorkflowAttributes.WORKFLOW_TYPE: "team_run",
    }

    if args and len(args) >= 2:
        team, message = args[0], args[1]

        # Team metadata (only recorded when set on the instance).
        if getattr(team, 'name', None):
            out['agno.team.name'] = str(team.name)
        if getattr(team, 'team_id', None):
            out['agno.team.team_id'] = str(team.team_id)
        if getattr(team, 'mode', None):
            out['agno.team.mode'] = str(team.mode)
        if getattr(team, 'members', None):
            out['agno.team.members_count'] = str(len(team.members))

        # Workflow input: normalize the message to a string by whichever
        # accessor the object supports.
        if message is not None:
            if isinstance(message, str):
                text = message
            elif hasattr(message, 'content'):
                text = str(message.content)
            elif hasattr(message, 'get_content_string'):
                text = message.get_content_string()
            else:
                text = str(message)
            if len(text) > 1000:
                text = text[:997] + "..."
            out[WorkflowAttributes.WORKFLOW_INPUT] = text
            out[WorkflowAttributes.WORKFLOW_INPUT_TYPE] = "message"

    if kwargs:
        if kwargs.get('user_id'):
            out[SpanAttributes.LLM_USER] = kwargs['user_id']
        if kwargs.get('session_id'):
            out['agno.team.session_id'] = kwargs['session_id']

        # Options recorded as strings when truthy.
        for key, attr in (('stream', 'agno.team.streaming'),
                          ('stream_intermediate_steps', 'agno.team.stream_intermediate_steps'),
                          ('retries', 'agno.team.retries')):
            if kwargs.get(key):
                out[attr] = str(kwargs[key])

        # Media attachments become boolean flags.
        for media in ('audio', 'images', 'videos', 'files'):
            if kwargs.get(media):
                out[f'agno.team.has_{media}'] = "true"

        if kwargs.get('knowledge_filters'):
            out['agno.team.has_knowledge_filters'] = "true"

    if return_value:
        if hasattr(return_value, '__iter__') and not isinstance(return_value, str):
            # Streaming: an iterator of response chunks.
            out[WorkflowAttributes.WORKFLOW_OUTPUT_TYPE] = "team_run_response_stream"
            out['agno.team.is_streaming'] = "true"
        elif hasattr(return_value, 'content'):
            # Single TeamRunResponse.
            text = str(return_value.content)
            if len(text) > 1000:
                text = text[:997] + "..."
            out[WorkflowAttributes.WORKFLOW_OUTPUT] = text
            out[WorkflowAttributes.WORKFLOW_OUTPUT_TYPE] = "team_run_response"

            for field, key in (('run_id', 'agno.team.run_id'),
                               ('session_id', 'agno.team.response_session_id'),
                               ('team_id', 'agno.team.response_team_id'),
                               ('model', SpanAttributes.LLM_RESPONSE_MODEL),
                               ('model_provider', 'agno.team.model_provider'),
                               ('event', 'agno.team.event'),
                               ('content_type', 'agno.team.response_content_type')):
                if hasattr(return_value, field):
                    out[key] = str(getattr(return_value, field))
        else:
            # Unknown return type: record its repr and type name.
            text = str(return_value)
            if len(text) > 1000:
                text = text[:997] + "..."
            out[WorkflowAttributes.WORKFLOW_OUTPUT] = text
            out[WorkflowAttributes.WORKFLOW_OUTPUT_TYPE] = type(return_value).__name__

    return out
def get_tool_decorator_attributes(
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    return_value: Optional[Any] = None,
) -> AttributeMap:
    """Build span attributes for @tool decorator calls.

    The decorator is invoked in one of three shapes:
    1. ``@tool`` — direct decoration, args[0] is the function.
    2. ``@tool()`` — parameterless call, return_value is a decorator.
    3. ``@tool(name=...)`` — parameterized call, return_value is a decorator.

    Args:
        args: Positional arguments passed to the decorator.
        kwargs: Keyword arguments passed to the decorator.
        return_value: Decorator return value (Function or inner decorator).

    Returns:
        Attribute map for the tool-creation span.
    """
    attributes: AttributeMap = {
        SpanAttributes.AGENTOPS_SPAN_KIND: AgentOpsSpanKind.TOOL,
        SpanAttributes.LLM_SYSTEM: "agno",
        "agno.tool.operation": "create",
    }

    is_direct = bool(args) and len(args) == 1 and callable(args[0]) and not kwargs

    if is_direct:
        # Pattern 1: @tool applied straight onto the function.
        func = args[0]
        attributes["agno.tool.call_pattern"] = "direct"
        attributes["agno.tool.function_name"] = func.__name__

        from inspect import iscoroutinefunction, isasyncgenfunction
        if iscoroutinefunction(func):
            func_kind = "async"
        elif isasyncgenfunction(func):
            func_kind = "async_generator"
        else:
            func_kind = "sync"
        attributes["agno.tool.function_type"] = func_kind

        if func.__doc__:
            doc = func.__doc__.strip()
            attributes["agno.tool.function_docstring"] = doc if len(doc) <= 200 else doc[:197] + "..."

        # Direct decoration yields a Function object immediately.
        if return_value and hasattr(return_value, 'name'):
            attributes["agno.tool.created_name"] = str(return_value.name)
        return attributes

    # Patterns 2 & 3: parameterized decoration.
    attributes["agno.tool.call_pattern"] = "parameterized"

    if kwargs:
        # Options stored verbatim when truthy.
        for key in ('name', 'description', 'instructions', 'cache_dir'):
            if kwargs.get(key):
                attributes[f"agno.tool.config_{key}"] = kwargs[key]

        # Options stored as strings whenever explicitly set (including False/0).
        for key in ('strict', 'show_result', 'stop_after_tool_call',
                    'requires_confirmation', 'requires_user_input',
                    'external_execution', 'cache_results', 'cache_ttl'):
            if key in kwargs and kwargs[key] is not None:
                attributes[f"agno.tool.config_{key}"] = str(kwargs[key])

        if kwargs.get('user_input_fields'):
            attributes["agno.tool.config_user_input_fields_count"] = str(len(kwargs['user_input_fields']))

    # A parameterized call returns the inner decorator function.
    if return_value and callable(return_value):
        attributes["agno.tool.returns_decorator"] = "true"

    return attributes
def get_tool_execution_attributes(
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    return_value: Optional[Any] = None,
) -> AttributeMap:
    """Extract span attributes for tool execution calls (FunctionCall.execute/aexecute).

    Args:
        args: Positional arguments passed to the execute method (args[0] is
            the FunctionCall instance).
        kwargs: Keyword arguments passed to the execute method.
        return_value: The return value from the execute method
            (typically a FunctionExecutionResult).

    Returns:
        A dictionary of span attributes to be set on the tool execution span.
    """
    # Imports hoisted once: the original imported `time` twice and stamped
    # the timestamp attribute twice with the same value.
    import time
    import traceback

    attributes: AttributeMap = {}

    # Base attributes - "tool.usage" matches yellow color coding in frontend.
    attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = "tool.usage"
    attributes[SpanAttributes.LLM_SYSTEM] = "agno"
    attributes["agno.tool.operation"] = "execute"

    # Standard AgentOps attributes for consistency with other tool spans.
    attributes["deployment.environment"] = "default_environment"
    attributes["service.name"] = "default_application"
    attributes["telemetry.sdk.name"] = "agentops"

    # Single timestamp for the extraction (milliseconds since epoch).
    attributes["agno.tool.execution_timestamp"] = str(int(time.time() * 1000))

    # Best-effort calling context for debugging.
    try:
        stack = traceback.extract_stack()
        calling_info = []
        for frame in stack[-10:]:  # Last 10 frames
            if any(keyword in frame.filename.lower() for keyword in ['agno', 'agent', 'team', 'tool']):
                calling_info.append(f"{frame.filename.split('/')[-1]}:{frame.lineno}:{frame.name}")
        if calling_info:
            attributes["agno.tool.call_stack"] = " -> ".join(calling_info[-3:])  # Last 3 relevant frames
    except Exception as e:
        attributes["agno.tool.call_stack_error"] = str(e)

    # Process the FunctionCall object (self in execute method).
    if args and len(args) > 0:
        function_call = args[0]
        attributes["agno.tool.function_call_type"] = str(type(function_call).__name__)

        if hasattr(function_call, 'function') and function_call.function:
            function = function_call.function
            tool_name = getattr(function, 'name', 'unknown_tool')
            attributes["tool.name"] = tool_name
            attributes["agno.tool.function_name"] = tool_name

            # Description (truncated but readable).
            if hasattr(function, 'description'):
                description = getattr(function, 'description', '')
                if description:
                    if len(description) > 300:
                        description = description[:297] + "..."
                    attributes["tool.description"] = description
                    attributes["agno.tool.function_description"] = description

            # Function source information.
            if hasattr(function, 'entrypoint') and function.entrypoint:
                entrypoint = function.entrypoint
                if hasattr(entrypoint, '__module__'):
                    attributes["agno.tool.function_module"] = str(entrypoint.__module__)
                if hasattr(entrypoint, '__name__'):
                    attributes["agno.tool.function_method"] = str(entrypoint.__name__)
                if hasattr(entrypoint, '__qualname__'):
                    attributes["agno.tool.function_qualname"] = str(entrypoint.__qualname__)

            # Tool capabilities.
            if hasattr(function, 'requires_confirmation'):
                attributes["agno.tool.requires_confirmation"] = str(function.requires_confirmation)
            if hasattr(function, 'show_result'):
                attributes["agno.tool.show_result"] = str(function.show_result)
            if hasattr(function, 'stop_after_tool_call'):
                attributes["agno.tool.stop_after_tool_call"] = str(function.stop_after_tool_call)

        # Tool arguments: parse JSON strings, format key=value pairs.
        if hasattr(function_call, 'arguments') and function_call.arguments:
            try:
                if isinstance(function_call.arguments, str):
                    args_dict = json.loads(function_call.arguments)
                else:
                    args_dict = function_call.arguments

                formatted_args = []
                for key, value in args_dict.items():
                    value_str = str(value)
                    if len(value_str) > 100:
                        value_str = value_str[:97] + "..."
                    formatted_args.append(f"{key}={value_str}")

                attributes["tool.parameters"] = json.dumps(args_dict)
                attributes["agno.tool.formatted_args"] = ", ".join(formatted_args)
                attributes["agno.tool.args_count"] = str(len(args_dict))
            except Exception as e:
                attributes["tool.parameters"] = str(function_call.arguments)
                attributes["agno.tool.args_parse_error"] = str(e)

        if hasattr(function_call, 'tool_call_id'):
            attributes["agno.tool.call_id"] = str(function_call.tool_call_id)

        # Agent context, when the FunctionCall carries one.
        if hasattr(function_call, '_agent') and function_call._agent:
            agent = function_call._agent
            if hasattr(agent, 'name'):
                attributes["agno.tool.calling_agent_name"] = str(agent.name)
            if hasattr(agent, 'agent_id'):
                attributes["agno.tool.calling_agent_id"] = str(agent.agent_id)

    # Process return value.
    if return_value is not None:
        # FunctionExecutionResult wraps the payload in .value; otherwise the
        # return value itself is the result.
        if hasattr(return_value, 'value'):
            result_value = return_value.value
        else:
            result_value = return_value
        attributes["agno.tool.execution_status"] = "success"

        if result_value is not None:
            result_type = type(result_value).__name__
            attributes["agno.tool.result_type"] = result_type

            if hasattr(result_value, 'status') and hasattr(result_value, 'result'):
                # Looks like a FunctionExecutionResult.
                status = getattr(result_value, 'status', 'unknown')
                actual_result = getattr(result_value, 'result', None)
                error = getattr(result_value, 'error', None)

                attributes["agno.tool.execution_result_status"] = str(status)
                attributes["tool.status"] = str(status)

                if error:
                    attributes["agno.tool.execution_error"] = str(error)
                    attributes["tool.error"] = str(error)

                if actual_result is not None:
                    actual_result_type = type(actual_result).__name__
                    attributes["agno.tool.actual_result_type"] = actual_result_type

                    if hasattr(actual_result, '__iter__') and hasattr(actual_result, '__next__'):
                        # Generator result: introspect instead of consuming it.
                        attributes["agno.tool.result_is_generator"] = "true"
                        generator_info = []

                        if hasattr(actual_result, 'gi_code'):
                            func_name = actual_result.gi_code.co_name
                            attributes["agno.tool.generator_function"] = func_name
                            generator_info.append(f"function={func_name}")

                        # Peek at generator-frame locals for context.
                        if hasattr(actual_result, 'gi_frame') and actual_result.gi_frame:
                            try:
                                locals_dict = actual_result.gi_frame.f_locals
                                context_vars = ['task_description', 'expected_output', 'member_agent', 'agent_name', 'team', 'message']
                                for var_name in context_vars:
                                    if var_name in locals_dict:
                                        value = str(locals_dict[var_name])
                                        if len(value) > 100:
                                            value = value[:97] + "..."
                                        generator_info.append(f"{var_name}={value}")
                                        attributes[f"agno.tool.generator_{var_name}"] = value
                                attributes["agno.tool.generator_locals_count"] = str(len(locals_dict))
                            except Exception as e:
                                attributes["agno.tool.generator_locals_error"] = str(e)

                        # Classify team-transfer generators by their repr.
                        generator_str = str(actual_result)
                        if 'transfer_task_to_member' in generator_str:
                            attributes["agno.tool.transfer_type"] = "task_to_member"
                        elif 'transfer' in generator_str.lower():
                            attributes["agno.tool.transfer_type"] = "general_transfer"

                        if generator_info:
                            result_str = f"Generator<{actual_result_type}>({', '.join(generator_info)})"
                        else:
                            result_str = f"Generator<{actual_result_type}> - {str(actual_result)}"
                    else:
                        # Regular result - safe to convert to string.
                        result_str = str(actual_result)
                        if len(result_str) > 500:
                            result_str = result_str[:497] + "..."
                else:
                    result_str = f"FunctionExecutionResult(status={status}, result=None)"
            else:
                # Not a FunctionExecutionResult - handle as direct result.
                if hasattr(result_value, '__iter__') and hasattr(result_value, '__next__'):
                    attributes["agno.tool.result_is_generator"] = "true"
                    if hasattr(result_value, 'gi_code'):
                        func_name = result_value.gi_code.co_name
                        attributes["agno.tool.generator_function"] = func_name
                        result_str = f"Generator<{result_type}> function={func_name} - {str(result_value)}"
                    else:
                        result_str = f"Generator<{result_type}> - {str(result_value)}"
                else:
                    result_str = str(result_value)
                    if len(result_str) > 500:
                        result_str = result_str[:497] + "..."
        else:
            result_str = "None"

        attributes["tool.result"] = result_str
        attributes["agno.tool.result_length"] = str(len(result_str))

        # Short preview for long results.
        if len(result_str) > 100:
            attributes["agno.tool.result_preview"] = result_str[:97] + "..."
        else:
            attributes["agno.tool.result_preview"] = result_str

    # Default status when nothing set one above.
    if not attributes.get("tool.status"):
        attributes["tool.status"] = "success"

    # Execution summary for debugging.
    tool_name = attributes.get("tool.name", "unknown")
    call_type = attributes.get("agno.tool.transfer_type", "unknown")
    attributes["agno.tool.execution_summary"] = f"Tool '{tool_name}' executed with type '{call_type}'"

    return attributes
Extract Function constructor arguments + if kwargs: + if kwargs.get('name'): + attributes["agno.function.name"] = kwargs['name'] + + if kwargs.get('description'): + description = kwargs['description'] + if len(description) > 200: + description = description[:197] + "..." + attributes["agno.function.description"] = description + + if kwargs.get('instructions'): + instructions = kwargs['instructions'] + if len(instructions) > 200: + instructions = instructions[:197] + "..." + attributes["agno.function.instructions"] = instructions + + if 'strict' in kwargs and kwargs['strict'] is not None: + attributes["agno.function.strict"] = str(kwargs['strict']) + + if 'show_result' in kwargs and kwargs['show_result'] is not None: + attributes["agno.function.show_result"] = str(kwargs['show_result']) + + if 'stop_after_tool_call' in kwargs and kwargs['stop_after_tool_call'] is not None: + attributes["agno.function.stop_after_tool_call"] = str(kwargs['stop_after_tool_call']) + + if 'requires_confirmation' in kwargs and kwargs['requires_confirmation'] is not None: + attributes["agno.function.requires_confirmation"] = str(kwargs['requires_confirmation']) + + if 'requires_user_input' in kwargs and kwargs['requires_user_input'] is not None: + attributes["agno.function.requires_user_input"] = str(kwargs['requires_user_input']) + + if 'external_execution' in kwargs and kwargs['external_execution'] is not None: + attributes["agno.function.external_execution"] = str(kwargs['external_execution']) + + if kwargs.get('user_input_fields'): + attributes["agno.function.user_input_fields_count"] = str(len(kwargs['user_input_fields'])) + + if 'cache_results' in kwargs and kwargs['cache_results'] is not None: + attributes["agno.function.cache_results"] = str(kwargs['cache_results']) + + if kwargs.get('cache_dir'): + attributes["agno.function.cache_dir"] = kwargs['cache_dir'] + + if 'cache_ttl' in kwargs and kwargs['cache_ttl'] is not None: + attributes["agno.function.cache_ttl"] = 
str(kwargs['cache_ttl']) + + # Check the entrypoint function if available + if kwargs.get('entrypoint') and callable(kwargs['entrypoint']): + func = kwargs['entrypoint'] + + # Check if it's an async function + from inspect import iscoroutinefunction, isasyncgenfunction + if iscoroutinefunction(func): + attributes["agno.function.entrypoint_type"] = "async" + elif isasyncgenfunction(func): + attributes["agno.function.entrypoint_type"] = "async_generator" + else: + attributes["agno.function.entrypoint_type"] = "sync" + + # Get function name from entrypoint + if hasattr(func, '__name__'): + attributes["agno.function.entrypoint_name"] = func.__name__ + + return attributes + + +def get_tool_preparation_attributes( + args: Optional[Tuple] = None, + kwargs: Optional[Dict] = None, + return_value: Optional[Any] = None, +) -> AttributeMap: + """Extract span attributes for agent tool preparation. + + This captures when an agent processes and registers tools during determine_tools_for_model. + + Args: + args: Positional arguments passed to determine_tools_for_model (self, model, session_id, ...) 
+ kwargs: Keyword arguments passed to determine_tools_for_model + return_value: The return value from determine_tools_for_model (None) + + Returns: + A dictionary of span attributes to be set on the tool preparation span + """ + attributes: AttributeMap = {} + + # Base attributes + attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = AgentOpsSpanKind.WORKFLOW + attributes[SpanAttributes.LLM_SYSTEM] = "agno" + attributes["agno.agent.operation"] = "prepare_tools" + + # Extract agent information from args[0] (self) + if args and len(args) >= 1: + agent = args[0] + + if hasattr(agent, 'name') and agent.name: + attributes["agno.agent.name"] = agent.name + + if hasattr(agent, 'tools') and agent.tools: + tools_count = len(agent.tools) + attributes["agno.agent.tools_count"] = str(tools_count) + + # Capture tool names if available + tool_names = [] + for tool in agent.tools: + if hasattr(tool, 'name'): + tool_names.append(tool.name) + elif hasattr(tool, '__name__'): + tool_names.append(tool.__name__) + elif callable(tool): + tool_names.append(getattr(tool, '__name__', 'unknown')) + + if tool_names: + # Limit to first 5 tools to avoid overly long attributes + limited_names = tool_names[:5] + if len(tool_names) > 5: + limited_names.append(f"...+{len(tool_names)-5} more") + attributes["agno.agent.tool_names"] = ",".join(limited_names) + + # Extract model information + if len(args) >= 2: + model = args[1] + if hasattr(model, 'id'): + attributes["agno.agent.model_id"] = str(model.id) + if hasattr(model, 'provider'): + attributes["agno.agent.model_provider"] = str(model.provider) + + # Extract session information + if len(args) >= 3: + session_id = args[2] + if session_id: + attributes["agno.agent.session_id"] = str(session_id) + + # Extract additional info from kwargs + if kwargs: + if kwargs.get('async_mode') is not None: + attributes["agno.agent.async_mode"] = str(kwargs['async_mode']) + + if kwargs.get('knowledge_filters'): + attributes["agno.agent.has_knowledge_filters"] = "true" 
+ else: + attributes["agno.agent.has_knowledge_filters"] = "false" + + return attributes + + +def get_tool_registration_attributes( + args: Optional[Tuple] = None, + kwargs: Optional[Dict] = None, + return_value: Optional[Any] = None, +) -> AttributeMap: + """Extract span attributes for individual tool registration via Function.from_callable. + + This captures when individual tools (callables) are converted to Function objects during agent tool preparation. + + Args: + args: Positional arguments passed to Function.from_callable (callable, ...) + kwargs: Keyword arguments passed to Function.from_callable + return_value: The return value from Function.from_callable (Function object) + + Returns: + A dictionary of span attributes to be set on the tool registration span + """ + attributes: AttributeMap = {} + + # Base attributes + attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = AgentOpsSpanKind.TOOL + attributes[SpanAttributes.LLM_SYSTEM] = "agno" + attributes["agno.tool.operation"] = "register" + + # Extract callable information from args[0] + if args and len(args) >= 1: + callable_func = args[0] + + if hasattr(callable_func, '__name__'): + attributes["agno.tool.function_name"] = callable_func.__name__ + + # Check if it's an async function + from inspect import iscoroutinefunction, isasyncgenfunction + if iscoroutinefunction(callable_func): + attributes["agno.tool.function_type"] = "async" + elif isasyncgenfunction(callable_func): + attributes["agno.tool.function_type"] = "async_generator" + else: + attributes["agno.tool.function_type"] = "sync" + + # Get docstring if available + if hasattr(callable_func, '__doc__') and callable_func.__doc__: + docstring = callable_func.__doc__.strip() + if len(docstring) > 200: + docstring = docstring[:197] + "..." 
+ attributes["agno.tool.function_docstring"] = docstring + + # Check if it's already a Function object (has agno-specific attributes) + if hasattr(callable_func, 'name'): + attributes["agno.tool.source_name"] = str(callable_func.name) + if hasattr(callable_func, 'description'): + description = str(callable_func.description) + if len(description) > 200: + description = description[:197] + "..." + attributes["agno.tool.source_description"] = description + + # Extract kwargs passed to from_callable + if kwargs: + if kwargs.get('strict') is not None: + attributes["agno.tool.strict"] = str(kwargs['strict']) + + # Extract information from the created Function object + if return_value and hasattr(return_value, 'name'): + attributes["agno.tool.created_name"] = str(return_value.name) + + if hasattr(return_value, 'description'): + description = str(return_value.description) + if len(description) > 200: + description = description[:197] + "..." + attributes["agno.tool.created_description"] = description + + # Tool capabilities from the created Function + if hasattr(return_value, 'requires_confirmation'): + attributes["agno.tool.requires_confirmation"] = str(return_value.requires_confirmation) + + if hasattr(return_value, 'requires_user_input'): + attributes["agno.tool.requires_user_input"] = str(return_value.requires_user_input) + + if hasattr(return_value, 'external_execution'): + attributes["agno.tool.external_execution"] = str(return_value.external_execution) + + return attributes \ No newline at end of file diff --git a/agentops/instrumentation/agno/attributes/workflow.py b/agentops/instrumentation/agno/attributes/workflow.py new file mode 100644 index 000000000..41d28b7a0 --- /dev/null +++ b/agentops/instrumentation/agno/attributes/workflow.py @@ -0,0 +1,232 @@ +"""Workflow attribute extraction for agno workflow instrumentation.""" + +from typing import Any, Dict, Optional, Tuple +from opentelemetry.util.types import AttributeValue + +from 
agentops.semconv.instrumentation import InstrumentationAttributes +from agentops.semconv.span_kinds import SpanKind as AgentOpsSpanKind +from agentops.instrumentation.common.attributes import get_common_attributes + + +def get_workflow_run_attributes( + args: Tuple[Any, ...] = (), + kwargs: Optional[Dict[str, Any]] = None, + return_value: Optional[Any] = None, +) -> Dict[str, AttributeValue]: + """Extract attributes from workflow run operations. + + Args: + args: Positional arguments passed to the workflow run method + kwargs: Keyword arguments passed to the workflow run method + return_value: Return value from the workflow run method + + Returns: + Dictionary of OpenTelemetry attributes for workflow runs + """ + attributes = get_common_attributes() + kwargs = kwargs or {} + + if args and len(args) > 0: + workflow = args[0] + + # Core workflow attributes + if hasattr(workflow, 'name') and workflow.name: + attributes["workflow.name"] = str(workflow.name) + if hasattr(workflow, 'workflow_id') and workflow.workflow_id: + attributes["workflow.workflow_id"] = str(workflow.workflow_id) + if hasattr(workflow, 'description') and workflow.description: + attributes["workflow.description"] = str(workflow.description) + if hasattr(workflow, 'app_id') and workflow.app_id: + attributes["workflow.app_id"] = str(workflow.app_id) + + # Session and user attributes + if hasattr(workflow, 'session_id') and workflow.session_id: + attributes["workflow.session_id"] = str(workflow.session_id) + if hasattr(workflow, 'session_name') and workflow.session_name: + attributes["workflow.session_name"] = str(workflow.session_name) + if hasattr(workflow, 'user_id') and workflow.user_id: + attributes["workflow.user_id"] = str(workflow.user_id) + + # Run-specific attributes + if hasattr(workflow, 'run_id') and workflow.run_id: + attributes["workflow.run_id"] = str(workflow.run_id) + + # Configuration attributes + if hasattr(workflow, 'debug_mode'): + attributes["workflow.debug_mode"] = 
bool(workflow.debug_mode) + if hasattr(workflow, 'monitoring'): + attributes["workflow.monitoring"] = bool(workflow.monitoring) + if hasattr(workflow, 'telemetry'): + attributes["workflow.telemetry"] = bool(workflow.telemetry) + + # Memory and storage attributes + if hasattr(workflow, 'memory') and workflow.memory: + memory_type = type(workflow.memory).__name__ + attributes["workflow.memory.type"] = memory_type + + if hasattr(workflow, 'storage') and workflow.storage: + storage_type = type(workflow.storage).__name__ + attributes["workflow.storage.type"] = storage_type + + # Input parameters from kwargs + if kwargs: + # Count and types of input parameters + attributes["workflow.input.parameter_count"] = len(kwargs) + param_types = list(set(type(v).__name__ for v in kwargs.values())) + if param_types: + attributes["workflow.input.parameter_types"] = str(param_types) + + # Store specific input keys (without values for privacy) + input_keys = list(kwargs.keys()) + if input_keys: + attributes["workflow.input.parameter_keys"] = str(input_keys) + + # Workflow method parameters if available + if hasattr(workflow, '_run_parameters') and workflow._run_parameters: + param_count = len(workflow._run_parameters) + attributes["workflow.method.parameter_count"] = param_count + + if hasattr(workflow, '_run_return_type') and workflow._run_return_type: + attributes["workflow.method.return_type"] = str(workflow._run_return_type) + + # Process return value attributes + if return_value is not None: + return_type = type(return_value).__name__ + attributes["workflow.output.type"] = return_type + + # Handle RunResponse objects + if hasattr(return_value, 'content'): + if hasattr(return_value, 'content_type'): + attributes["workflow.output.content_type"] = str(return_value.content_type) + if hasattr(return_value, 'event'): + attributes["workflow.output.event"] = str(return_value.event) + if hasattr(return_value, 'model'): + attributes["workflow.output.model"] = str(return_value.model) if 
return_value.model else "" + if hasattr(return_value, 'model_provider'): + attributes["workflow.output.model_provider"] = str(return_value.model_provider) if return_value.model_provider else "" + + # Count various response components + if hasattr(return_value, 'messages') and return_value.messages: + attributes["workflow.output.message_count"] = len(return_value.messages) + if hasattr(return_value, 'tools') and return_value.tools: + attributes["workflow.output.tool_count"] = len(return_value.tools) + if hasattr(return_value, 'images') and return_value.images: + attributes["workflow.output.image_count"] = len(return_value.images) + if hasattr(return_value, 'videos') and return_value.videos: + attributes["workflow.output.video_count"] = len(return_value.videos) + if hasattr(return_value, 'audio') and return_value.audio: + attributes["workflow.output.audio_count"] = len(return_value.audio) + + # Handle generators/iterators + elif hasattr(return_value, '__iter__') and not isinstance(return_value, (str, bytes)): + attributes["workflow.output.is_streaming"] = True + + # Set span kind - AgentOpsSpanKind.WORKFLOW is already a string + attributes[InstrumentationAttributes.INSTRUMENTATION_TYPE] = AgentOpsSpanKind.WORKFLOW + + return attributes + + +def get_workflow_session_attributes( + args: Tuple[Any, ...] = (), + kwargs: Optional[Dict[str, Any]] = None, + return_value: Optional[Any] = None, +) -> Dict[str, AttributeValue]: + """Extract attributes from workflow session operations. 
+ + Args: + args: Positional arguments passed to the session method + kwargs: Keyword arguments passed to the session method + return_value: Return value from the session method + + Returns: + Dictionary of OpenTelemetry attributes for workflow sessions + """ + attributes = get_common_attributes() + kwargs = kwargs or {} + + if args and len(args) > 0: + workflow = args[0] + + # Session attributes + if hasattr(workflow, 'session_id') and workflow.session_id: + attributes["workflow.session.session_id"] = str(workflow.session_id) + if hasattr(workflow, 'session_name') and workflow.session_name: + attributes["workflow.session.session_name"] = str(workflow.session_name) + if hasattr(workflow, 'workflow_id') and workflow.workflow_id: + attributes["workflow.session.workflow_id"] = str(workflow.workflow_id) + if hasattr(workflow, 'user_id') and workflow.user_id: + attributes["workflow.session.user_id"] = str(workflow.user_id) + + # Session state attributes + if hasattr(workflow, 'session_state') and workflow.session_state: + if isinstance(workflow.session_state, dict): + attributes["workflow.session.state_keys"] = str(list(workflow.session_state.keys())) + attributes["workflow.session.state_size"] = len(workflow.session_state) + + # Storage attributes + if hasattr(workflow, 'storage') and workflow.storage: + storage_type = type(workflow.storage).__name__ + attributes["workflow.session.storage_type"] = storage_type + + # Process session return value if it's a WorkflowSession + if return_value is not None and hasattr(return_value, 'session_id'): + attributes["workflow.session.returned_session_id"] = str(return_value.session_id) + if hasattr(return_value, 'created_at') and return_value.created_at: + attributes["workflow.session.created_at"] = int(return_value.created_at) + if hasattr(return_value, 'updated_at') and return_value.updated_at: + attributes["workflow.session.updated_at"] = int(return_value.updated_at) + + # Set span kind - AgentOpsSpanKind.WORKFLOW is already a 
string + attributes[InstrumentationAttributes.INSTRUMENTATION_TYPE] = AgentOpsSpanKind.WORKFLOW + + return attributes + + +def get_workflow_storage_attributes( + args: Tuple[Any, ...] = (), + kwargs: Optional[Dict[str, Any]] = None, + return_value: Optional[Any] = None, +) -> Dict[str, AttributeValue]: + """Extract attributes from workflow storage operations. + + Args: + args: Positional arguments passed to the storage method + kwargs: Keyword arguments passed to the storage method + return_value: Return value from the storage method + + Returns: + Dictionary of OpenTelemetry attributes for workflow storage + """ + attributes = get_common_attributes() + kwargs = kwargs or {} + + if args and len(args) > 0: + workflow = args[0] + + # Storage attributes + if hasattr(workflow, 'storage') and workflow.storage: + storage_type = type(workflow.storage).__name__ + attributes["workflow.storage.type"] = storage_type + + if hasattr(workflow.storage, 'mode'): + attributes["workflow.storage.mode"] = str(workflow.storage.mode) + + # Workflow identification for storage context + if hasattr(workflow, 'workflow_id') and workflow.workflow_id: + attributes["workflow.storage.workflow_id"] = str(workflow.workflow_id) + if hasattr(workflow, 'session_id') and workflow.session_id: + attributes["workflow.storage.session_id"] = str(workflow.session_id) + + # Process storage operation result + if return_value is not None: + if hasattr(return_value, 'session_id'): + attributes["workflow.storage.operation_result"] = "success" + attributes["workflow.storage.result_session_id"] = str(return_value.session_id) + else: + attributes["workflow.storage.operation_result"] = "unknown" + + # Set span kind - AgentOpsSpanKind.WORKFLOW is already a string + attributes[InstrumentationAttributes.INSTRUMENTATION_TYPE] = AgentOpsSpanKind.WORKFLOW + + return attributes \ No newline at end of file diff --git a/agentops/instrumentation/agno/instrumentor.py b/agentops/instrumentation/agno/instrumentor.py new file 
mode 100644 index 000000000..3684817a0 --- /dev/null +++ b/agentops/instrumentation/agno/instrumentor.py @@ -0,0 +1,794 @@ +"""Agno Agent Instrumentation for AgentOps + +This module provides instrumentation for the Agno Agent library, implementing OpenTelemetry +instrumentation for agent workflows and LLM model calls. + +We focus on instrumenting the following key endpoints: +- Agent.run/arun - Main agent workflow execution (sync/async) +- Team._run/_arun - Team workflow execution (sync/async) +- Team._run_stream/_arun_stream - Team streaming workflow execution (sync/async) +- FunctionCall.execute/aexecute - Tool execution when agents call tools (sync/async) +- Agent._run_tool/_arun_tool - Agent internal tool execution (sync/async) +- Agent._set_session_metrics - Session metrics capture for token usage and timing + +This provides clean visibility into agent workflows and actual tool usage with proper +parent-child span relationships. +""" + +from typing import List, Collection, Any, Optional, Dict +from opentelemetry.trace import get_tracer, SpanKind +from opentelemetry.instrumentation.instrumentor import BaseInstrumentor +from opentelemetry.metrics import get_meter +from opentelemetry.util.types import AttributeValue +from opentelemetry import trace, context as otel_context +from opentelemetry.trace import Status, StatusCode +import threading +import weakref + +from agentops.logging import logger +from agentops.semconv import Meters +from agentops.semconv.span_kinds import SpanKind as AgentOpsSpanKind + +# Import attribute handlers +from agentops.instrumentation.agno.attributes.agent import get_agent_run_attributes +from agentops.instrumentation.agno.attributes.team import get_team_run_attributes +from agentops.instrumentation.agno.attributes.tool import get_tool_execution_attributes +from agentops.instrumentation.agno.attributes.metrics import get_metrics_attributes +from agentops.instrumentation.agno.attributes.workflow import ( + get_workflow_run_attributes, + 
get_workflow_session_attributes, + get_workflow_storage_attributes +) + + +class StreamingContextManager: + """Manages span contexts for streaming agent and workflow executions.""" + + def __init__(self): + self._contexts = {} # context_id -> (span_context, span) + self._agent_sessions = {} # session_id -> agent_id mapping for context lookup + self._lock = threading.Lock() + + def store_context(self, context_id: str, span_context: Any, span: Any) -> None: + """Store span context for streaming execution.""" + with self._lock: + self._contexts[context_id] = (span_context, span) + + def get_context(self, context_id: str) -> Optional[tuple]: + """Retrieve stored span context.""" + with self._lock: + return self._contexts.get(context_id) + + def remove_context(self, context_id: str) -> None: + """Remove stored context (when streaming completes).""" + with self._lock: + self._contexts.pop(context_id, None) + + def store_agent_session_mapping(self, session_id: str, agent_id: str) -> None: + """Store mapping between session and agent for context lookup.""" + with self._lock: + self._agent_sessions[session_id] = agent_id + + def get_agent_context_by_session(self, session_id: str) -> Optional[tuple]: + """Get agent context using session ID.""" + with self._lock: + agent_id = self._agent_sessions.get(session_id) + if agent_id: + return self._contexts.get(agent_id) + return None + + def clear_all(self) -> None: + """Clear all stored contexts.""" + with self._lock: + self._contexts.clear() + self._agent_sessions.clear() + + +# Global context manager instance +_streaming_context_manager = StreamingContextManager() + + +def create_streaming_workflow_wrapper(original_func, is_async=False): + """Create a streaming-aware wrapper for workflow run methods.""" + + if is_async: + async def async_wrapper(self, *args, **kwargs): + tracer = trace.get_tracer(__name__) + + # Get workflow ID for context storage + workflow_id = getattr(self, 'workflow_id', None) or getattr(self, 'id', None) or 
id(self) + workflow_id = str(workflow_id) + + # Check if streaming is enabled + is_streaming = kwargs.get('stream', getattr(self, 'stream', False)) + + with tracer.start_as_current_span("agno.workflow.run.workflow") as span: + try: + # Set workflow attributes + attributes = get_workflow_run_attributes(args=(self,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Store context for streaming if needed + if is_streaming: + current_context = otel_context.get_current() + _streaming_context_manager.store_context(workflow_id, current_context, span) + + # Execute the original function + result = await original_func(self, *args, **kwargs) + + # Set result attributes + result_attributes = get_workflow_run_attributes(args=(self,) + args, kwargs=kwargs, return_value=result) + for key, value in result_attributes.items(): + if key not in attributes: # Avoid duplicates + span.set_attribute(key, value) + + span.set_status(Status(StatusCode.OK)) + return result + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + raise + finally: + # For non-streaming, remove context immediately + if not is_streaming: + _streaming_context_manager.remove_context(workflow_id) + + return async_wrapper + else: + def sync_wrapper(self, *args, **kwargs): + tracer = trace.get_tracer(__name__) + + # Get workflow ID for context storage + workflow_id = getattr(self, 'workflow_id', None) or getattr(self, 'id', None) or id(self) + workflow_id = str(workflow_id) + + # Check if streaming is enabled + is_streaming = kwargs.get('stream', getattr(self, 'stream', False)) + + with tracer.start_as_current_span("agno.workflow.run.workflow") as span: + try: + # Set workflow attributes + attributes = get_workflow_run_attributes(args=(self,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Store context for streaming if needed + if is_streaming: + current_context 
= otel_context.get_current() + _streaming_context_manager.store_context(workflow_id, current_context, span) + + # Execute the original function + result = original_func(self, *args, **kwargs) + + # Set result attributes + result_attributes = get_workflow_run_attributes(args=(self,) + args, kwargs=kwargs, return_value=result) + for key, value in result_attributes.items(): + if key not in attributes: # Avoid duplicates + span.set_attribute(key, value) + + span.set_status(Status(StatusCode.OK)) + return result + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + raise + finally: + # For non-streaming, remove context immediately + if not is_streaming: + _streaming_context_manager.remove_context(workflow_id) + + return sync_wrapper + + +def create_streaming_agent_wrapper(original_func, is_async=False): + """Create a streaming-aware wrapper for agent run methods with enhanced context propagation.""" + + if is_async: + async def async_wrapper(self, *args, **kwargs): + tracer = trace.get_tracer(__name__) + + # Get agent ID for context storage + agent_id = getattr(self, 'agent_id', None) or getattr(self, 'id', None) or id(self) + agent_id = str(agent_id) + + # Get session ID for context mapping + session_id = getattr(self, 'session_id', None) + + # Check if streaming is enabled + is_streaming = kwargs.get('stream', getattr(self, 'stream', False)) + + # For streaming, manually manage span lifecycle + if is_streaming: + span = tracer.start_span("agno.agent.run.agent") + + try: + # Set agent attributes + attributes = get_agent_run_attributes(args=(self,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Store context for streaming - capture current context with active span + current_context = trace.set_span_in_context(span, otel_context.get_current()) + _streaming_context_manager.store_context(agent_id, current_context, span) + + # Store session-to-agent mapping for LLM 
context lookup + if session_id: + _streaming_context_manager.store_agent_session_mapping(session_id, agent_id) + + # Execute the original function within agent context + context_token = otel_context.attach(current_context) + try: + result = await original_func(self, *args, **kwargs) + finally: + otel_context.detach(context_token) + + # Set result attributes + result_attributes = get_agent_run_attributes(args=(self,) + args, kwargs=kwargs, return_value=result) + for key, value in result_attributes.items(): + if key not in attributes: # Avoid duplicates + span.set_attribute(key, value) + + span.set_status(Status(StatusCode.OK)) + + # Wrap the result to maintain context and end span when complete + if hasattr(result, '__iter__'): + return StreamingResultWrapper(result, span, agent_id, current_context) + else: + # Not actually streaming, clean up immediately + span.end() + _streaming_context_manager.remove_context(agent_id) + return result + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + span.end() + _streaming_context_manager.remove_context(agent_id) + raise + else: + # For non-streaming, use normal context manager + with tracer.start_as_current_span("agno.agent.run.agent") as span: + try: + # Set agent attributes + attributes = get_agent_run_attributes(args=(self,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Execute the original function + result = await original_func(self, *args, **kwargs) + + # Set result attributes + result_attributes = get_agent_run_attributes(args=(self,) + args, kwargs=kwargs, return_value=result) + for key, value in result_attributes.items(): + if key not in attributes: # Avoid duplicates + span.set_attribute(key, value) + + span.set_status(Status(StatusCode.OK)) + return result + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + raise + + return async_wrapper + else: + def 
sync_wrapper(self, *args, **kwargs): + tracer = trace.get_tracer(__name__) + + # Get agent ID for context storage + agent_id = getattr(self, 'agent_id', None) or getattr(self, 'id', None) or id(self) + agent_id = str(agent_id) + + # Get session ID for context mapping + session_id = getattr(self, 'session_id', None) + + # Check if streaming is enabled + is_streaming = kwargs.get('stream', getattr(self, 'stream', False)) + + # For streaming, manually manage span lifecycle + if is_streaming: + span = tracer.start_span("agno.agent.run.agent") + + try: + # Set agent attributes + attributes = get_agent_run_attributes(args=(self,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Store context for streaming - capture current context with active span + current_context = trace.set_span_in_context(span, otel_context.get_current()) + _streaming_context_manager.store_context(agent_id, current_context, span) + + # Store session-to-agent mapping for LLM context lookup + if session_id: + _streaming_context_manager.store_agent_session_mapping(session_id, agent_id) + + # Execute the original function within agent context + context_token = otel_context.attach(current_context) + try: + result = original_func(self, *args, **kwargs) + finally: + otel_context.detach(context_token) + + # Set result attributes + result_attributes = get_agent_run_attributes(args=(self,) + args, kwargs=kwargs, return_value=result) + for key, value in result_attributes.items(): + if key not in attributes: # Avoid duplicates + span.set_attribute(key, value) + + span.set_status(Status(StatusCode.OK)) + + # Wrap the result to maintain context and end span when complete + if hasattr(result, '__iter__'): + return StreamingResultWrapper(result, span, agent_id, current_context) + else: + # Not actually streaming, clean up immediately + span.end() + _streaming_context_manager.remove_context(agent_id) + return result + + except Exception as e: + 
span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + span.end() + _streaming_context_manager.remove_context(agent_id) + raise + else: + # For non-streaming, use normal context manager + with tracer.start_as_current_span("agno.agent.run.agent") as span: + try: + # Set agent attributes + attributes = get_agent_run_attributes(args=(self,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Execute the original function + result = original_func(self, *args, **kwargs) + + # Set result attributes + result_attributes = get_agent_run_attributes(args=(self,) + args, kwargs=kwargs, return_value=result) + for key, value in result_attributes.items(): + if key not in attributes: # Avoid duplicates + span.set_attribute(key, value) + + span.set_status(Status(StatusCode.OK)) + return result + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + raise + + return sync_wrapper + + +class StreamingResultWrapper: + """Wrapper for streaming results that maintains agent span as active throughout iteration.""" + + def __init__(self, original_result, span, agent_id, agent_context): + self.original_result = original_result + self.span = span + self.agent_id = agent_id + self.agent_context = agent_context + self._consumed = False + + def __iter__(self): + """Return iterator that keeps agent span active during iteration.""" + context_token = otel_context.attach(self.agent_context) + try: + # Execute iteration within agent context + for item in self.original_result: + # Each item is yielded within the agent span context + yield item + finally: + # Clean up when iteration is complete + otel_context.detach(context_token) + if not self._consumed: + self._consumed = True + self.span.end() + _streaming_context_manager.remove_context(self.agent_id) + + def __getattr__(self, name): + """Delegate attribute access to the original result.""" + return 
getattr(self.original_result, name) + + +def create_streaming_tool_wrapper(original_func): + """Create a streaming-aware wrapper for tool execution methods.""" + + def wrapper(self, *args, **kwargs): + tracer = trace.get_tracer(__name__) + + # Try to find the agent or workflow context for proper span hierarchy + parent_context = None + parent_span = None + + # Try to get context from agent + try: + if hasattr(self, '_agent'): + agent = self._agent + agent_id = getattr(agent, 'agent_id', None) or getattr(agent, 'id', None) or id(agent) + agent_id = str(agent_id) + context_info = _streaming_context_manager.get_context(agent_id) + if context_info: + parent_context, parent_span = context_info + except Exception: + pass # Continue without agent context if not found + + # Try to get context from workflow if agent context not found + if not parent_context: + try: + if hasattr(self, '_workflow'): + workflow = self._workflow + workflow_id = getattr(workflow, 'workflow_id', None) or getattr(workflow, 'id', None) or id(workflow) + workflow_id = str(workflow_id) + context_info = _streaming_context_manager.get_context(workflow_id) + if context_info: + parent_context, parent_span = context_info + except Exception: + pass # Continue without workflow context if not found + + # Use parent context if available, otherwise use current context + if parent_context: + with otel_context.use_context(parent_context): + with tracer.start_as_current_span("agno.tool.execute.tool_usage") as span: + try: + # Set tool attributes + attributes = get_tool_execution_attributes(args=(self,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Execute the original function + result = original_func(self, *args, **kwargs) + + # Set result attributes + result_attributes = get_tool_execution_attributes(args=(self,) + args, kwargs=kwargs, return_value=result) + for key, value in result_attributes.items(): + if key not in attributes: # Avoid duplicates + 
span.set_attribute(key, value) + + span.set_status(Status(StatusCode.OK)) + return result + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + raise + else: + # Fallback to normal span creation + with tracer.start_as_current_span("agno.tool.execute.tool_usage") as span: + try: + # Set tool attributes + attributes = get_tool_execution_attributes(args=(self,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Execute the original function + result = original_func(self, *args, **kwargs) + + # Set result attributes + result_attributes = get_tool_execution_attributes(args=(self,) + args, kwargs=kwargs, return_value=result) + for key, value in result_attributes.items(): + if key not in attributes: # Avoid duplicates + span.set_attribute(key, value) + + span.set_status(Status(StatusCode.OK)) + return result + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + raise + + return wrapper + + +def get_agent_context_for_llm(): + """Helper function for LLM instrumentation to get current agent context.""" + current_context = otel_context.get_current() + current_span = trace.get_current_span(current_context) + + # Check if we're already in an agent span + if current_span and hasattr(current_span, 'name') and 'agent' in current_span.name: + return current_context, current_span + + # Try to find stored agent context by checking active contexts + # This is a fallback for cases where context isn't properly propagated + return None, None + + +class AgnoInstrumentor(BaseInstrumentor): + """Agno instrumentation class.""" + + _original_methods = {} # Store original methods for cleanup + + def instrumentation_dependencies(self) -> Collection[str]: + """Returns list of packages required for instrumentation.""" + return [] + + def _instrument(self, **kwargs): + """Install instrumentation for Agno.""" + tracer = get_tracer(__name__) + 
+ try: + # Apply workflow instrumentation + try: + import agno.workflow.workflow + if hasattr(agno.workflow.workflow, 'Workflow'): + # Store original methods for cleanup + self._original_methods['Workflow.run_workflow'] = getattr(agno.workflow.workflow.Workflow, 'run_workflow', None) + self._original_methods['Workflow.arun_workflow'] = getattr(agno.workflow.workflow.Workflow, 'arun_workflow', None) + + # Wrap main workflow execution methods + if self._original_methods['Workflow.run_workflow']: + agno.workflow.workflow.Workflow.run_workflow = create_streaming_workflow_wrapper( + agno.workflow.workflow.Workflow.run_workflow, is_async=False + ) + if self._original_methods['Workflow.arun_workflow']: + agno.workflow.workflow.Workflow.arun_workflow = create_streaming_workflow_wrapper( + agno.workflow.workflow.Workflow.arun_workflow, is_async=True + ) + + # Wrap session management methods + session_methods = ['load_session', 'new_session', 'read_from_storage', 'write_to_storage'] + for method_name in session_methods: + original_method = getattr(agno.workflow.workflow.Workflow, method_name, None) + if original_method: + self._original_methods[f'Workflow.{method_name}'] = original_method + setattr(agno.workflow.workflow.Workflow, method_name, + self._create_session_wrapper(original_method, method_name)) + + logger.debug("Successfully wrapped Workflow methods with streaming context support") + except ImportError: + logger.debug("Workflow module not found, skipping workflow instrumentation") + + # Apply streaming-aware agent wrappers + import agno.agent + if hasattr(agno.agent, 'Agent'): + # Store original methods for cleanup + self._original_methods['Agent.run'] = agno.agent.Agent.run + self._original_methods['Agent.arun'] = agno.agent.Agent.arun + + agno.agent.Agent.run = create_streaming_agent_wrapper(agno.agent.Agent.run, is_async=False) + agno.agent.Agent.arun = create_streaming_agent_wrapper(agno.agent.Agent.arun, is_async=True) + + logger.debug("Successfully wrapped 
Agent.run and Agent.arun with enhanced streaming context support") + + # Apply streaming-aware tool wrappers + import agno.tools.function + if hasattr(agno.tools.function, 'FunctionCall'): + # Store original method for cleanup + self._original_methods['FunctionCall.execute'] = agno.tools.function.FunctionCall.execute + + agno.tools.function.FunctionCall.execute = create_streaming_tool_wrapper(agno.tools.function.FunctionCall.execute) + + logger.debug("Successfully wrapped FunctionCall.execute with streaming context support") + + # Apply standard team and metrics wrappers if needed + try: + import agno.team.team + if hasattr(agno.team.team, 'Team'): + self._original_methods['Team._run'] = getattr(agno.team.team.Team, '_run', None) + self._original_methods['Team._arun'] = getattr(agno.team.team.Team, '_arun', None) + + if self._original_methods['Team._run']: + agno.team.team.Team._run = self._create_standard_wrapper( + agno.team.team.Team._run, "agno.team.run.workflow", get_team_run_attributes, is_async=False + ) + if self._original_methods['Team._arun']: + agno.team.team.Team._arun = self._create_standard_wrapper( + agno.team.team.Team._arun, "agno.team.run.workflow", get_team_run_attributes, is_async=True + ) + + logger.debug("Successfully wrapped Team._run and Team._arun") + except ImportError: + logger.debug("Team module not found, skipping team instrumentation") + + # Apply metrics wrapper + try: + if hasattr(agno.agent.Agent, '_set_session_metrics'): + self._original_methods['Agent._set_session_metrics'] = agno.agent.Agent._set_session_metrics + agno.agent.Agent._set_session_metrics = self._create_llm_metrics_wrapper( + agno.agent.Agent._set_session_metrics, get_metrics_attributes + ) + logger.debug("Successfully wrapped Agent._set_session_metrics") + except AttributeError: + logger.debug("_set_session_metrics method not found, skipping metrics instrumentation") + + logger.info("Agno instrumentation installed successfully with enhanced workflow and streaming 
context support") + + except Exception as e: + logger.error(f"Failed to install Agno instrumentation: {e}") + raise + + def _create_session_wrapper(self, original_func, method_name): + """Create a wrapper for workflow session management methods.""" + + def wrapper(self, *args, **kwargs): + tracer = trace.get_tracer(__name__) + span_name = f"agno.workflow.session.{method_name}" + + with tracer.start_as_current_span(span_name) as span: + try: + # Set session attributes + attributes = get_workflow_session_attributes(args=(self,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Execute the original function + result = original_func(self, *args, **kwargs) + + # Set result attributes + result_attributes = get_workflow_session_attributes(args=(self,) + args, kwargs=kwargs, return_value=result) + for key, value in result_attributes.items(): + if key not in attributes: # Avoid duplicates + span.set_attribute(key, value) + + span.set_status(Status(StatusCode.OK)) + return result + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + raise + + return wrapper + + def _create_standard_wrapper(self, original_func, span_name, attributes_handler, is_async=False): + """Create a standard wrapper for non-streaming methods.""" + + if is_async: + async def async_wrapper(self, *args, **kwargs): + tracer = trace.get_tracer(__name__) + with tracer.start_as_current_span(span_name) as span: + try: + # Set attributes + attributes = attributes_handler(args=(self,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Execute the original function + result = await original_func(self, *args, **kwargs) + + # Set result attributes + result_attributes = attributes_handler(args=(self,) + args, kwargs=kwargs, return_value=result) + for key, value in result_attributes.items(): + if key not in attributes: # Avoid duplicates + span.set_attribute(key, 
value) + + span.set_status(Status(StatusCode.OK)) + return result + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + raise + + return async_wrapper + else: + def sync_wrapper(self, *args, **kwargs): + tracer = trace.get_tracer(__name__) + with tracer.start_as_current_span(span_name) as span: + try: + # Set attributes + attributes = attributes_handler(args=(self,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Execute the original function + result = original_func(self, *args, **kwargs) + + # Set result attributes + result_attributes = attributes_handler(args=(self,) + args, kwargs=kwargs, return_value=result) + for key, value in result_attributes.items(): + if key not in attributes: # Avoid duplicates + span.set_attribute(key, value) + + span.set_status(Status(StatusCode.OK)) + return result + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + raise + + return sync_wrapper + + def _create_llm_metrics_wrapper(self, original_func, attributes_handler): + """Create an LLM metrics wrapper with dynamic span naming.""" + + def wrapper(self, *args, **kwargs): + tracer = trace.get_tracer(__name__) + + # Extract model ID for dynamic span naming + span_name = "agno.agent.metrics" # fallback + if hasattr(self, 'model') and self.model and hasattr(self.model, 'id'): + model_id = str(self.model.id) + span_name = f"{model_id}.llm" + + with tracer.start_as_current_span(span_name) as span: + try: + # Set attributes + attributes = attributes_handler(args=(self,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Execute the original function + result = original_func(self, *args, **kwargs) + + # Set result attributes + result_attributes = attributes_handler(args=(self,) + args, kwargs=kwargs, return_value=result) + for key, value in result_attributes.items(): + if key 
not in attributes: # Avoid duplicates + span.set_attribute(key, value) + + span.set_status(Status(StatusCode.OK)) + return result + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + raise + + return wrapper + + def _uninstrument(self, **kwargs): + """Remove instrumentation for Agno.""" + try: + # Clear streaming contexts + _streaming_context_manager.clear_all() + + # Restore original workflow methods + if 'Workflow.run_workflow' in self._original_methods and self._original_methods['Workflow.run_workflow']: + import agno.workflow.workflow + agno.workflow.workflow.Workflow.run_workflow = self._original_methods['Workflow.run_workflow'] + agno.workflow.workflow.Workflow.arun_workflow = self._original_methods['Workflow.arun_workflow'] + logger.debug("Restored original Workflow.run_workflow and Workflow.arun_workflow methods") + + # Restore workflow session methods + workflow_session_methods = ['load_session', 'new_session', 'read_from_storage', 'write_to_storage'] + for method_name in workflow_session_methods: + key = f'Workflow.{method_name}' + if key in self._original_methods and self._original_methods[key]: + import agno.workflow.workflow + setattr(agno.workflow.workflow.Workflow, method_name, self._original_methods[key]) + logger.debug(f"Restored original Workflow.{method_name} method") + + # Restore original agent methods + if 'Agent.run' in self._original_methods: + import agno.agent + agno.agent.Agent.run = self._original_methods['Agent.run'] + agno.agent.Agent.arun = self._original_methods['Agent.arun'] + logger.debug("Restored original Agent.run and Agent.arun methods") + + # Restore original tool methods + if 'FunctionCall.execute' in self._original_methods: + import agno.tools.function + agno.tools.function.FunctionCall.execute = self._original_methods['FunctionCall.execute'] + logger.debug("Restored original FunctionCall.execute method") + + # Restore team methods + if 'Team._run' in 
self._original_methods and self._original_methods['Team._run']: + import agno.team.team + agno.team.team.Team._run = self._original_methods['Team._run'] + agno.team.team.Team._arun = self._original_methods['Team._arun'] + logger.debug("Restored original Team methods") + + # Restore metrics methods + if 'Agent._set_session_metrics' in self._original_methods: + import agno.agent + agno.agent.Agent._set_session_metrics = self._original_methods['Agent._set_session_metrics'] + logger.debug("Restored original Agent._set_session_metrics method") + + # Clear stored original methods + self._original_methods.clear() + + logger.info("Agno instrumentation removed successfully") + + except Exception as e: + logger.error(f"Failed to remove Agno instrumentation: {e}") + raise \ No newline at end of file From 7273c4dcbf6a79bcbc7cd641b6aa68df1c6ac703 Mon Sep 17 00:00:00 2001 From: fenilfaldu Date: Fri, 6 Jun 2025 19:22:19 +0530 Subject: [PATCH 02/14] Refactored Agno instrumentation --- agentops/instrumentation/agno/__init__.py | 64 +- .../agno/attributes/__init__.py | 7 +- .../instrumentation/agno/attributes/agent.py | 189 ++- .../agno/attributes/metrics.py | 220 +-- .../instrumentation/agno/attributes/model.py | 371 ----- .../instrumentation/agno/attributes/team.py | 294 ++-- .../instrumentation/agno/attributes/tool.py | 545 +------ .../agno/attributes/workflow.py | 181 +-- agentops/instrumentation/agno/instrumentor.py | 1388 ++++++++++------- 9 files changed, 1400 insertions(+), 1859 deletions(-) delete mode 100644 agentops/instrumentation/agno/attributes/model.py diff --git a/agentops/instrumentation/agno/__init__.py b/agentops/instrumentation/agno/__init__.py index 444d94b4c..7ebf85c09 100644 --- a/agentops/instrumentation/agno/__init__.py +++ b/agentops/instrumentation/agno/__init__.py @@ -2,74 +2,28 @@ import logging -logger = logging.getLogger(__name__) - -__version__ = "1.0.0" - -LIBRARY_NAME = "agno" -LIBRARY_VERSION = __version__ - from .instrumentor import 
AgnoInstrumentor -def get_current_agno_context(): - """ - Get the current Agno agent or workflow context for use by other instrumentations. - - This function allows other instrumentations (like LLM providers) to find and use - the current agent or team context for proper parent-child span relationships. - - Returns: - tuple: (context, span) if found, (None, None) otherwise - """ - try: - # Try to get current OpenTelemetry context first - from opentelemetry import context as otel_context, trace - current_context = otel_context.get_current() - current_span = trace.get_current_span(current_context) - - # Check if we're already in an agno span (agent, team, or workflow) - if current_span and hasattr(current_span, 'name'): - span_name = getattr(current_span, 'name', '') - if any(keyword in span_name for keyword in ['agno.agent.run', 'agno.team.run', 'agno.workflow']): - logger.debug(f"Found active agno span: {span_name}") - return current_context, current_span - - return None, None - - except Exception as e: - logger.debug(f"Error getting agno context: {e}") - return None, None - - -def get_agno_context_by_session(session_id: str): - """ - Legacy function for backward compatibility. 
- - Args: - session_id: Session identifier - - Returns: - tuple: (None, None) - not supported in new implementation - """ - logger.debug("get_agno_context_by_session is deprecated - context is managed automatically") - return None, None - - # Export attribute handlers for external use from .attributes.agent import get_agent_run_attributes from .attributes.team import get_team_run_attributes, get_team_public_run_attributes from .attributes.tool import get_tool_execution_attributes from .attributes.metrics import get_metrics_attributes +logger = logging.getLogger(__name__) + +__version__ = "1.0.0" + +LIBRARY_NAME = "agno" +LIBRARY_VERSION = __version__ + __all__ = [ "AgnoInstrumentor", "LIBRARY_NAME", "LIBRARY_VERSION", - "get_current_agno_context", - "get_agno_context_by_session", "get_agent_run_attributes", "get_team_run_attributes", "get_team_public_run_attributes", "get_tool_execution_attributes", - "get_metrics_attributes" -] \ No newline at end of file + "get_metrics_attributes", +] diff --git a/agentops/instrumentation/agno/attributes/__init__.py b/agentops/instrumentation/agno/attributes/__init__.py index d84223bbe..377f465a7 100644 --- a/agentops/instrumentation/agno/attributes/__init__.py +++ b/agentops/instrumentation/agno/attributes/__init__.py @@ -1,17 +1,14 @@ """Agno Agent attributes package for span instrumentation.""" from .agent import get_agent_run_attributes -from .model import get_session_metrics_attributes from .team import get_team_run_attributes from .tool import get_tool_execution_attributes -from .workflow import get_workflow_run_attributes, get_workflow_session_attributes, get_workflow_storage_attributes +from .workflow import get_workflow_run_attributes, get_workflow_session_attributes __all__ = [ "get_agent_run_attributes", - "get_session_metrics_attributes", "get_team_run_attributes", "get_tool_execution_attributes", "get_workflow_run_attributes", "get_workflow_session_attributes", - "get_workflow_storage_attributes", -] \ No newline at 
end of file +] diff --git a/agentops/instrumentation/agno/attributes/agent.py b/agentops/instrumentation/agno/attributes/agent.py index ecbcbbb37..535420ffe 100644 --- a/agentops/instrumentation/agno/attributes/agent.py +++ b/agentops/instrumentation/agno/attributes/agent.py @@ -24,6 +24,9 @@ def get_agent_run_attributes( """ attributes: AttributeMap = {} + # Initialize variables to avoid UnboundLocalError + agent_name = None + # Base attributes attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = AgentOpsSpanKind.WORKFLOW attributes[SpanAttributes.LLM_SYSTEM] = "agno" @@ -35,74 +38,83 @@ def get_agent_run_attributes( # Extract agent information from args[0] (self) if args and len(args) >= 1: agent = args[0] - + # Core agent identification using AgentAttributes - if hasattr(agent, 'agent_id') and agent.agent_id: + if hasattr(agent, "agent_id") and agent.agent_id: agent_id = str(agent.agent_id) attributes[AgentAttributes.AGENT_ID] = agent_id attributes["agno.agent.id"] = agent_id - - if hasattr(agent, 'name') and agent.name: + + if hasattr(agent, "name") and agent.name: agent_name = str(agent.name) attributes[AgentAttributes.AGENT_NAME] = agent_name attributes["agno.agent.name"] = agent_name - - if hasattr(agent, 'role') and agent.role: + + if hasattr(agent, "role") and agent.role: agent_role = str(agent.role) attributes[AgentAttributes.AGENT_ROLE] = agent_role attributes["agno.agent.role"] = agent_role + # Check if agent is part of a team + if hasattr(agent, "_team") and agent._team: + team = agent._team + if hasattr(team, "name") and team.name: + attributes["agno.agent.parent_team"] = str(team.name) + attributes["agno.agent.parent_team_display"] = f"Under {team.name}" + if hasattr(team, "team_id") and team.team_id: + attributes["agno.agent.parent_team_id"] = str(team.team_id) + # Model information using AgentAttributes - if hasattr(agent, 'model') and agent.model: + if hasattr(agent, "model") and agent.model: model = agent.model - if hasattr(model, 'id'): + if 
hasattr(model, "id"): model_id = str(model.id) attributes[AgentAttributes.AGENT_MODELS] = model_id attributes["agno.agent.model_id"] = model_id attributes[SpanAttributes.LLM_RESPONSE_MODEL] = model_id - - if hasattr(model, 'provider'): + + if hasattr(model, "provider"): model_provider = str(model.provider) attributes["agno.agent.model_provider"] = model_provider - attributes[SpanAttributes.LLM_REQUEST_MODEL] = model_id if hasattr(model, 'id') else 'unknown' + attributes[SpanAttributes.LLM_REQUEST_MODEL] = model_id if hasattr(model, "id") else "unknown" # Agent configuration details agent_config = {} - - if hasattr(agent, 'description') and agent.description: + + if hasattr(agent, "description") and agent.description: agent_config["description"] = str(agent.description)[:500] # Limit length - - if hasattr(agent, 'goal') and agent.goal: + + if hasattr(agent, "goal") and agent.goal: agent_config["goal"] = str(agent.goal)[:500] # Limit length - - if hasattr(agent, 'instructions') and agent.instructions: + + if hasattr(agent, "instructions") and agent.instructions: if isinstance(agent.instructions, list): agent_config["instructions"] = " | ".join(str(i) for i in agent.instructions[:3]) # First 3 else: agent_config["instructions"] = str(agent.instructions)[:500] - - if hasattr(agent, 'expected_output') and agent.expected_output: + + if hasattr(agent, "expected_output") and agent.expected_output: agent_config["expected_output"] = str(agent.expected_output)[:300] - - if hasattr(agent, 'markdown'): + + if hasattr(agent, "markdown"): agent_config["markdown"] = str(agent.markdown) - - if hasattr(agent, 'reasoning'): + + if hasattr(agent, "reasoning"): agent_config["reasoning"] = str(agent.reasoning) - - if hasattr(agent, 'stream'): + + if hasattr(agent, "stream"): agent_config["stream"] = str(agent.stream) - - if hasattr(agent, 'retries'): + + if hasattr(agent, "retries"): agent_config["max_retry_limit"] = str(agent.retries) - - if hasattr(agent, 'response_model') and 
agent.response_model: + + if hasattr(agent, "response_model") and agent.response_model: agent_config["response_model"] = str(agent.response_model.__name__) - - if hasattr(agent, 'show_tool_calls'): + + if hasattr(agent, "show_tool_calls"): agent_config["show_tool_calls"] = str(agent.show_tool_calls) - - if hasattr(agent, 'tool_call_limit') and agent.tool_call_limit: + + if hasattr(agent, "tool_call_limit") and agent.tool_call_limit: agent_config["tool_call_limit"] = str(agent.tool_call_limit) # Add agent config to attributes @@ -110,42 +122,43 @@ def get_agent_run_attributes( attributes[f"agno.agent.{key}"] = value # Tools information - if hasattr(agent, 'tools') and agent.tools: + if hasattr(agent, "tools") and agent.tools: tools_info = [] tool_names = [] - + for tool in agent.tools: tool_dict = {} - - if hasattr(tool, 'name'): + + if hasattr(tool, "name"): tool_name = str(tool.name) tool_dict["name"] = tool_name tool_names.append(tool_name) - elif hasattr(tool, '__name__'): + elif hasattr(tool, "__name__"): tool_name = str(tool.__name__) tool_dict["name"] = tool_name tool_names.append(tool_name) elif callable(tool): - tool_name = getattr(tool, '__name__', 'unknown_tool') + tool_name = getattr(tool, "__name__", "unknown_tool") tool_dict["name"] = tool_name tool_names.append(tool_name) - - if hasattr(tool, 'description'): + + if hasattr(tool, "description"): description = str(tool.description) if len(description) > 200: description = description[:197] + "..." 
tool_dict["description"] = description - + if tool_dict: # Only add if we have some info tools_info.append(tool_dict) - + # Set tool attributes if tool_names: attributes["agent.tools_names"] = ",".join(tool_names[:5]) # Limit to first 5 attributes["agno.agent.tools_count"] = str(len(tool_names)) - + if tools_info: import json + try: # Limit to first 3 tools to avoid overly long attributes limited_tools = tools_info[:3] @@ -156,24 +169,24 @@ def get_agent_run_attributes( attributes[AgentAttributes.AGENT_TOOLS] = str(tools_info) # Memory and knowledge information - if hasattr(agent, 'memory') and agent.memory: + if hasattr(agent, "memory") and agent.memory: memory_type = type(agent.memory).__name__ attributes["agno.agent.memory_type"] = memory_type - - if hasattr(agent, 'knowledge') and agent.knowledge: + + if hasattr(agent, "knowledge") and agent.knowledge: knowledge_type = type(agent.knowledge).__name__ attributes["agno.agent.knowledge_type"] = knowledge_type - - if hasattr(agent, 'storage') and agent.storage: + + if hasattr(agent, "storage") and agent.storage: storage_type = type(agent.storage).__name__ attributes["agno.agent.storage_type"] = storage_type # Session information - if hasattr(agent, 'session_id') and agent.session_id: + if hasattr(agent, "session_id") and agent.session_id: session_id = str(agent.session_id) attributes["agno.agent.session_id"] = session_id - - if hasattr(agent, 'user_id') and agent.user_id: + + if hasattr(agent, "user_id") and agent.user_id: user_id = str(agent.user_id) attributes["agno.agent.user_id"] = user_id @@ -191,73 +204,74 @@ def get_agent_run_attributes( # Extract kwargs information if kwargs: - if kwargs.get('stream') is not None: - attributes[SpanAttributes.LLM_REQUEST_STREAMING] = str(kwargs['stream']) - - if kwargs.get('session_id'): - attributes["agno.agent.run_session_id"] = str(kwargs['session_id']) - - if kwargs.get('user_id'): - attributes["agno.agent.run_user_id"] = str(kwargs['user_id']) + if kwargs.get("stream") 
is not None: + attributes[SpanAttributes.LLM_REQUEST_STREAMING] = str(kwargs["stream"]) + + if kwargs.get("session_id"): + attributes["agno.agent.run_session_id"] = str(kwargs["session_id"]) + + if kwargs.get("user_id"): + attributes["agno.agent.run_user_id"] = str(kwargs["user_id"]) # Extract return value information if return_value: - if hasattr(return_value, 'run_id') and return_value.run_id: + if hasattr(return_value, "run_id") and return_value.run_id: run_id = str(return_value.run_id) attributes["agno.agent.run_id"] = run_id - - if hasattr(return_value, 'session_id') and return_value.session_id: + + if hasattr(return_value, "session_id") and return_value.session_id: session_id = str(return_value.session_id) attributes["agno.agent.response_session_id"] = session_id - - if hasattr(return_value, 'agent_id') and return_value.agent_id: + + if hasattr(return_value, "agent_id") and return_value.agent_id: agent_id = str(return_value.agent_id) attributes["agno.agent.response_agent_id"] = agent_id - - if hasattr(return_value, 'content') and return_value.content: + + if hasattr(return_value, "content") and return_value.content: content = str(return_value.content) if len(content) > 500: content = content[:497] + "..." 
attributes[WorkflowAttributes.WORKFLOW_OUTPUT] = content attributes["agno.agent.output"] = content - - if hasattr(return_value, 'event') and return_value.event: + + if hasattr(return_value, "event") and return_value.event: event = str(return_value.event) attributes["agno.agent.event"] = event - + # Tool executions from the response - if hasattr(return_value, 'tools') and return_value.tools: + if hasattr(return_value, "tools") and return_value.tools: tool_executions = [] for tool_exec in return_value.tools: tool_exec_dict = {} - - if hasattr(tool_exec, 'tool_name') and tool_exec.tool_name: + + if hasattr(tool_exec, "tool_name") and tool_exec.tool_name: tool_exec_dict["name"] = str(tool_exec.tool_name) - - if hasattr(tool_exec, 'tool_args') and tool_exec.tool_args: + + if hasattr(tool_exec, "tool_args") and tool_exec.tool_args: try: import json + args_str = json.dumps(tool_exec.tool_args) if len(args_str) > 200: args_str = args_str[:197] + "..." tool_exec_dict["parameters"] = args_str except: tool_exec_dict["parameters"] = str(tool_exec.tool_args) - - if hasattr(tool_exec, 'result') and tool_exec.result: + + if hasattr(tool_exec, "result") and tool_exec.result: result_str = str(tool_exec.result) if len(result_str) > 200: result_str = result_str[:197] + "..." 
tool_exec_dict["result"] = result_str - - if hasattr(tool_exec, 'tool_call_error') and tool_exec.tool_call_error: + + if hasattr(tool_exec, "tool_call_error") and tool_exec.tool_call_error: tool_exec_dict["error"] = str(tool_exec.tool_call_error) - + tool_exec_dict["status"] = "success" # Default to success - + if tool_exec_dict: tool_executions.append(tool_exec_dict) - + if tool_executions: # Add tool executions (limit to first 3) limited_executions = tool_executions[:3] @@ -268,4 +282,13 @@ def get_agent_run_attributes( # Workflow type attributes[WorkflowAttributes.WORKFLOW_TYPE] = "agent_run" - return attributes \ No newline at end of file + # Add display name for better UI visualization + if agent_name: + # Check if we have parent team info + parent_team = attributes.get("agno.agent.parent_team") + if parent_team: + attributes["agno.agent.display_name"] = f"{agent_name} (Agent under {parent_team})" + else: + attributes["agno.agent.display_name"] = f"{agent_name} (Agent)" + + return attributes diff --git a/agentops/instrumentation/agno/attributes/metrics.py b/agentops/instrumentation/agno/attributes/metrics.py index 28305459a..306d702fa 100644 --- a/agentops/instrumentation/agno/attributes/metrics.py +++ b/agentops/instrumentation/agno/attributes/metrics.py @@ -4,7 +4,6 @@ from agentops.instrumentation.common.attributes import AttributeMap from agentops.semconv import SpanAttributes -from agentops.semconv.span_kinds import SpanKind as AgentOpsSpanKind def get_metrics_attributes( @@ -29,17 +28,8 @@ def get_metrics_attributes( attributes[SpanAttributes.LLM_SYSTEM] = "agno" attributes[SpanAttributes.AGENTOPS_ENTITY_NAME] = "LLM" - # Initialize default gen_ai.usage attributes to ensure they're always present - usage_attrs = { - "prompt_tokens": 0, - "completion_tokens": 0, - "total_tokens": 0, - "cache_read_input_tokens": 0, - "reasoning_tokens": 0, - "success_tokens": 0, - "fail_tokens": 0, - "indeterminate_tokens": 0 - } + # Initialize usage tracking variables 
(but don't set attributes yet) + usage_data = {} # Initialize counters for indexed messages prompt_count = 0 @@ -49,28 +39,32 @@ def get_metrics_attributes( if args and len(args) >= 2: agent = args[0] # self (Agent instance) run_messages = args[1] # RunMessages object - + + # Add agent display name for LLM calls + if hasattr(agent, "name") and agent.name: + attributes["agno.llm.display_name"] = f"{agent.name} → LLM" + # Model information - get additional request parameters if available - if hasattr(agent, 'model') and agent.model: + if hasattr(agent, "model") and agent.model: model = agent.model # Set model ID first - if hasattr(model, 'id'): + if hasattr(model, "id"): attributes[SpanAttributes.LLM_REQUEST_MODEL] = str(model.id) attributes[SpanAttributes.LLM_RESPONSE_MODEL] = str(model.id) # Additional model parameters - if hasattr(model, 'temperature') and model.temperature is not None: + if hasattr(model, "temperature") and model.temperature is not None: attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] = str(model.temperature) - if hasattr(model, 'max_tokens') and model.max_tokens is not None: + if hasattr(model, "max_tokens") and model.max_tokens is not None: attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] = str(model.max_tokens) - if hasattr(model, 'top_p') and model.top_p is not None: + if hasattr(model, "top_p") and model.top_p is not None: attributes[SpanAttributes.LLM_REQUEST_TOP_P] = str(model.top_p) - if hasattr(model, 'provider'): - attributes['agno.model.provider'] = str(model.provider) - + if hasattr(model, "provider"): + attributes["agno.model.provider"] = str(model.provider) + # === EXTRACT CONVERSATION STRUCTURE === - if hasattr(run_messages, 'messages') and run_messages.messages: + if hasattr(run_messages, "messages") and run_messages.messages: messages = run_messages.messages - + # Initialize token tracking total_prompt_tokens = 0 total_completion_tokens = 0 @@ -78,159 +72,167 @@ def get_metrics_attributes( total_input_tokens = 0 total_tokens 
= 0 total_time = 0.0 - + # Process messages to create individual indexed gen_ai.prompt.{i} and gen_ai.completion.{i} attributes for i, msg in enumerate(messages): # Extract message content for prompts/completions - if hasattr(msg, 'role') and hasattr(msg, 'content'): + if hasattr(msg, "role") and hasattr(msg, "content"): # Only set content if it's not None/empty if msg.content is not None and str(msg.content).strip() != "" and str(msg.content) != "None": content = str(msg.content) # Truncate very long content to avoid oversized attributes if len(content) > 1000: content = content[:997] + "..." - - if msg.role == 'user': - attributes[f'gen_ai.prompt.{prompt_count}.role'] = 'user' - attributes[f'gen_ai.prompt.{prompt_count}.content'] = content + + if msg.role == "user": + attributes[f"{SpanAttributes.LLM_PROMPTS}.{prompt_count}.role"] = "user" + attributes[f"{SpanAttributes.LLM_PROMPTS}.{prompt_count}.content"] = content prompt_count += 1 - elif msg.role == 'assistant': - attributes[f'gen_ai.completion.{completion_count}.role'] = 'assistant' - attributes[f'gen_ai.completion.{completion_count}.content'] = content + elif msg.role == "assistant": + attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_count}.role"] = "assistant" + attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_count}.content"] = content completion_count += 1 - elif msg.role == 'system': - attributes[f'gen_ai.prompt.{prompt_count}.role'] = 'system' - attributes[f'gen_ai.prompt.{prompt_count}.content'] = content + elif msg.role == "system": + attributes[f"{SpanAttributes.LLM_PROMPTS}.{prompt_count}.role"] = "system" + attributes[f"{SpanAttributes.LLM_PROMPTS}.{prompt_count}.content"] = content prompt_count += 1 else: # For messages with None content, still set the role but skip content - if msg.role == 'user': - attributes[f'gen_ai.prompt.{prompt_count}.role'] = 'user' + if msg.role == "user": + attributes[f"{SpanAttributes.LLM_PROMPTS}.{prompt_count}.role"] = "user" prompt_count += 1 - 
elif msg.role == 'assistant': - attributes[f'gen_ai.completion.{completion_count}.role'] = 'assistant' + elif msg.role == "assistant": + attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_count}.role"] = "assistant" completion_count += 1 - elif msg.role == 'system': - attributes[f'gen_ai.prompt.{prompt_count}.role'] = 'system' + elif msg.role == "system": + attributes[f"{SpanAttributes.LLM_PROMPTS}.{prompt_count}.role"] = "system" prompt_count += 1 - + # Extract token metrics from message - if hasattr(msg, 'metrics') and msg.metrics: + if hasattr(msg, "metrics") and msg.metrics: metrics = msg.metrics - + # Handle different token metric patterns - if hasattr(metrics, 'prompt_tokens') and metrics.prompt_tokens > 0: + if hasattr(metrics, "prompt_tokens") and metrics.prompt_tokens > 0: total_prompt_tokens += metrics.prompt_tokens - if hasattr(metrics, 'completion_tokens') and metrics.completion_tokens > 0: + if hasattr(metrics, "completion_tokens") and metrics.completion_tokens > 0: total_completion_tokens += metrics.completion_tokens - if hasattr(metrics, 'total_tokens') and metrics.total_tokens > 0: + if hasattr(metrics, "total_tokens") and metrics.total_tokens > 0: total_tokens += metrics.total_tokens # For messages that only have output_tokens (like Anthropic) - if hasattr(metrics, 'output_tokens') and metrics.output_tokens > 0: + if hasattr(metrics, "output_tokens") and metrics.output_tokens > 0: total_output_tokens += metrics.output_tokens - if hasattr(metrics, 'input_tokens') and metrics.input_tokens > 0: + if hasattr(metrics, "input_tokens") and metrics.input_tokens > 0: total_input_tokens += metrics.input_tokens - if hasattr(metrics, 'time') and metrics.time: + if hasattr(metrics, "time") and metrics.time: total_time += metrics.time # === TOKEN METRICS FROM AGENT SESSION METRICS === - if hasattr(agent, 'session_metrics') and agent.session_metrics: + if hasattr(agent, "session_metrics") and agent.session_metrics: session_metrics = agent.session_metrics - 
+ # Try to get model name from session metrics if not already set if SpanAttributes.LLM_REQUEST_MODEL not in attributes: - if hasattr(session_metrics, 'model') and session_metrics.model: + if hasattr(session_metrics, "model") and session_metrics.model: model_id = str(session_metrics.model) attributes[SpanAttributes.LLM_REQUEST_MODEL] = model_id attributes[SpanAttributes.LLM_RESPONSE_MODEL] = model_id - + # Use session metrics for more accurate token counts - session_prompt_tokens = getattr(session_metrics, 'prompt_tokens', 0) - session_completion_tokens = getattr(session_metrics, 'completion_tokens', 0) - session_output_tokens = getattr(session_metrics, 'output_tokens', 0) - session_input_tokens = getattr(session_metrics, 'input_tokens', 0) - session_total_tokens = getattr(session_metrics, 'total_tokens', 0) - + session_prompt_tokens = getattr(session_metrics, "prompt_tokens", 0) + session_completion_tokens = getattr(session_metrics, "completion_tokens", 0) + session_output_tokens = getattr(session_metrics, "output_tokens", 0) + session_input_tokens = getattr(session_metrics, "input_tokens", 0) + session_total_tokens = getattr(session_metrics, "total_tokens", 0) + # For Anthropic, output_tokens represents completion tokens if session_output_tokens > 0 and session_completion_tokens == 0: session_completion_tokens = session_output_tokens - + # For some providers, input_tokens represents prompt tokens if session_input_tokens > 0 and session_prompt_tokens == 0: session_prompt_tokens = session_input_tokens - - # Only override if session metrics provide better data + + # Only set token attributes if we have actual values if session_total_tokens > 0: - usage_attrs["total_tokens"] = session_total_tokens - + usage_data["total_tokens"] = session_total_tokens + # Set breakdown if available if session_prompt_tokens > 0: - usage_attrs["prompt_tokens"] = session_prompt_tokens + usage_data["prompt_tokens"] = session_prompt_tokens if session_completion_tokens > 0: - 
usage_attrs["completion_tokens"] = session_completion_tokens - - # Additional token types from session metrics - if hasattr(session_metrics, 'cached_tokens') and session_metrics.cached_tokens > 0: - usage_attrs["cache_read_input_tokens"] = session_metrics.cached_tokens - if hasattr(session_metrics, 'reasoning_tokens') and session_metrics.reasoning_tokens > 0: - usage_attrs["reasoning_tokens"] = session_metrics.reasoning_tokens - - # Success/fail token metrics + usage_data["completion_tokens"] = session_completion_tokens + + # Additional token types from session metrics - only set if present + if hasattr(session_metrics, "cached_tokens") and session_metrics.cached_tokens > 0: + usage_data["cache_read_input_tokens"] = session_metrics.cached_tokens + if hasattr(session_metrics, "reasoning_tokens") and session_metrics.reasoning_tokens > 0: + usage_data["reasoning_tokens"] = session_metrics.reasoning_tokens + + # Success/fail token metrics - only set if we have tokens if session_total_tokens > 0: - usage_attrs["success_tokens"] = session_total_tokens - usage_attrs["fail_tokens"] = 0 - usage_attrs["indeterminate_tokens"] = 0 + usage_data["success_tokens"] = session_total_tokens + # Only set fail/indeterminate as 0 when we have success tokens + usage_data["fail_tokens"] = 0 + usage_data["indeterminate_tokens"] = 0 # === FALLBACK TO MESSAGE AGGREGATION IF SESSION METRICS ARE EMPTY === - # If session metrics don't have token info, use message aggregation - if usage_attrs["total_tokens"] == 0: + # If we don't have token data from session metrics, try message aggregation + if "total_tokens" not in usage_data: # Set aggregated token usage from messages if total_prompt_tokens > 0 or total_input_tokens > 0: - usage_attrs["prompt_tokens"] = total_prompt_tokens or total_input_tokens + usage_data["prompt_tokens"] = total_prompt_tokens or total_input_tokens if total_completion_tokens > 0 or total_output_tokens > 0: - usage_attrs["completion_tokens"] = total_completion_tokens or 
total_output_tokens + usage_data["completion_tokens"] = total_completion_tokens or total_output_tokens if total_tokens > 0: - usage_attrs["total_tokens"] = total_tokens - + usage_data["total_tokens"] = total_tokens + # Handle case where we have total but no breakdown (common with Anthropic) - if usage_attrs["prompt_tokens"] == 0 and usage_attrs["completion_tokens"] == 0: + if usage_data.get("prompt_tokens", 0) == 0 and usage_data.get("completion_tokens", 0) == 0: # If we only have completion tokens from output_tokens, assume all are completion if total_output_tokens > 0: - usage_attrs["completion_tokens"] = total_output_tokens - usage_attrs["prompt_tokens"] = max(0, total_tokens - total_output_tokens) + usage_data["completion_tokens"] = total_output_tokens + usage_data["prompt_tokens"] = max(0, total_tokens - total_output_tokens) # Otherwise try to split reasonably elif total_tokens > 0: # For pure generation, most tokens are usually completion estimated_completion = int(total_tokens * 0.7) # Rough estimate estimated_prompt = total_tokens - estimated_completion - usage_attrs["completion_tokens"] = estimated_completion - usage_attrs["prompt_tokens"] = estimated_prompt - - # Success/fail tokens from message aggregation + usage_data["completion_tokens"] = estimated_completion + usage_data["prompt_tokens"] = estimated_prompt + + # Success/fail tokens from message aggregation - only set if we have tokens if total_tokens > 0: - usage_attrs["success_tokens"] = total_tokens - usage_attrs["fail_tokens"] = 0 - usage_attrs["indeterminate_tokens"] = 0 + usage_data["success_tokens"] = total_tokens + usage_data["fail_tokens"] = 0 + usage_data["indeterminate_tokens"] = 0 # Extract user message info if available - if hasattr(run_messages, 'user_message') and run_messages.user_message: + if hasattr(run_messages, "user_message") and run_messages.user_message: user_msg = run_messages.user_message - if hasattr(user_msg, 'content'): + if hasattr(user_msg, "content"): content = 
str(user_msg.content) if len(content) > 1000: content = content[:997] + "..." - attributes['agno.metrics.user_input'] = content - - # Set individual LLM usage attributes that AgentOps expects - attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = usage_attrs["prompt_tokens"] - attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = usage_attrs["completion_tokens"] - attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = usage_attrs["total_tokens"] - attributes[SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS] = usage_attrs["cache_read_input_tokens"] - attributes[SpanAttributes.LLM_USAGE_REASONING_TOKENS] = usage_attrs["reasoning_tokens"] - + attributes["agno.metrics.user_input"] = content + + # Set individual LLM usage attributes only for values we actually have + if "prompt_tokens" in usage_data: + attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = usage_data["prompt_tokens"] + if "completion_tokens" in usage_data: + attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = usage_data["completion_tokens"] + if "total_tokens" in usage_data: + attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = usage_data["total_tokens"] + if "cache_read_input_tokens" in usage_data: + attributes[SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS] = usage_data["cache_read_input_tokens"] + if "reasoning_tokens" in usage_data: + attributes[SpanAttributes.LLM_USAGE_REASONING_TOKENS] = usage_data["reasoning_tokens"] + # Also keep the nested format and individual gen_ai.usage.* attributes for compatibility - attributes["gen_ai.usage"] = usage_attrs - for key, value in usage_attrs.items(): - attributes[f"gen_ai.usage.{key}"] = value + # But only if we have any usage data + if usage_data: + attributes["gen_ai.usage"] = usage_data + for key, value in usage_data.items(): + attributes[f"gen_ai.usage.{key}"] = value - return attributes \ No newline at end of file + return attributes diff --git a/agentops/instrumentation/agno/attributes/model.py b/agentops/instrumentation/agno/attributes/model.py 
deleted file mode 100644 index ec7a59abe..000000000 --- a/agentops/instrumentation/agno/attributes/model.py +++ /dev/null @@ -1,371 +0,0 @@ -"""Agno Model response attributes handler.""" - -from typing import Optional, Tuple, Dict, Any - -from agentops.instrumentation.common.attributes import AttributeMap -from agentops.semconv import SpanAttributes -from agentops.semconv.span_kinds import SpanKind as AgentOpsSpanKind - - -def get_model_response_attributes( - args: Optional[Tuple] = None, - kwargs: Optional[Dict] = None, - return_value: Optional[Any] = None, -) -> AttributeMap: - """Extract span attributes for Model.response method calls. - - Args: - args: Positional arguments passed to the Model.response method - kwargs: Keyword arguments passed to the Model.response method - return_value: The return value from the Model.response method - - Returns: - A dictionary of span attributes to be set on the LLM span - """ - attributes: AttributeMap = {} - - # Base attributes - attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = AgentOpsSpanKind.LLM_CALL - attributes[SpanAttributes.LLM_SYSTEM] = "agno" - - print(f"[DEBUG] get_model_response_attributes called") - - # Process input arguments - if kwargs: - # Extract messages from kwargs - messages = kwargs.get('messages') - if messages: - for i, msg in enumerate(messages): - if isinstance(msg, dict): - if 'role' in msg: - attributes[f'gen_ai.prompt.{i}.role'] = msg['role'] - if 'content' in msg: - content = msg['content'] - if len(str(content)) > 1000: - content = str(content)[:997] + "..." - attributes[f'gen_ai.prompt.{i}.content'] = str(content) - elif hasattr(msg, 'role') and hasattr(msg, 'content'): - attributes[f'gen_ai.prompt.{i}.role'] = msg.role - content = msg.content - if len(str(content)) > 1000: - content = str(content)[:997] + "..." 
- attributes[f'gen_ai.prompt.{i}.content'] = str(content) - - # Extract response format - if kwargs.get('response_format'): - attributes['agno.model.response_format'] = str(kwargs['response_format']) - - # Extract tools information - tools = kwargs.get('tools') - if tools: - attributes['agno.model.tools_count'] = str(len(tools)) - for i, tool in enumerate(tools): - if hasattr(tool, 'name'): - attributes[f'agno.model.tools.{i}.name'] = tool.name - if hasattr(tool, 'description'): - description = tool.description - if len(str(description)) > 200: - description = str(description)[:197] + "..." - attributes[f'agno.model.tools.{i}.description'] = str(description) - - # Extract functions information - functions = kwargs.get('functions') - if functions: - attributes['agno.model.functions_count'] = str(len(functions)) - for i, func in enumerate(functions): - if hasattr(func, 'name'): - attributes[f'agno.model.functions.{i}.name'] = func.name - - # Extract tool choice - if kwargs.get('tool_choice'): - attributes['agno.model.tool_choice'] = str(kwargs['tool_choice']) - - # Extract tool call limit - if kwargs.get('tool_call_limit'): - attributes['agno.model.tool_call_limit'] = str(kwargs['tool_call_limit']) - - # Process positional arguments (first arg is typically messages) - if args and args[0] and not kwargs.get('messages'): - messages = args[0] - if isinstance(messages, list): - for i, msg in enumerate(messages): - if isinstance(msg, dict): - if 'role' in msg: - attributes[f'gen_ai.prompt.{i}.role'] = msg['role'] - if 'content' in msg: - content = msg['content'] - if len(str(content)) > 1000: - content = str(content)[:997] + "..." - attributes[f'gen_ai.prompt.{i}.content'] = str(content) - - # Process return value - if return_value: - # Set completion content - if hasattr(return_value, 'content'): - content = return_value.content - if len(str(content)) > 1000: - content = str(content)[:997] + "..." 
- attributes['gen_ai.completion.0.content'] = str(content) - attributes['gen_ai.completion.0.role'] = 'assistant' - - # Set usage metrics - Enhanced to capture all token types - if hasattr(return_value, 'usage'): - usage = return_value.usage - if hasattr(usage, 'prompt_tokens'): - attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = usage.prompt_tokens - if hasattr(usage, 'completion_tokens'): - attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = usage.completion_tokens - if hasattr(usage, 'total_tokens'): - attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = usage.total_tokens - if hasattr(usage, 'reasoning_tokens'): - attributes[SpanAttributes.LLM_USAGE_REASONING_TOKENS] = usage.reasoning_tokens - if hasattr(usage, 'cached_tokens'): - attributes[SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS] = usage.cached_tokens - if hasattr(usage, 'cache_creation_input_tokens'): - attributes[SpanAttributes.LLM_USAGE_CACHE_CREATION_INPUT_TOKENS] = usage.cache_creation_input_tokens - - # Set response usage if available - if hasattr(return_value, 'response_usage') and return_value.response_usage: - response_usage = return_value.response_usage - if hasattr(response_usage, 'prompt_tokens'): - attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = response_usage.prompt_tokens - if hasattr(response_usage, 'completion_tokens'): - attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = response_usage.completion_tokens - if hasattr(response_usage, 'total_tokens'): - attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = response_usage.total_tokens - - # Set finish reason - if hasattr(return_value, 'finish_reason'): - attributes[SpanAttributes.LLM_RESPONSE_FINISH_REASON] = return_value.finish_reason - - # Set response ID - if hasattr(return_value, 'id'): - attributes[SpanAttributes.LLM_RESPONSE_ID] = str(return_value.id) - - # Set tool calls if present - if hasattr(return_value, 'tool_calls') and return_value.tool_calls: - for i, tool_call in enumerate(return_value.tool_calls): - if 
hasattr(tool_call, 'function'): - function = tool_call.function - if hasattr(function, 'name'): - attributes[f'agno.model.response.tool_calls.{i}.name'] = function.name - if hasattr(function, 'arguments'): - args_str = str(function.arguments) - if len(args_str) > 500: - args_str = args_str[:497] + "..." - attributes[f'agno.model.response.tool_calls.{i}.arguments'] = args_str - - # Set raw response for debugging - if hasattr(return_value, 'raw'): - raw_response = str(return_value.raw) - if len(raw_response) > 2000: - raw_response = raw_response[:1997] + "..." - attributes['agno.model.raw_response'] = raw_response - - return attributes - - -def get_session_metrics_attributes( - args: Optional[Tuple] = None, - kwargs: Optional[Dict] = None, - return_value: Optional[Any] = None, -) -> AttributeMap: - """Extract span attributes from Agent._set_session_metrics method calls. - - This captures comprehensive session metrics AND model request/response data. - - Args: - args: Positional arguments passed to the _set_session_metrics method - kwargs: Keyword arguments passed to the _set_session_metrics method - return_value: The return value from the _set_session_metrics method - - Returns: - A dictionary of span attributes to be set on the metrics span - """ - attributes: AttributeMap = {} - - # Base attributes - attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = AgentOpsSpanKind.AGENT - attributes[SpanAttributes.LLM_SYSTEM] = "agno" - - print(f"[DEBUG] get_session_metrics_attributes called") - print(f"[DEBUG] args: {args}") - print(f"[DEBUG] kwargs: {kwargs}") - print(f"[DEBUG] return_value: {return_value}") - - # The agent instance is the wrapped instance, args[0] is RunMessages - # We need to access the agent through the call stack or extract data from RunMessages - if args and len(args) > 0: - run_messages = args[0] - print(f"[DEBUG] run_messages type: {type(run_messages)}") - - # === EXTRACT DATA FROM RUNMESSAGES === - if hasattr(run_messages, 'messages') and 
run_messages.messages: - messages = run_messages.messages - print(f"[DEBUG] Found {len(messages)} messages") - - total_prompt_tokens = 0 - total_completion_tokens = 0 - total_tokens = 0 - total_time = 0 - - prompt_count = 0 - completion_count = 0 - - # Process each message - for i, msg in enumerate(messages): - print(f"[DEBUG] Message {i}: role={getattr(msg, 'role', 'unknown')}") - - # Extract message content for prompts/completions - if hasattr(msg, 'role') and hasattr(msg, 'content'): - # Only set content if it's not None/empty - if msg.content is not None and str(msg.content).strip() != "" and str(msg.content) != "None": - content = str(msg.content) - if len(content) > 1000: - content = content[:997] + "..." - - if msg.role == 'user': - attributes[f'gen_ai.prompt.{prompt_count}.role'] = 'user' - attributes[f'gen_ai.prompt.{prompt_count}.content'] = content - prompt_count += 1 - elif msg.role == 'assistant': - attributes[f'gen_ai.completion.{completion_count}.role'] = 'assistant' - attributes[f'gen_ai.completion.{completion_count}.content'] = content - completion_count += 1 - else: - # For messages with None content, still set the role but skip content - if msg.role == 'user': - attributes[f'gen_ai.prompt.{prompt_count}.role'] = 'user' - prompt_count += 1 - elif msg.role == 'assistant': - attributes[f'gen_ai.completion.{completion_count}.role'] = 'assistant' - completion_count += 1 - - # Extract token metrics from message - if hasattr(msg, 'metrics') and msg.metrics: - metrics = msg.metrics - print(f"[DEBUG] Message {i} metrics: {metrics}") - - # Handle different token metric patterns - if hasattr(metrics, 'prompt_tokens') and metrics.prompt_tokens > 0: - total_prompt_tokens += metrics.prompt_tokens - if hasattr(metrics, 'completion_tokens') and metrics.completion_tokens > 0: - total_completion_tokens += metrics.completion_tokens - if hasattr(metrics, 'total_tokens') and metrics.total_tokens > 0: - total_tokens += metrics.total_tokens - # For messages that only 
have output_tokens (like Anthropic) - if hasattr(metrics, 'output_tokens') and metrics.output_tokens > 0: - total_completion_tokens += metrics.output_tokens - if hasattr(metrics, 'time') and metrics.time: - total_time += metrics.time - - # Set aggregated token usage - if total_prompt_tokens > 0: - attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = total_prompt_tokens - attributes['agno.metrics.prompt_tokens'] = total_prompt_tokens - if total_completion_tokens > 0: - attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = total_completion_tokens - attributes['agno.metrics.completion_tokens'] = total_completion_tokens - if total_tokens > 0: - attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = total_tokens - - # Handle case where we have total but no breakdown (common with Anthropic) - if total_prompt_tokens == 0 and total_completion_tokens == 0: - # We'll try to get the breakdown from session_metrics later - print(f"[DEBUG] Total tokens ({total_tokens}) available but no breakdown - will try session_metrics") - elif total_prompt_tokens > 0 or total_completion_tokens > 0: - # Ensure totals are consistent - calculated_total = total_prompt_tokens + total_completion_tokens - if calculated_total != total_tokens: - print(f"[DEBUG] Token mismatch: calculated={calculated_total}, reported={total_tokens}") - # Use the more reliable total - attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = max(total_tokens, calculated_total) - - if total_time > 0: - attributes['agno.metrics.total_time'] = total_time - - print(f"[DEBUG] Aggregated tokens: prompt={total_prompt_tokens}, completion={total_completion_tokens}, total={total_tokens}, time={total_time}") - - # Extract user message info - if hasattr(run_messages, 'user_message') and run_messages.user_message: - user_msg = run_messages.user_message - if hasattr(user_msg, 'content'): - content = str(user_msg.content) - if len(content) > 1000: - content = content[:997] + "..." 
- attributes['agno.metrics.user_input'] = content - - # Try to get agent instance from the call stack for additional data - import inspect - try: - for frame in inspect.stack(): - frame_locals = frame.frame.f_locals - # Look for agent instance in the call stack - for var_name, var_value in frame_locals.items(): - if (hasattr(var_value, 'session_metrics') and - hasattr(var_value, 'run') and - var_name in ['self', 'agent', 'instance']): - agent_instance = var_value - print(f"[DEBUG] Found agent instance in call stack: {type(agent_instance)}") - - # === MODEL INFO FROM AGENT === - if hasattr(agent_instance, 'model') and agent_instance.model: - model = agent_instance.model - if hasattr(model, 'id'): - attributes[SpanAttributes.LLM_REQUEST_MODEL] = str(model.id) - attributes[SpanAttributes.LLM_RESPONSE_MODEL] = str(model.id) - if hasattr(model, 'provider'): - attributes['agno.model.provider'] = str(model.provider) - - # === TOOLS INFO FROM AGENT === - if hasattr(agent_instance, 'tools') and agent_instance.tools: - tools = agent_instance.tools - attributes['agno.model.tools_count'] = str(len(tools)) - for i, tool in enumerate(tools): - if hasattr(tool, 'name'): - attributes[f'agno.model.tools.{i}.name'] = tool.name - - # === SESSION METRICS FROM AGENT (if available) === - if hasattr(agent_instance, 'session_metrics') and agent_instance.session_metrics: - session_metrics = agent_instance.session_metrics - print(f"[DEBUG] Found session_metrics on agent: {session_metrics}") - - # Use session metrics for more accurate token counts - session_prompt_tokens = getattr(session_metrics, 'prompt_tokens', 0) - session_completion_tokens = getattr(session_metrics, 'completion_tokens', 0) - session_output_tokens = getattr(session_metrics, 'output_tokens', 0) - session_total_tokens = getattr(session_metrics, 'total_tokens', 0) - - # For Anthropic, output_tokens represents completion tokens - if session_output_tokens > 0 and session_completion_tokens == 0: - session_completion_tokens = 
session_output_tokens - - # Only override if session metrics provide better breakdown - if session_total_tokens > 0: - attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = session_total_tokens - - # Set breakdown if available - if session_prompt_tokens > 0: - attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = session_prompt_tokens - attributes['agno.metrics.prompt_tokens'] = session_prompt_tokens - if session_completion_tokens > 0: - attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = session_completion_tokens - attributes['agno.metrics.completion_tokens'] = session_completion_tokens - - # If we have total but still no breakdown, estimate it - if session_prompt_tokens == 0 and session_completion_tokens > 0: - # All tokens are completion tokens (common for generative responses) - print(f"[DEBUG] Using all {session_total_tokens} tokens as completion tokens") - elif session_prompt_tokens > 0 and session_completion_tokens == 0: - # All tokens are prompt tokens (rare case) - print(f"[DEBUG] Using all {session_total_tokens} tokens as prompt tokens") - - if hasattr(session_metrics, 'time') and session_metrics.time: - attributes['agno.metrics.total_time'] = session_metrics.time - - break - except Exception as e: - print(f"[DEBUG] Error accessing call stack: {e}") - - print(f"[DEBUG] Final attributes keys: {list(attributes.keys())}") - return attributes \ No newline at end of file diff --git a/agentops/instrumentation/agno/attributes/team.py b/agentops/instrumentation/agno/attributes/team.py index e327e633a..b3cc68081 100644 --- a/agentops/instrumentation/agno/attributes/team.py +++ b/agentops/instrumentation/agno/attributes/team.py @@ -29,36 +29,88 @@ def get_team_run_attributes( attributes[SpanAttributes.LLM_SYSTEM] = "agno" attributes[WorkflowAttributes.WORKFLOW_TYPE] = "team_run" + # Extract team information from instance + if args and len(args) > 0: + team = args[0] # self (Team instance) + + # Team identification + if hasattr(team, "name") and team.name: + 
attributes["agno.team.name"] = str(team.name) + attributes["agno.team.display_name"] = f"{team.name} (Team)" + + if hasattr(team, "team_id") and team.team_id: + attributes["agno.team.team_id"] = str(team.team_id) + + if hasattr(team, "mode") and team.mode: + attributes["agno.team.mode"] = str(team.mode) + + if hasattr(team, "members") and team.members: + attributes["agno.team.members_count"] = str(len(team.members)) + + # Add detailed member information + member_agents = [] + for i, member in enumerate(team.members): + member_info = {} + if hasattr(member, "name") and member.name: + member_info["name"] = str(member.name) + if hasattr(member, "agent_id") and member.agent_id: + member_info["id"] = str(member.agent_id) + if hasattr(member, "role") and member.role: + member_info["role"] = str(member.role) + if hasattr(member, "model") and member.model: + if hasattr(member.model, "id"): + member_info["model"] = str(member.model.id) + + # Add member info to list + if member_info: + member_agents.append(member_info) + + # Also add individual member attributes + for key, value in member_info.items(): + attributes[f"agno.team.member.{i}.{key}"] = value + + # Add aggregated member list + if member_agents: + import json + + try: + attributes["agno.team.members"] = json.dumps(member_agents) + # Also add a simple list of member names + member_names = [m.get("name", "Unknown") for m in member_agents] + attributes["agno.team.member_names"] = ", ".join(member_names) + except: + attributes["agno.team.members"] = str(member_agents) + # Process input arguments from the run_messages parameter if args and len(args) >= 2: # args[0] is run_response, args[1] is run_messages run_messages = args[1] - if hasattr(run_messages, 'messages') and run_messages.messages: + if hasattr(run_messages, "messages") and run_messages.messages: # Get the user message for workflow input - user_messages = [msg for msg in run_messages.messages if hasattr(msg, 'role') and msg.role == 'user'] + user_messages = 
[msg for msg in run_messages.messages if hasattr(msg, "role") and msg.role == "user"] if user_messages: last_user_msg = user_messages[-1] - if hasattr(last_user_msg, 'content'): + if hasattr(last_user_msg, "content"): attributes[WorkflowAttributes.WORKFLOW_INPUT] = str(last_user_msg.content) attributes[WorkflowAttributes.WORKFLOW_INPUT_TYPE] = "message" - + # Count total messages - attributes['agno.team.messages_count'] = str(len(run_messages.messages)) + attributes["agno.team.messages_count"] = str(len(run_messages.messages)) # Process keyword arguments if kwargs: - if kwargs.get('user_id'): - attributes[SpanAttributes.LLM_USER] = kwargs['user_id'] - - if kwargs.get('session_id'): - attributes['agno.team.session_id'] = kwargs['session_id'] - - if kwargs.get('response_format'): - attributes['agno.team.response_format'] = str(type(kwargs['response_format']).__name__) + if kwargs.get("user_id"): + attributes[SpanAttributes.LLM_USER] = kwargs["user_id"] + + if kwargs.get("session_id"): + attributes["agno.team.session_id"] = kwargs["session_id"] + + if kwargs.get("response_format"): + attributes["agno.team.response_format"] = str(type(kwargs["response_format"]).__name__) # Process return value (TeamRunResponse) if return_value: - if hasattr(return_value, 'content'): + if hasattr(return_value, "content"): content = str(return_value.content) # Truncate if too long if len(content) > 1000: @@ -71,29 +123,29 @@ def get_team_run_attributes( output = output[:997] + "..." 
attributes[WorkflowAttributes.WORKFLOW_OUTPUT] = output attributes[WorkflowAttributes.WORKFLOW_OUTPUT_TYPE] = type(return_value).__name__ - + # Set additional team response attributes - if hasattr(return_value, 'run_id'): - attributes['agno.team.run_id'] = str(return_value.run_id) - - if hasattr(return_value, 'session_id'): - attributes['agno.team.response_session_id'] = str(return_value.session_id) - - if hasattr(return_value, 'team_id'): - attributes['agno.team.response_team_id'] = str(return_value.team_id) - - if hasattr(return_value, 'model'): + if hasattr(return_value, "run_id"): + attributes["agno.team.run_id"] = str(return_value.run_id) + + if hasattr(return_value, "session_id"): + attributes["agno.team.response_session_id"] = str(return_value.session_id) + + if hasattr(return_value, "team_id"): + attributes["agno.team.response_team_id"] = str(return_value.team_id) + + if hasattr(return_value, "model"): attributes[SpanAttributes.LLM_RESPONSE_MODEL] = str(return_value.model) - - if hasattr(return_value, 'model_provider'): - attributes['agno.team.model_provider'] = str(return_value.model_provider) - - if hasattr(return_value, 'event'): - attributes['agno.team.event'] = str(return_value.event) - + + if hasattr(return_value, "model_provider"): + attributes["agno.team.model_provider"] = str(return_value.model_provider) + + if hasattr(return_value, "event"): + attributes["agno.team.event"] = str(return_value.event) + # Team-specific attributes - if hasattr(return_value, 'content_type'): - attributes['agno.team.response_content_type'] = str(return_value.content_type) + if hasattr(return_value, "content_type"): + attributes["agno.team.response_content_type"] = str(return_value.content_type) return attributes @@ -120,36 +172,74 @@ def get_team_public_run_attributes( attributes[SpanAttributes.LLM_SYSTEM] = "agno" attributes[WorkflowAttributes.WORKFLOW_TYPE] = "team_run" + # Extract team information from instance + if args and len(args) > 0: + team = args[0] # self 
(Team instance) + + # Team identification + if hasattr(team, "name") and team.name: + attributes["agno.team.name"] = str(team.name) + attributes["agno.team.display_name"] = f"{team.name} (Team)" + + if hasattr(team, "team_id") and team.team_id: + attributes["agno.team.team_id"] = str(team.team_id) + + if hasattr(team, "mode") and team.mode: + attributes["agno.team.mode"] = str(team.mode) + + if hasattr(team, "members") and team.members: + attributes["agno.team.members_count"] = str(len(team.members)) + + # Add detailed member information + member_agents = [] + for i, member in enumerate(team.members): + member_info = {} + if hasattr(member, "name") and member.name: + member_info["name"] = str(member.name) + if hasattr(member, "agent_id") and member.agent_id: + member_info["id"] = str(member.agent_id) + if hasattr(member, "role") and member.role: + member_info["role"] = str(member.role) + if hasattr(member, "model") and member.model: + if hasattr(member.model, "id"): + member_info["model"] = str(member.model.id) + + # Add member info to list + if member_info: + member_agents.append(member_info) + + # Also add individual member attributes + for key, value in member_info.items(): + attributes[f"agno.team.member.{i}.{key}"] = value + + # Add aggregated member list + if member_agents: + import json + + try: + attributes["agno.team.members"] = json.dumps(member_agents) + # Also add a simple list of member names + member_names = [m.get("name", "Unknown") for m in member_agents] + attributes["agno.team.member_names"] = ", ".join(member_names) + except: + attributes["agno.team.members"] = str(member_agents) + # Process input arguments from Team.run() method if args and len(args) >= 2: # args[0] is self (Team instance), args[1] is message - team_instance = args[0] message = args[1] - - # Extract team information - if hasattr(team_instance, 'name') and team_instance.name: - attributes['agno.team.name'] = str(team_instance.name) - - if hasattr(team_instance, 'team_id') and 
team_instance.team_id: - attributes['agno.team.team_id'] = str(team_instance.team_id) - - if hasattr(team_instance, 'mode') and team_instance.mode: - attributes['agno.team.mode'] = str(team_instance.mode) - - if hasattr(team_instance, 'members') and team_instance.members: - attributes['agno.team.members_count'] = str(len(team_instance.members)) - + # Extract workflow input from message if message is not None: if isinstance(message, str): message_content = message - elif hasattr(message, 'content'): + elif hasattr(message, "content"): message_content = str(message.content) - elif hasattr(message, 'get_content_string'): + elif hasattr(message, "get_content_string"): message_content = message.get_content_string() else: message_content = str(message) - + # Truncate if too long if len(message_content) > 1000: message_content = message_content[:997] + "..." @@ -158,42 +248,42 @@ def get_team_public_run_attributes( # Process keyword arguments if kwargs: - if kwargs.get('user_id'): - attributes[SpanAttributes.LLM_USER] = kwargs['user_id'] - - if kwargs.get('session_id'): - attributes['agno.team.session_id'] = kwargs['session_id'] - - if kwargs.get('stream'): - attributes['agno.team.streaming'] = str(kwargs['stream']) - - if kwargs.get('stream_intermediate_steps'): - attributes['agno.team.stream_intermediate_steps'] = str(kwargs['stream_intermediate_steps']) - - if kwargs.get('retries'): - attributes['agno.team.retries'] = str(kwargs['retries']) - + if kwargs.get("user_id"): + attributes[SpanAttributes.LLM_USER] = kwargs["user_id"] + + if kwargs.get("session_id"): + attributes["agno.team.session_id"] = kwargs["session_id"] + + if kwargs.get("stream"): + attributes["agno.team.streaming"] = str(kwargs["stream"]) + + if kwargs.get("stream_intermediate_steps"): + attributes["agno.team.stream_intermediate_steps"] = str(kwargs["stream_intermediate_steps"]) + + if kwargs.get("retries"): + attributes["agno.team.retries"] = str(kwargs["retries"]) + # Media attachments - if 
kwargs.get('audio'): - attributes['agno.team.has_audio'] = "true" - if kwargs.get('images'): - attributes['agno.team.has_images'] = "true" - if kwargs.get('videos'): - attributes['agno.team.has_videos'] = "true" - if kwargs.get('files'): - attributes['agno.team.has_files'] = "true" - - if kwargs.get('knowledge_filters'): - attributes['agno.team.has_knowledge_filters'] = "true" + if kwargs.get("audio"): + attributes["agno.team.has_audio"] = "true" + if kwargs.get("images"): + attributes["agno.team.has_images"] = "true" + if kwargs.get("videos"): + attributes["agno.team.has_videos"] = "true" + if kwargs.get("files"): + attributes["agno.team.has_files"] = "true" + + if kwargs.get("knowledge_filters"): + attributes["agno.team.has_knowledge_filters"] = "true" # Process return value (TeamRunResponse or Iterator) if return_value: # Handle both single response and iterator - if hasattr(return_value, '__iter__') and not isinstance(return_value, str): + if hasattr(return_value, "__iter__") and not isinstance(return_value, str): # It's an iterator for streaming attributes[WorkflowAttributes.WORKFLOW_OUTPUT_TYPE] = "team_run_response_stream" - attributes['agno.team.is_streaming'] = "true" - elif hasattr(return_value, 'content'): + attributes["agno.team.is_streaming"] = "true" + elif hasattr(return_value, "content"): # It's a TeamRunResponse content = str(return_value.content) # Truncate if too long @@ -201,29 +291,29 @@ def get_team_public_run_attributes( content = content[:997] + "..." 
attributes[WorkflowAttributes.WORKFLOW_OUTPUT] = content attributes[WorkflowAttributes.WORKFLOW_OUTPUT_TYPE] = "team_run_response" - + # Set additional team response attributes - if hasattr(return_value, 'run_id'): - attributes['agno.team.run_id'] = str(return_value.run_id) - - if hasattr(return_value, 'session_id'): - attributes['agno.team.response_session_id'] = str(return_value.session_id) - - if hasattr(return_value, 'team_id'): - attributes['agno.team.response_team_id'] = str(return_value.team_id) - - if hasattr(return_value, 'model'): + if hasattr(return_value, "run_id"): + attributes["agno.team.run_id"] = str(return_value.run_id) + + if hasattr(return_value, "session_id"): + attributes["agno.team.response_session_id"] = str(return_value.session_id) + + if hasattr(return_value, "team_id"): + attributes["agno.team.response_team_id"] = str(return_value.team_id) + + if hasattr(return_value, "model"): attributes[SpanAttributes.LLM_RESPONSE_MODEL] = str(return_value.model) - - if hasattr(return_value, 'model_provider'): - attributes['agno.team.model_provider'] = str(return_value.model_provider) - - if hasattr(return_value, 'event'): - attributes['agno.team.event'] = str(return_value.event) - + + if hasattr(return_value, "model_provider"): + attributes["agno.team.model_provider"] = str(return_value.model_provider) + + if hasattr(return_value, "event"): + attributes["agno.team.event"] = str(return_value.event) + # Team-specific attributes - if hasattr(return_value, 'content_type'): - attributes['agno.team.response_content_type'] = str(return_value.content_type) + if hasattr(return_value, "content_type"): + attributes["agno.team.response_content_type"] = str(return_value.content_type) else: # Unknown return type output = str(return_value) @@ -232,4 +322,4 @@ def get_team_public_run_attributes( attributes[WorkflowAttributes.WORKFLOW_OUTPUT] = output attributes[WorkflowAttributes.WORKFLOW_OUTPUT_TYPE] = type(return_value).__name__ - return attributes \ No newline at 
end of file + return attributes diff --git a/agentops/instrumentation/agno/attributes/tool.py b/agentops/instrumentation/agno/attributes/tool.py index 3a9557aed..55fa4ad16 100644 --- a/agentops/instrumentation/agno/attributes/tool.py +++ b/agentops/instrumentation/agno/attributes/tool.py @@ -3,122 +3,11 @@ import json from typing import Optional, Tuple, Dict, Any -from agentops.logging import logger from agentops.instrumentation.common.attributes import AttributeMap from agentops.semconv import SpanAttributes from agentops.semconv.span_kinds import SpanKind as AgentOpsSpanKind -def get_tool_decorator_attributes( - args: Optional[Tuple] = None, - kwargs: Optional[Dict] = None, - return_value: Optional[Any] = None, -) -> AttributeMap: - """Extract span attributes for tool decorator calls. - - The @tool decorator has multiple calling patterns: - 1. @tool - direct decoration, args[0] is the function - 2. @tool() - parameterless call, return_value is a decorator function - 3. @tool(name="...") - parameterized call, return_value is a decorator function - - Args: - args: Positional arguments passed to the tool decorator - kwargs: Keyword arguments passed to the tool decorator - return_value: The return value from the tool decorator - - Returns: - A dictionary of span attributes to be set on the tool span - """ - attributes: AttributeMap = {} - - # Base attributes - attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = AgentOpsSpanKind.TOOL - attributes[SpanAttributes.LLM_SYSTEM] = "agno" - attributes["agno.tool.operation"] = "create" - - # Determine the calling pattern - direct_decoration = ( - args and len(args) == 1 and callable(args[0]) and not kwargs - ) - - if direct_decoration: - # Pattern 1: @tool (direct decoration) - func = args[0] - attributes["agno.tool.call_pattern"] = "direct" - attributes["agno.tool.function_name"] = func.__name__ - - # Check if it's an async function - from inspect import iscoroutinefunction, isasyncgenfunction - if iscoroutinefunction(func): - 
attributes["agno.tool.function_type"] = "async" - elif isasyncgenfunction(func): - attributes["agno.tool.function_type"] = "async_generator" - else: - attributes["agno.tool.function_type"] = "sync" - - # Get docstring if available - if func.__doc__: - docstring = func.__doc__.strip() - if len(docstring) > 200: - docstring = docstring[:197] + "..." - attributes["agno.tool.function_docstring"] = docstring - - # Since it's direct decoration, return_value should be a Function - if return_value and hasattr(return_value, 'name'): - attributes["agno.tool.created_name"] = str(return_value.name) - - else: - # Pattern 2 & 3: @tool() or @tool(name="...") - parameterized decoration - attributes["agno.tool.call_pattern"] = "parameterized" - - # Process decorator arguments from kwargs - if kwargs: - if kwargs.get('name'): - attributes["agno.tool.config_name"] = kwargs['name'] - - if kwargs.get('description'): - attributes["agno.tool.config_description"] = kwargs['description'] - - if kwargs.get('instructions'): - attributes["agno.tool.config_instructions"] = kwargs['instructions'] - - if 'strict' in kwargs and kwargs['strict'] is not None: - attributes["agno.tool.config_strict"] = str(kwargs['strict']) - - if 'show_result' in kwargs and kwargs['show_result'] is not None: - attributes["agno.tool.config_show_result"] = str(kwargs['show_result']) - - if 'stop_after_tool_call' in kwargs and kwargs['stop_after_tool_call'] is not None: - attributes["agno.tool.config_stop_after_tool_call"] = str(kwargs['stop_after_tool_call']) - - if 'requires_confirmation' in kwargs and kwargs['requires_confirmation'] is not None: - attributes["agno.tool.config_requires_confirmation"] = str(kwargs['requires_confirmation']) - - if 'requires_user_input' in kwargs and kwargs['requires_user_input'] is not None: - attributes["agno.tool.config_requires_user_input"] = str(kwargs['requires_user_input']) - - if 'external_execution' in kwargs and kwargs['external_execution'] is not None: - 
attributes["agno.tool.config_external_execution"] = str(kwargs['external_execution']) - - if kwargs.get('user_input_fields'): - attributes["agno.tool.config_user_input_fields_count"] = str(len(kwargs['user_input_fields'])) - - if 'cache_results' in kwargs and kwargs['cache_results'] is not None: - attributes["agno.tool.config_cache_results"] = str(kwargs['cache_results']) - - if kwargs.get('cache_dir'): - attributes["agno.tool.config_cache_dir"] = kwargs['cache_dir'] - - if 'cache_ttl' in kwargs and kwargs['cache_ttl'] is not None: - attributes["agno.tool.config_cache_ttl"] = str(kwargs['cache_ttl']) - - # For parameterized calls, return_value is a decorator function - if return_value and callable(return_value): - attributes["agno.tool.returns_decorator"] = "true" - - return attributes - - def get_tool_execution_attributes( args: Optional[Tuple] = None, kwargs: Optional[Dict] = None, @@ -136,88 +25,70 @@ def get_tool_execution_attributes( """ attributes: AttributeMap = {} - # Base attributes - Use "tool.usage" to match yellow color coding in frontend - attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = "tool.usage" + # Base attributes + attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = AgentOpsSpanKind.TOOL attributes[SpanAttributes.LLM_SYSTEM] = "agno" attributes["agno.tool.operation"] = "execute" - # Standard AgentOps attributes for consistency with other tool spans - attributes["deployment.environment"] = "default_environment" - attributes["service.name"] = "default_application" - attributes["telemetry.sdk.name"] = "agentops" - - # Add execution context and debugging information - import time - import traceback - - attributes["agno.tool.execution_timestamp"] = str(int(time.time() * 1000)) - - # Try to get calling context for debugging - try: - stack = traceback.extract_stack() - # Look for relevant calling frames - calling_info = [] - for frame in stack[-10:]: # Last 10 frames - if any(keyword in frame.filename.lower() for keyword in ['agno', 'agent', 'team', 
'tool']): - calling_info.append(f"{frame.filename.split('/')[-1]}:{frame.lineno}:{frame.name}") - - if calling_info: - attributes["agno.tool.call_stack"] = " -> ".join(calling_info[-3:]) # Last 3 relevant frames - except Exception as e: - attributes["agno.tool.call_stack_error"] = str(e) - # Process the FunctionCall object (self in execute method) if args and len(args) > 0: function_call = args[0] - + # Add detailed function call information attributes["agno.tool.function_call_type"] = str(type(function_call).__name__) - + # Extract tool information - if hasattr(function_call, 'function') and function_call.function: + if hasattr(function_call, "function") and function_call.function: function = function_call.function - tool_name = getattr(function, 'name', 'unknown_tool') - + + # Get function name and add display name + if hasattr(function, "__name__"): + func_name = function.__name__ + attributes["agno.tool.function_name"] = func_name + attributes["agno.tool.display_name"] = f"{func_name} (Tool)" + + tool_name = getattr(function, "name", "unknown_tool") + # Set span attributes for the tool execution span attributes["tool.name"] = tool_name attributes["agno.tool.function_name"] = tool_name - + # Function details and context - if hasattr(function, 'description'): - description = getattr(function, 'description', '') + if hasattr(function, "description"): + description = getattr(function, "description", "") if description: # Truncate long descriptions but keep them readable if len(description) > 300: description = description[:297] + "..." 
attributes["tool.description"] = description attributes["agno.tool.function_description"] = description - + # Function source information - if hasattr(function, 'entrypoint') and function.entrypoint: + if hasattr(function, "entrypoint") and function.entrypoint: entrypoint = function.entrypoint - if hasattr(entrypoint, '__module__'): + if hasattr(entrypoint, "__module__"): attributes["agno.tool.function_module"] = str(entrypoint.__module__) - if hasattr(entrypoint, '__name__'): + if hasattr(entrypoint, "__name__"): attributes["agno.tool.function_method"] = str(entrypoint.__name__) - if hasattr(entrypoint, '__qualname__'): + if hasattr(entrypoint, "__qualname__"): attributes["agno.tool.function_qualname"] = str(entrypoint.__qualname__) - + # Tool capabilities - if hasattr(function, 'requires_confirmation'): + if hasattr(function, "requires_confirmation"): attributes["agno.tool.requires_confirmation"] = str(function.requires_confirmation) - if hasattr(function, 'show_result'): + if hasattr(function, "show_result"): attributes["agno.tool.show_result"] = str(function.show_result) - if hasattr(function, 'stop_after_tool_call'): + if hasattr(function, "stop_after_tool_call"): attributes["agno.tool.stop_after_tool_call"] = str(function.stop_after_tool_call) - + # Extract tool arguments with better formatting - if hasattr(function_call, 'arguments') and function_call.arguments: + if hasattr(function_call, "arguments") and function_call.arguments: try: if isinstance(function_call.arguments, str): args_dict = json.loads(function_call.arguments) else: args_dict = function_call.arguments - + # Format arguments nicely formatted_args = [] for key, value in args_dict.items(): @@ -225,34 +96,35 @@ def get_tool_execution_attributes( if len(value_str) > 100: value_str = value_str[:97] + "..." 
formatted_args.append(f"{key}={value_str}") - + attributes["tool.parameters"] = json.dumps(args_dict) attributes["agno.tool.formatted_args"] = ", ".join(formatted_args) attributes["agno.tool.args_count"] = str(len(args_dict)) except Exception as e: attributes["tool.parameters"] = str(function_call.arguments) attributes["agno.tool.args_parse_error"] = str(e) - + # Extract call ID and metadata - if hasattr(function_call, 'tool_call_id'): + if hasattr(function_call, "tool_call_id"): attributes["agno.tool.call_id"] = str(function_call.tool_call_id) - + # Check for any agent context - if hasattr(function_call, '_agent') and function_call._agent: + if hasattr(function_call, "_agent") and function_call._agent: agent = function_call._agent - if hasattr(agent, 'name'): + if hasattr(agent, "name"): attributes["agno.tool.calling_agent_name"] = str(agent.name) - if hasattr(agent, 'agent_id'): + if hasattr(agent, "agent_id"): attributes["agno.tool.calling_agent_id"] = str(agent.agent_id) # Process return value if return_value is not None: # Add timing information import time + attributes["agno.tool.execution_timestamp"] = str(int(time.time() * 1000)) - + # Determine execution status and result information - if hasattr(return_value, 'value'): + if hasattr(return_value, "value"): # FunctionExecutionResult with value result_value = return_value.value attributes["agno.tool.execution_status"] = "success" @@ -260,49 +132,56 @@ def get_tool_execution_attributes( # Direct return value result_value = return_value attributes["agno.tool.execution_status"] = "success" - + # Process result value if result_value is not None: result_type = type(result_value).__name__ - attributes["agno.tool.result_type"] = result_type - + attributes["agno.tool.execution_result_status"] = str(result_type) + # Handle FunctionExecutionResult objects specifically - if hasattr(result_value, 'status') and hasattr(result_value, 'result'): + if hasattr(result_value, "status") and hasattr(result_value, "result"): # 
This looks like a FunctionExecutionResult - status = getattr(result_value, 'status', 'unknown') - actual_result = getattr(result_value, 'result', None) - error = getattr(result_value, 'error', None) - + status = getattr(result_value, "status", "unknown") + actual_result = getattr(result_value, "result", None) + error = getattr(result_value, "error", None) + attributes["agno.tool.execution_result_status"] = str(status) attributes["tool.status"] = str(status) - + if error: attributes["agno.tool.execution_error"] = str(error) attributes["tool.error"] = str(error) - + if actual_result is not None: actual_result_type = type(actual_result).__name__ attributes["agno.tool.actual_result_type"] = actual_result_type - + # Enhanced generator handling - if hasattr(actual_result, '__iter__') and hasattr(actual_result, '__next__'): + if hasattr(actual_result, "__iter__") and hasattr(actual_result, "__next__"): attributes["agno.tool.result_is_generator"] = "true" - + # Try to get more meaningful information about the generator generator_info = [] - + # Get function name from the generator - if hasattr(actual_result, 'gi_code'): + if hasattr(actual_result, "gi_code"): func_name = actual_result.gi_code.co_name attributes["agno.tool.generator_function"] = func_name generator_info.append(f"function={func_name}") - + # Get local variables from generator frame for context - if hasattr(actual_result, 'gi_frame') and actual_result.gi_frame: + if hasattr(actual_result, "gi_frame") and actual_result.gi_frame: try: locals_dict = actual_result.gi_frame.f_locals # Look for interesting variables that give context - context_vars = ['task_description', 'expected_output', 'member_agent', 'agent_name', 'team', 'message'] + context_vars = [ + "task_description", + "expected_output", + "member_agent", + "agent_name", + "team", + "message", + ] for var_name in context_vars: if var_name in locals_dict: value = str(locals_dict[var_name]) @@ -310,19 +189,19 @@ def get_tool_execution_attributes( value = 
value[:97] + "..." generator_info.append(f"{var_name}={value}") attributes[f"agno.tool.generator_{var_name}"] = value - + # Count total local variables for debugging attributes["agno.tool.generator_locals_count"] = str(len(locals_dict)) except Exception as e: attributes["agno.tool.generator_locals_error"] = str(e) - + # Try to identify what type of transfer this is generator_str = str(actual_result) - if 'transfer_task_to_member' in generator_str: + if "transfer_task_to_member" in generator_str: attributes["agno.tool.transfer_type"] = "task_to_member" - elif 'transfer' in generator_str.lower(): + elif "transfer" in generator_str.lower(): attributes["agno.tool.transfer_type"] = "general_transfer" - + if generator_info: result_str = f"Generator<{actual_result_type}>({', '.join(generator_info)})" else: @@ -336,11 +215,11 @@ def get_tool_execution_attributes( result_str = f"FunctionExecutionResult(status={status}, result=None)" else: # Not a FunctionExecutionResult, handle as direct result - if hasattr(result_value, '__iter__') and hasattr(result_value, '__next__'): + if hasattr(result_value, "__iter__") and hasattr(result_value, "__next__"): # It's a generator attributes["agno.tool.result_is_generator"] = "true" - - if hasattr(result_value, 'gi_code'): + + if hasattr(result_value, "gi_code"): func_name = result_value.gi_code.co_name attributes["agno.tool.generator_function"] = func_name result_str = f"Generator<{result_type}> function={func_name} - {str(result_value)}" @@ -353,297 +232,27 @@ def get_tool_execution_attributes( result_str = result_str[:497] + "..." else: result_str = "None" - + # Set the main result attribute attributes["tool.result"] = result_str - + # Add additional analysis attributes attributes["agno.tool.result_length"] = str(len(result_str)) - + # Provide a preview for long results if len(result_str) > 100: preview = result_str[:97] + "..." 
attributes["agno.tool.result_preview"] = preview else: attributes["agno.tool.result_preview"] = result_str - + # Set final execution status if not attributes.get("tool.status"): attributes["tool.status"] = "success" - + # Add execution summary for debugging tool_name = attributes.get("tool.name", "unknown") call_type = attributes.get("agno.tool.transfer_type", "unknown") attributes["agno.tool.execution_summary"] = f"Tool '{tool_name}' executed with type '{call_type}'" return attributes - - -def get_function_constructor_attributes( - args: Optional[Tuple] = None, - kwargs: Optional[Dict] = None, - return_value: Optional[Any] = None, -) -> AttributeMap: - """Extract span attributes for Function constructor calls. - - This captures when Function objects are created (which happens for all @tool decorators). - - Args: - args: Positional arguments passed to Function.__init__ (self, ...) - kwargs: Keyword arguments passed to Function.__init__ - return_value: The return value from Function.__init__ (None) - - Returns: - A dictionary of span attributes to be set on the function creation span - """ - attributes: AttributeMap = {} - - # Base attributes - attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = AgentOpsSpanKind.TOOL - attributes[SpanAttributes.LLM_SYSTEM] = "agno" - attributes["agno.tool.operation"] = "function_create" - - # Try to find active agent span to establish proper hierarchy - try: - from opentelemetry import trace - current_span = trace.get_current_span() - if current_span and current_span.is_recording(): - # Check if there's an agent-related span in the context - span_context = current_span.get_span_context() - if span_context and span_context.is_valid: - attributes["agno.tool.created_during_agent_run"] = "true" - else: - attributes["agno.tool.created_during_agent_run"] = "false" - else: - attributes["agno.tool.created_during_agent_run"] = "false" - except Exception: - attributes["agno.tool.created_during_agent_run"] = "unknown" - - # Extract Function 
constructor arguments - if kwargs: - if kwargs.get('name'): - attributes["agno.function.name"] = kwargs['name'] - - if kwargs.get('description'): - description = kwargs['description'] - if len(description) > 200: - description = description[:197] + "..." - attributes["agno.function.description"] = description - - if kwargs.get('instructions'): - instructions = kwargs['instructions'] - if len(instructions) > 200: - instructions = instructions[:197] + "..." - attributes["agno.function.instructions"] = instructions - - if 'strict' in kwargs and kwargs['strict'] is not None: - attributes["agno.function.strict"] = str(kwargs['strict']) - - if 'show_result' in kwargs and kwargs['show_result'] is not None: - attributes["agno.function.show_result"] = str(kwargs['show_result']) - - if 'stop_after_tool_call' in kwargs and kwargs['stop_after_tool_call'] is not None: - attributes["agno.function.stop_after_tool_call"] = str(kwargs['stop_after_tool_call']) - - if 'requires_confirmation' in kwargs and kwargs['requires_confirmation'] is not None: - attributes["agno.function.requires_confirmation"] = str(kwargs['requires_confirmation']) - - if 'requires_user_input' in kwargs and kwargs['requires_user_input'] is not None: - attributes["agno.function.requires_user_input"] = str(kwargs['requires_user_input']) - - if 'external_execution' in kwargs and kwargs['external_execution'] is not None: - attributes["agno.function.external_execution"] = str(kwargs['external_execution']) - - if kwargs.get('user_input_fields'): - attributes["agno.function.user_input_fields_count"] = str(len(kwargs['user_input_fields'])) - - if 'cache_results' in kwargs and kwargs['cache_results'] is not None: - attributes["agno.function.cache_results"] = str(kwargs['cache_results']) - - if kwargs.get('cache_dir'): - attributes["agno.function.cache_dir"] = kwargs['cache_dir'] - - if 'cache_ttl' in kwargs and kwargs['cache_ttl'] is not None: - attributes["agno.function.cache_ttl"] = str(kwargs['cache_ttl']) - - # 
Check the entrypoint function if available - if kwargs.get('entrypoint') and callable(kwargs['entrypoint']): - func = kwargs['entrypoint'] - - # Check if it's an async function - from inspect import iscoroutinefunction, isasyncgenfunction - if iscoroutinefunction(func): - attributes["agno.function.entrypoint_type"] = "async" - elif isasyncgenfunction(func): - attributes["agno.function.entrypoint_type"] = "async_generator" - else: - attributes["agno.function.entrypoint_type"] = "sync" - - # Get function name from entrypoint - if hasattr(func, '__name__'): - attributes["agno.function.entrypoint_name"] = func.__name__ - - return attributes - - -def get_tool_preparation_attributes( - args: Optional[Tuple] = None, - kwargs: Optional[Dict] = None, - return_value: Optional[Any] = None, -) -> AttributeMap: - """Extract span attributes for agent tool preparation. - - This captures when an agent processes and registers tools during determine_tools_for_model. - - Args: - args: Positional arguments passed to determine_tools_for_model (self, model, session_id, ...) 
- kwargs: Keyword arguments passed to determine_tools_for_model - return_value: The return value from determine_tools_for_model (None) - - Returns: - A dictionary of span attributes to be set on the tool preparation span - """ - attributes: AttributeMap = {} - - # Base attributes - attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = AgentOpsSpanKind.WORKFLOW - attributes[SpanAttributes.LLM_SYSTEM] = "agno" - attributes["agno.agent.operation"] = "prepare_tools" - - # Extract agent information from args[0] (self) - if args and len(args) >= 1: - agent = args[0] - - if hasattr(agent, 'name') and agent.name: - attributes["agno.agent.name"] = agent.name - - if hasattr(agent, 'tools') and agent.tools: - tools_count = len(agent.tools) - attributes["agno.agent.tools_count"] = str(tools_count) - - # Capture tool names if available - tool_names = [] - for tool in agent.tools: - if hasattr(tool, 'name'): - tool_names.append(tool.name) - elif hasattr(tool, '__name__'): - tool_names.append(tool.__name__) - elif callable(tool): - tool_names.append(getattr(tool, '__name__', 'unknown')) - - if tool_names: - # Limit to first 5 tools to avoid overly long attributes - limited_names = tool_names[:5] - if len(tool_names) > 5: - limited_names.append(f"...+{len(tool_names)-5} more") - attributes["agno.agent.tool_names"] = ",".join(limited_names) - - # Extract model information - if len(args) >= 2: - model = args[1] - if hasattr(model, 'id'): - attributes["agno.agent.model_id"] = str(model.id) - if hasattr(model, 'provider'): - attributes["agno.agent.model_provider"] = str(model.provider) - - # Extract session information - if len(args) >= 3: - session_id = args[2] - if session_id: - attributes["agno.agent.session_id"] = str(session_id) - - # Extract additional info from kwargs - if kwargs: - if kwargs.get('async_mode') is not None: - attributes["agno.agent.async_mode"] = str(kwargs['async_mode']) - - if kwargs.get('knowledge_filters'): - attributes["agno.agent.has_knowledge_filters"] = "true" 
- else: - attributes["agno.agent.has_knowledge_filters"] = "false" - - return attributes - - -def get_tool_registration_attributes( - args: Optional[Tuple] = None, - kwargs: Optional[Dict] = None, - return_value: Optional[Any] = None, -) -> AttributeMap: - """Extract span attributes for individual tool registration via Function.from_callable. - - This captures when individual tools (callables) are converted to Function objects during agent tool preparation. - - Args: - args: Positional arguments passed to Function.from_callable (callable, ...) - kwargs: Keyword arguments passed to Function.from_callable - return_value: The return value from Function.from_callable (Function object) - - Returns: - A dictionary of span attributes to be set on the tool registration span - """ - attributes: AttributeMap = {} - - # Base attributes - attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = AgentOpsSpanKind.TOOL - attributes[SpanAttributes.LLM_SYSTEM] = "agno" - attributes["agno.tool.operation"] = "register" - - # Extract callable information from args[0] - if args and len(args) >= 1: - callable_func = args[0] - - if hasattr(callable_func, '__name__'): - attributes["agno.tool.function_name"] = callable_func.__name__ - - # Check if it's an async function - from inspect import iscoroutinefunction, isasyncgenfunction - if iscoroutinefunction(callable_func): - attributes["agno.tool.function_type"] = "async" - elif isasyncgenfunction(callable_func): - attributes["agno.tool.function_type"] = "async_generator" - else: - attributes["agno.tool.function_type"] = "sync" - - # Get docstring if available - if hasattr(callable_func, '__doc__') and callable_func.__doc__: - docstring = callable_func.__doc__.strip() - if len(docstring) > 200: - docstring = docstring[:197] + "..." 
- attributes["agno.tool.function_docstring"] = docstring - - # Check if it's already a Function object (has agno-specific attributes) - if hasattr(callable_func, 'name'): - attributes["agno.tool.source_name"] = str(callable_func.name) - if hasattr(callable_func, 'description'): - description = str(callable_func.description) - if len(description) > 200: - description = description[:197] + "..." - attributes["agno.tool.source_description"] = description - - # Extract kwargs passed to from_callable - if kwargs: - if kwargs.get('strict') is not None: - attributes["agno.tool.strict"] = str(kwargs['strict']) - - # Extract information from the created Function object - if return_value and hasattr(return_value, 'name'): - attributes["agno.tool.created_name"] = str(return_value.name) - - if hasattr(return_value, 'description'): - description = str(return_value.description) - if len(description) > 200: - description = description[:197] + "..." - attributes["agno.tool.created_description"] = description - - # Tool capabilities from the created Function - if hasattr(return_value, 'requires_confirmation'): - attributes["agno.tool.requires_confirmation"] = str(return_value.requires_confirmation) - - if hasattr(return_value, 'requires_user_input'): - attributes["agno.tool.requires_user_input"] = str(return_value.requires_user_input) - - if hasattr(return_value, 'external_execution'): - attributes["agno.tool.external_execution"] = str(return_value.external_execution) - - return attributes \ No newline at end of file diff --git a/agentops/instrumentation/agno/attributes/workflow.py b/agentops/instrumentation/agno/attributes/workflow.py index 41d28b7a0..a0b4b1251 100644 --- a/agentops/instrumentation/agno/attributes/workflow.py +++ b/agentops/instrumentation/agno/attributes/workflow.py @@ -14,60 +14,60 @@ def get_workflow_run_attributes( return_value: Optional[Any] = None, ) -> Dict[str, AttributeValue]: """Extract attributes from workflow run operations. 
- + Args: args: Positional arguments passed to the workflow run method - kwargs: Keyword arguments passed to the workflow run method + kwargs: Keyword arguments passed to the workflow run method return_value: Return value from the workflow run method - + Returns: Dictionary of OpenTelemetry attributes for workflow runs """ attributes = get_common_attributes() kwargs = kwargs or {} - + if args and len(args) > 0: workflow = args[0] - + # Core workflow attributes - if hasattr(workflow, 'name') and workflow.name: + if hasattr(workflow, "name") and workflow.name: attributes["workflow.name"] = str(workflow.name) - if hasattr(workflow, 'workflow_id') and workflow.workflow_id: + if hasattr(workflow, "workflow_id") and workflow.workflow_id: attributes["workflow.workflow_id"] = str(workflow.workflow_id) - if hasattr(workflow, 'description') and workflow.description: + if hasattr(workflow, "description") and workflow.description: attributes["workflow.description"] = str(workflow.description) - if hasattr(workflow, 'app_id') and workflow.app_id: + if hasattr(workflow, "app_id") and workflow.app_id: attributes["workflow.app_id"] = str(workflow.app_id) - + # Session and user attributes - if hasattr(workflow, 'session_id') and workflow.session_id: + if hasattr(workflow, "session_id") and workflow.session_id: attributes["workflow.session_id"] = str(workflow.session_id) - if hasattr(workflow, 'session_name') and workflow.session_name: + if hasattr(workflow, "session_name") and workflow.session_name: attributes["workflow.session_name"] = str(workflow.session_name) - if hasattr(workflow, 'user_id') and workflow.user_id: + if hasattr(workflow, "user_id") and workflow.user_id: attributes["workflow.user_id"] = str(workflow.user_id) - + # Run-specific attributes - if hasattr(workflow, 'run_id') and workflow.run_id: + if hasattr(workflow, "run_id") and workflow.run_id: attributes["workflow.run_id"] = str(workflow.run_id) - + # Configuration attributes - if hasattr(workflow, 'debug_mode'): 
+ if hasattr(workflow, "debug_mode"): attributes["workflow.debug_mode"] = bool(workflow.debug_mode) - if hasattr(workflow, 'monitoring'): + if hasattr(workflow, "monitoring"): attributes["workflow.monitoring"] = bool(workflow.monitoring) - if hasattr(workflow, 'telemetry'): + if hasattr(workflow, "telemetry"): attributes["workflow.telemetry"] = bool(workflow.telemetry) - + # Memory and storage attributes - if hasattr(workflow, 'memory') and workflow.memory: + if hasattr(workflow, "memory") and workflow.memory: memory_type = type(workflow.memory).__name__ attributes["workflow.memory.type"] = memory_type - - if hasattr(workflow, 'storage') and workflow.storage: + + if hasattr(workflow, "storage") and workflow.storage: storage_type = type(workflow.storage).__name__ attributes["workflow.storage.type"] = storage_type - + # Input parameters from kwargs if kwargs: # Count and types of input parameters @@ -75,55 +75,57 @@ def get_workflow_run_attributes( param_types = list(set(type(v).__name__ for v in kwargs.values())) if param_types: attributes["workflow.input.parameter_types"] = str(param_types) - + # Store specific input keys (without values for privacy) input_keys = list(kwargs.keys()) if input_keys: attributes["workflow.input.parameter_keys"] = str(input_keys) - + # Workflow method parameters if available - if hasattr(workflow, '_run_parameters') and workflow._run_parameters: + if hasattr(workflow, "_run_parameters") and workflow._run_parameters: param_count = len(workflow._run_parameters) attributes["workflow.method.parameter_count"] = param_count - - if hasattr(workflow, '_run_return_type') and workflow._run_return_type: + + if hasattr(workflow, "_run_return_type") and workflow._run_return_type: attributes["workflow.method.return_type"] = str(workflow._run_return_type) - + # Process return value attributes if return_value is not None: return_type = type(return_value).__name__ attributes["workflow.output.type"] = return_type - + # Handle RunResponse objects - if 
hasattr(return_value, 'content'): - if hasattr(return_value, 'content_type'): + if hasattr(return_value, "content"): + if hasattr(return_value, "content_type"): attributes["workflow.output.content_type"] = str(return_value.content_type) - if hasattr(return_value, 'event'): + if hasattr(return_value, "event"): attributes["workflow.output.event"] = str(return_value.event) - if hasattr(return_value, 'model'): + if hasattr(return_value, "model"): attributes["workflow.output.model"] = str(return_value.model) if return_value.model else "" - if hasattr(return_value, 'model_provider'): - attributes["workflow.output.model_provider"] = str(return_value.model_provider) if return_value.model_provider else "" - + if hasattr(return_value, "model_provider"): + attributes["workflow.output.model_provider"] = ( + str(return_value.model_provider) if return_value.model_provider else "" + ) + # Count various response components - if hasattr(return_value, 'messages') and return_value.messages: + if hasattr(return_value, "messages") and return_value.messages: attributes["workflow.output.message_count"] = len(return_value.messages) - if hasattr(return_value, 'tools') and return_value.tools: + if hasattr(return_value, "tools") and return_value.tools: attributes["workflow.output.tool_count"] = len(return_value.tools) - if hasattr(return_value, 'images') and return_value.images: + if hasattr(return_value, "images") and return_value.images: attributes["workflow.output.image_count"] = len(return_value.images) - if hasattr(return_value, 'videos') and return_value.videos: + if hasattr(return_value, "videos") and return_value.videos: attributes["workflow.output.video_count"] = len(return_value.videos) - if hasattr(return_value, 'audio') and return_value.audio: + if hasattr(return_value, "audio") and return_value.audio: attributes["workflow.output.audio_count"] = len(return_value.audio) - + # Handle generators/iterators - elif hasattr(return_value, '__iter__') and not isinstance(return_value, 
(str, bytes)): + elif hasattr(return_value, "__iter__") and not isinstance(return_value, (str, bytes)): attributes["workflow.output.is_streaming"] = True - + # Set span kind - AgentOpsSpanKind.WORKFLOW is already a string attributes[InstrumentationAttributes.INSTRUMENTATION_TYPE] = AgentOpsSpanKind.WORKFLOW - + return attributes @@ -133,100 +135,51 @@ def get_workflow_session_attributes( return_value: Optional[Any] = None, ) -> Dict[str, AttributeValue]: """Extract attributes from workflow session operations. - + Args: args: Positional arguments passed to the session method kwargs: Keyword arguments passed to the session method return_value: Return value from the session method - + Returns: Dictionary of OpenTelemetry attributes for workflow sessions """ attributes = get_common_attributes() kwargs = kwargs or {} - + if args and len(args) > 0: workflow = args[0] - + # Session attributes - if hasattr(workflow, 'session_id') and workflow.session_id: + if hasattr(workflow, "session_id") and workflow.session_id: attributes["workflow.session.session_id"] = str(workflow.session_id) - if hasattr(workflow, 'session_name') and workflow.session_name: + if hasattr(workflow, "session_name") and workflow.session_name: attributes["workflow.session.session_name"] = str(workflow.session_name) - if hasattr(workflow, 'workflow_id') and workflow.workflow_id: + if hasattr(workflow, "workflow_id") and workflow.workflow_id: attributes["workflow.session.workflow_id"] = str(workflow.workflow_id) - if hasattr(workflow, 'user_id') and workflow.user_id: + if hasattr(workflow, "user_id") and workflow.user_id: attributes["workflow.session.user_id"] = str(workflow.user_id) - + # Session state attributes - if hasattr(workflow, 'session_state') and workflow.session_state: + if hasattr(workflow, "session_state") and workflow.session_state: if isinstance(workflow.session_state, dict): attributes["workflow.session.state_keys"] = str(list(workflow.session_state.keys())) 
attributes["workflow.session.state_size"] = len(workflow.session_state) - + # Storage attributes - if hasattr(workflow, 'storage') and workflow.storage: + if hasattr(workflow, "storage") and workflow.storage: storage_type = type(workflow.storage).__name__ attributes["workflow.session.storage_type"] = storage_type - + # Process session return value if it's a WorkflowSession - if return_value is not None and hasattr(return_value, 'session_id'): + if return_value is not None and hasattr(return_value, "session_id"): attributes["workflow.session.returned_session_id"] = str(return_value.session_id) - if hasattr(return_value, 'created_at') and return_value.created_at: + if hasattr(return_value, "created_at") and return_value.created_at: attributes["workflow.session.created_at"] = int(return_value.created_at) - if hasattr(return_value, 'updated_at') and return_value.updated_at: + if hasattr(return_value, "updated_at") and return_value.updated_at: attributes["workflow.session.updated_at"] = int(return_value.updated_at) - - # Set span kind - AgentOpsSpanKind.WORKFLOW is already a string - attributes[InstrumentationAttributes.INSTRUMENTATION_TYPE] = AgentOpsSpanKind.WORKFLOW - - return attributes - -def get_workflow_storage_attributes( - args: Tuple[Any, ...] = (), - kwargs: Optional[Dict[str, Any]] = None, - return_value: Optional[Any] = None, -) -> Dict[str, AttributeValue]: - """Extract attributes from workflow storage operations. 
- - Args: - args: Positional arguments passed to the storage method - kwargs: Keyword arguments passed to the storage method - return_value: Return value from the storage method - - Returns: - Dictionary of OpenTelemetry attributes for workflow storage - """ - attributes = get_common_attributes() - kwargs = kwargs or {} - - if args and len(args) > 0: - workflow = args[0] - - # Storage attributes - if hasattr(workflow, 'storage') and workflow.storage: - storage_type = type(workflow.storage).__name__ - attributes["workflow.storage.type"] = storage_type - - if hasattr(workflow.storage, 'mode'): - attributes["workflow.storage.mode"] = str(workflow.storage.mode) - - # Workflow identification for storage context - if hasattr(workflow, 'workflow_id') and workflow.workflow_id: - attributes["workflow.storage.workflow_id"] = str(workflow.workflow_id) - if hasattr(workflow, 'session_id') and workflow.session_id: - attributes["workflow.storage.session_id"] = str(workflow.session_id) - - # Process storage operation result - if return_value is not None: - if hasattr(return_value, 'session_id'): - attributes["workflow.storage.operation_result"] = "success" - attributes["workflow.storage.result_session_id"] = str(return_value.session_id) - else: - attributes["workflow.storage.operation_result"] = "unknown" - # Set span kind - AgentOpsSpanKind.WORKFLOW is already a string attributes[InstrumentationAttributes.INSTRUMENTATION_TYPE] = AgentOpsSpanKind.WORKFLOW - - return attributes \ No newline at end of file + + return attributes diff --git a/agentops/instrumentation/agno/instrumentor.py b/agentops/instrumentation/agno/instrumentor.py index 3684817a0..2755dab9d 100644 --- a/agentops/instrumentation/agno/instrumentor.py +++ b/agentops/instrumentation/agno/instrumentor.py @@ -10,65 +10,69 @@ - FunctionCall.execute/aexecute - Tool execution when agents call tools (sync/async) - Agent._run_tool/_arun_tool - Agent internal tool execution (sync/async) - Agent._set_session_metrics - Session 
metrics capture for token usage and timing +- Workflow.run_workflow/arun_workflow - Workflow execution (sync/async) +- Workflow session management methods - Session lifecycle operations This provides clean visibility into agent workflows and actual tool usage with proper parent-child span relationships. """ -from typing import List, Collection, Any, Optional, Dict -from opentelemetry.trace import get_tracer, SpanKind +from typing import List, Collection, Any, Optional +from opentelemetry.trace import get_tracer from opentelemetry.instrumentation.instrumentor import BaseInstrumentor from opentelemetry.metrics import get_meter -from opentelemetry.util.types import AttributeValue from opentelemetry import trace, context as otel_context from opentelemetry.trace import Status, StatusCode +from wrapt import wrap_function_wrapper import threading -import weakref from agentops.logging import logger from agentops.semconv import Meters -from agentops.semconv.span_kinds import SpanKind as AgentOpsSpanKind +from agentops.instrumentation.common.wrappers import WrapConfig, wrap, unwrap # Import attribute handlers from agentops.instrumentation.agno.attributes.agent import get_agent_run_attributes -from agentops.instrumentation.agno.attributes.team import get_team_run_attributes +from agentops.instrumentation.agno.attributes.team import get_team_run_attributes from agentops.instrumentation.agno.attributes.tool import get_tool_execution_attributes from agentops.instrumentation.agno.attributes.metrics import get_metrics_attributes from agentops.instrumentation.agno.attributes.workflow import ( get_workflow_run_attributes, get_workflow_session_attributes, - get_workflow_storage_attributes ) +# Library info for tracer/meter +LIBRARY_NAME = "agentops.instrumentation.agno" +LIBRARY_VERSION = "0.1.0" + class StreamingContextManager: """Manages span contexts for streaming agent and workflow executions.""" - + def __init__(self): self._contexts = {} # context_id -> (span_context, span) 
self._agent_sessions = {} # session_id -> agent_id mapping for context lookup self._lock = threading.Lock() - + def store_context(self, context_id: str, span_context: Any, span: Any) -> None: """Store span context for streaming execution.""" with self._lock: self._contexts[context_id] = (span_context, span) - + def get_context(self, context_id: str) -> Optional[tuple]: """Retrieve stored span context.""" with self._lock: return self._contexts.get(context_id) - + def remove_context(self, context_id: str) -> None: """Remove stored context (when streaming completes).""" with self._lock: self._contexts.pop(context_id, None) - + def store_agent_session_mapping(self, session_id: str, agent_id: str) -> None: """Store mapping between session and agent for context lookup.""" with self._lock: self._agent_sessions[session_id] = agent_id - + def get_agent_context_by_session(self, session_id: str) -> Optional[tuple]: """Get agent context using session ID.""" with self._lock: @@ -76,7 +80,7 @@ def get_agent_context_by_session(self, session_id: str) -> Optional[tuple]: if agent_id: return self._contexts.get(agent_id) return None - + def clear_all(self) -> None: """Clear all stored contexts.""" with self._lock: @@ -88,707 +92,987 @@ def clear_all(self) -> None: _streaming_context_manager = StreamingContextManager() -def create_streaming_workflow_wrapper(original_func, is_async=False): +# Methods to wrap for instrumentation +WRAPPED_METHODS: List[WrapConfig] = [ + # Workflow session methods + WrapConfig( + trace_name="agno.workflow.session.load_session", + package="agno.workflow.workflow", + class_name="Workflow", + method_name="load_session", + handler=get_workflow_session_attributes, + ), + WrapConfig( + trace_name="agno.workflow.session.new_session", + package="agno.workflow.workflow", + class_name="Workflow", + method_name="new_session", + handler=get_workflow_session_attributes, + ), + WrapConfig( + trace_name="agno.workflow.session.read_from_storage", + 
package="agno.workflow.workflow", + class_name="Workflow", + method_name="read_from_storage", + handler=get_workflow_session_attributes, + ), + WrapConfig( + trace_name="agno.workflow.session.write_to_storage", + package="agno.workflow.workflow", + class_name="Workflow", + method_name="write_to_storage", + handler=get_workflow_session_attributes, + ), +] + + +class StreamingResultWrapper: + """Wrapper for streaming results that maintains agent span as active throughout iteration.""" + + def __init__(self, original_result, span, agent_id, agent_context): + self.original_result = original_result + self.span = span + self.agent_id = agent_id + self.agent_context = agent_context + self._consumed = False + + def __iter__(self): + """Return iterator that keeps agent span active during iteration.""" + context_token = otel_context.attach(self.agent_context) + try: + # Execute iteration within agent context + for item in self.original_result: + # Each item is yielded within the agent span context + yield item + finally: + # Clean up when iteration is complete + otel_context.detach(context_token) + if not self._consumed: + self._consumed = True + self.span.end() + _streaming_context_manager.remove_context(self.agent_id) + + def __getattr__(self, name): + """Delegate attribute access to the original result.""" + return getattr(self.original_result, name) + + +def create_streaming_workflow_wrapper(tracer): """Create a streaming-aware wrapper for workflow run methods.""" - - if is_async: - async def async_wrapper(self, *args, **kwargs): - tracer = trace.get_tracer(__name__) - - # Get workflow ID for context storage - workflow_id = getattr(self, 'workflow_id', None) or getattr(self, 'id', None) or id(self) - workflow_id = str(workflow_id) - - # Check if streaming is enabled - is_streaming = kwargs.get('stream', getattr(self, 'stream', False)) - + + def wrapper(wrapped, instance, args, kwargs): + # Get workflow ID for context storage + workflow_id = getattr(instance, 
"workflow_id", None) or getattr(instance, "id", None) or id(instance) + workflow_id = str(workflow_id) + + # Check if streaming is enabled + is_streaming = kwargs.get("stream", getattr(instance, "stream", False)) + + # For streaming, manually manage span lifecycle + if is_streaming: + span = tracer.start_span("agno.workflow.run.workflow") + + try: + # Set workflow attributes + attributes = get_workflow_run_attributes(args=(instance,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Store context for streaming - capture current context with active span + current_context = trace.set_span_in_context(span, otel_context.get_current()) + _streaming_context_manager.store_context(workflow_id, current_context, span) + + # Execute the original function within workflow context + context_token = otel_context.attach(current_context) + try: + result = wrapped(*args, **kwargs) + finally: + otel_context.detach(context_token) + + # Set result attributes + result_attributes = get_workflow_run_attributes( + args=(instance,) + args, kwargs=kwargs, return_value=result + ) + for key, value in result_attributes.items(): + if key not in attributes: # Avoid duplicates + span.set_attribute(key, value) + + span.set_status(Status(StatusCode.OK)) + + # For streaming results, we need to keep the span open + # The span will be closed when streaming completes + return result + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + span.end() + _streaming_context_manager.remove_context(workflow_id) + raise + else: + # For non-streaming, use normal context manager with tracer.start_as_current_span("agno.workflow.run.workflow") as span: try: # Set workflow attributes - attributes = get_workflow_run_attributes(args=(self,) + args, kwargs=kwargs) + attributes = get_workflow_run_attributes(args=(instance,) + args, kwargs=kwargs) for key, value in attributes.items(): span.set_attribute(key, value) - - 
# Store context for streaming if needed - if is_streaming: - current_context = otel_context.get_current() - _streaming_context_manager.store_context(workflow_id, current_context, span) - + # Execute the original function - result = await original_func(self, *args, **kwargs) - + result = wrapped(*args, **kwargs) + # Set result attributes - result_attributes = get_workflow_run_attributes(args=(self,) + args, kwargs=kwargs, return_value=result) + result_attributes = get_workflow_run_attributes( + args=(instance,) + args, kwargs=kwargs, return_value=result + ) for key, value in result_attributes.items(): if key not in attributes: # Avoid duplicates span.set_attribute(key, value) - + span.set_status(Status(StatusCode.OK)) return result - + except Exception as e: span.set_status(Status(StatusCode.ERROR, str(e))) span.record_exception(e) raise + + return wrapper + + +def create_streaming_workflow_async_wrapper(tracer): + """Create a streaming-aware async wrapper for workflow run methods.""" + + async def wrapper(wrapped, instance, args, kwargs): + # Get workflow ID for context storage + workflow_id = getattr(instance, "workflow_id", None) or getattr(instance, "id", None) or id(instance) + workflow_id = str(workflow_id) + + # Check if streaming is enabled + is_streaming = kwargs.get("stream", getattr(instance, "stream", False)) + + # For streaming, manually manage span lifecycle + if is_streaming: + span = tracer.start_span("agno.workflow.run.workflow") + + try: + # Set workflow attributes + attributes = get_workflow_run_attributes(args=(instance,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Store context for streaming - capture current context with active span + current_context = trace.set_span_in_context(span, otel_context.get_current()) + _streaming_context_manager.store_context(workflow_id, current_context, span) + + # Execute the original function within workflow context + context_token = 
otel_context.attach(current_context) + try: + result = await wrapped(*args, **kwargs) finally: - # For non-streaming, remove context immediately - if not is_streaming: - _streaming_context_manager.remove_context(workflow_id) - - return async_wrapper - else: - def sync_wrapper(self, *args, **kwargs): - tracer = trace.get_tracer(__name__) - - # Get workflow ID for context storage - workflow_id = getattr(self, 'workflow_id', None) or getattr(self, 'id', None) or id(self) - workflow_id = str(workflow_id) - - # Check if streaming is enabled - is_streaming = kwargs.get('stream', getattr(self, 'stream', False)) - + otel_context.detach(context_token) + + # Set result attributes + result_attributes = get_workflow_run_attributes( + args=(instance,) + args, kwargs=kwargs, return_value=result + ) + for key, value in result_attributes.items(): + if key not in attributes: # Avoid duplicates + span.set_attribute(key, value) + + span.set_status(Status(StatusCode.OK)) + + # For streaming results, we need to keep the span open + # The span will be closed when streaming completes + return result + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + span.end() + _streaming_context_manager.remove_context(workflow_id) + raise + else: + # For non-streaming, use normal context manager with tracer.start_as_current_span("agno.workflow.run.workflow") as span: try: # Set workflow attributes - attributes = get_workflow_run_attributes(args=(self,) + args, kwargs=kwargs) + attributes = get_workflow_run_attributes(args=(instance,) + args, kwargs=kwargs) for key, value in attributes.items(): span.set_attribute(key, value) - - # Store context for streaming if needed - if is_streaming: - current_context = otel_context.get_current() - _streaming_context_manager.store_context(workflow_id, current_context, span) - + # Execute the original function - result = original_func(self, *args, **kwargs) - + result = await wrapped(*args, **kwargs) + # Set 
result attributes - result_attributes = get_workflow_run_attributes(args=(self,) + args, kwargs=kwargs, return_value=result) + result_attributes = get_workflow_run_attributes( + args=(instance,) + args, kwargs=kwargs, return_value=result + ) for key, value in result_attributes.items(): if key not in attributes: # Avoid duplicates span.set_attribute(key, value) - + span.set_status(Status(StatusCode.OK)) return result - + except Exception as e: span.set_status(Status(StatusCode.ERROR, str(e))) span.record_exception(e) raise + + return wrapper + + +def create_streaming_agent_wrapper(tracer): + """Create a streaming-aware wrapper for agent run methods.""" + + def wrapper(wrapped, instance, args, kwargs): + # Get agent ID for context storage + agent_id = getattr(instance, "agent_id", None) or getattr(instance, "id", None) or id(instance) + agent_id = str(agent_id) + + # Get session ID for context mapping + session_id = getattr(instance, "session_id", None) + + # Check if streaming is enabled + is_streaming = kwargs.get("stream", getattr(instance, "stream", False)) + + # For streaming, manually manage span lifecycle + if is_streaming: + span = tracer.start_span("agno.agent.run.agent") + + try: + # Set agent attributes + attributes = get_agent_run_attributes(args=(instance,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Store context for streaming - capture current context with active span + current_context = trace.set_span_in_context(span, otel_context.get_current()) + _streaming_context_manager.store_context(agent_id, current_context, span) + + # Store session-to-agent mapping for LLM context lookup + if session_id: + _streaming_context_manager.store_agent_session_mapping(session_id, agent_id) + + # Execute the original function within agent context + context_token = otel_context.attach(current_context) + try: + result = wrapped(*args, **kwargs) finally: - # For non-streaming, remove context immediately - if not 
is_streaming: - _streaming_context_manager.remove_context(workflow_id) - - return sync_wrapper - - -def create_streaming_agent_wrapper(original_func, is_async=False): - """Create a streaming-aware wrapper for agent run methods with enhanced context propagation.""" - - if is_async: - async def async_wrapper(self, *args, **kwargs): - tracer = trace.get_tracer(__name__) - - # Get agent ID for context storage - agent_id = getattr(self, 'agent_id', None) or getattr(self, 'id', None) or id(self) - agent_id = str(agent_id) - - # Get session ID for context mapping - session_id = getattr(self, 'session_id', None) - - # Check if streaming is enabled - is_streaming = kwargs.get('stream', getattr(self, 'stream', False)) - - # For streaming, manually manage span lifecycle - if is_streaming: - span = tracer.start_span("agno.agent.run.agent") - + otel_context.detach(context_token) + + # Set result attributes + result_attributes = get_agent_run_attributes( + args=(instance,) + args, kwargs=kwargs, return_value=result + ) + for key, value in result_attributes.items(): + if key not in attributes: # Avoid duplicates + span.set_attribute(key, value) + + span.set_status(Status(StatusCode.OK)) + + # Wrap the result to maintain context and end span when complete + if hasattr(result, "__iter__"): + return StreamingResultWrapper(result, span, agent_id, current_context) + else: + # Not actually streaming, clean up immediately + span.end() + _streaming_context_manager.remove_context(agent_id) + return result + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + span.end() + _streaming_context_manager.remove_context(agent_id) + raise + else: + # For non-streaming, use normal context manager + with tracer.start_as_current_span("agno.agent.run.agent") as span: try: # Set agent attributes - attributes = get_agent_run_attributes(args=(self,) + args, kwargs=kwargs) + attributes = get_agent_run_attributes(args=(instance,) + args, kwargs=kwargs) 
for key, value in attributes.items(): span.set_attribute(key, value) - - # Store context for streaming - capture current context with active span - current_context = trace.set_span_in_context(span, otel_context.get_current()) - _streaming_context_manager.store_context(agent_id, current_context, span) - - # Store session-to-agent mapping for LLM context lookup - if session_id: - _streaming_context_manager.store_agent_session_mapping(session_id, agent_id) - - # Execute the original function within agent context - context_token = otel_context.attach(current_context) - try: - result = await original_func(self, *args, **kwargs) - finally: - otel_context.detach(context_token) - + + # Execute the original function + result = wrapped(*args, **kwargs) + # Set result attributes - result_attributes = get_agent_run_attributes(args=(self,) + args, kwargs=kwargs, return_value=result) + result_attributes = get_agent_run_attributes( + args=(instance,) + args, kwargs=kwargs, return_value=result + ) for key, value in result_attributes.items(): if key not in attributes: # Avoid duplicates span.set_attribute(key, value) - + span.set_status(Status(StatusCode.OK)) - - # Wrap the result to maintain context and end span when complete - if hasattr(result, '__iter__'): - return StreamingResultWrapper(result, span, agent_id, current_context) - else: - # Not actually streaming, clean up immediately - span.end() - _streaming_context_manager.remove_context(agent_id) - return result - + return result + except Exception as e: span.set_status(Status(StatusCode.ERROR, str(e))) span.record_exception(e) + raise + + return wrapper + + +def create_streaming_agent_async_wrapper(tracer): + """Create a streaming-aware async wrapper for agent run methods.""" + + async def wrapper(wrapped, instance, args, kwargs): + # Get agent ID for context storage + agent_id = getattr(instance, "agent_id", None) or getattr(instance, "id", None) or id(instance) + agent_id = str(agent_id) + + # Get session ID for context 
mapping + session_id = getattr(instance, "session_id", None) + + # Check if streaming is enabled + is_streaming = kwargs.get("stream", getattr(instance, "stream", False)) + + # For streaming, manually manage span lifecycle + if is_streaming: + span = tracer.start_span("agno.agent.run.agent") + + try: + # Set agent attributes + attributes = get_agent_run_attributes(args=(instance,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Store context for streaming - capture current context with active span + current_context = trace.set_span_in_context(span, otel_context.get_current()) + _streaming_context_manager.store_context(agent_id, current_context, span) + + # Store session-to-agent mapping for LLM context lookup + if session_id: + _streaming_context_manager.store_agent_session_mapping(session_id, agent_id) + + # Execute the original function within agent context + context_token = otel_context.attach(current_context) + try: + result = await wrapped(*args, **kwargs) + finally: + otel_context.detach(context_token) + + # Set result attributes + result_attributes = get_agent_run_attributes( + args=(instance,) + args, kwargs=kwargs, return_value=result + ) + for key, value in result_attributes.items(): + if key not in attributes: # Avoid duplicates + span.set_attribute(key, value) + + span.set_status(Status(StatusCode.OK)) + + # Wrap the result to maintain context and end span when complete + if hasattr(result, "__iter__"): + return StreamingResultWrapper(result, span, agent_id, current_context) + else: + # Not actually streaming, clean up immediately span.end() _streaming_context_manager.remove_context(agent_id) - raise - else: - # For non-streaming, use normal context manager - with tracer.start_as_current_span("agno.agent.run.agent") as span: - try: - # Set agent attributes - attributes = get_agent_run_attributes(args=(self,) + args, kwargs=kwargs) - for key, value in attributes.items(): - span.set_attribute(key, 
value) - - # Execute the original function - result = await original_func(self, *args, **kwargs) - - # Set result attributes - result_attributes = get_agent_run_attributes(args=(self,) + args, kwargs=kwargs, return_value=result) - for key, value in result_attributes.items(): - if key not in attributes: # Avoid duplicates - span.set_attribute(key, value) - - span.set_status(Status(StatusCode.OK)) - return result - - except Exception as e: - span.set_status(Status(StatusCode.ERROR, str(e))) - span.record_exception(e) - raise - - return async_wrapper - else: - def sync_wrapper(self, *args, **kwargs): - tracer = trace.get_tracer(__name__) - - # Get agent ID for context storage - agent_id = getattr(self, 'agent_id', None) or getattr(self, 'id', None) or id(self) - agent_id = str(agent_id) - - # Get session ID for context mapping - session_id = getattr(self, 'session_id', None) - - # Check if streaming is enabled - is_streaming = kwargs.get('stream', getattr(self, 'stream', False)) - - # For streaming, manually manage span lifecycle - if is_streaming: - span = tracer.start_span("agno.agent.run.agent") - + return result + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + span.end() + _streaming_context_manager.remove_context(agent_id) + raise + else: + # For non-streaming, use normal context manager + with tracer.start_as_current_span("agno.agent.run.agent") as span: try: # Set agent attributes - attributes = get_agent_run_attributes(args=(self,) + args, kwargs=kwargs) + attributes = get_agent_run_attributes(args=(instance,) + args, kwargs=kwargs) for key, value in attributes.items(): span.set_attribute(key, value) - - # Store context for streaming - capture current context with active span - current_context = trace.set_span_in_context(span, otel_context.get_current()) - _streaming_context_manager.store_context(agent_id, current_context, span) - - # Store session-to-agent mapping for LLM context lookup - if 
session_id: - _streaming_context_manager.store_agent_session_mapping(session_id, agent_id) - - # Execute the original function within agent context - context_token = otel_context.attach(current_context) - try: - result = original_func(self, *args, **kwargs) - finally: - otel_context.detach(context_token) - + + # Execute the original function + result = await wrapped(*args, **kwargs) + # Set result attributes - result_attributes = get_agent_run_attributes(args=(self,) + args, kwargs=kwargs, return_value=result) + result_attributes = get_agent_run_attributes( + args=(instance,) + args, kwargs=kwargs, return_value=result + ) for key, value in result_attributes.items(): if key not in attributes: # Avoid duplicates span.set_attribute(key, value) - + span.set_status(Status(StatusCode.OK)) - - # Wrap the result to maintain context and end span when complete - if hasattr(result, '__iter__'): - return StreamingResultWrapper(result, span, agent_id, current_context) - else: - # Not actually streaming, clean up immediately - span.end() - _streaming_context_manager.remove_context(agent_id) - return result - + return result + except Exception as e: span.set_status(Status(StatusCode.ERROR, str(e))) span.record_exception(e) - span.end() - _streaming_context_manager.remove_context(agent_id) raise - else: - # For non-streaming, use normal context manager - with tracer.start_as_current_span("agno.agent.run.agent") as span: - try: - # Set agent attributes - attributes = get_agent_run_attributes(args=(self,) + args, kwargs=kwargs) - for key, value in attributes.items(): - span.set_attribute(key, value) - - # Execute the original function - result = original_func(self, *args, **kwargs) - - # Set result attributes - result_attributes = get_agent_run_attributes(args=(self,) + args, kwargs=kwargs, return_value=result) - for key, value in result_attributes.items(): - if key not in attributes: # Avoid duplicates - span.set_attribute(key, value) - - span.set_status(Status(StatusCode.OK)) - 
return result - - except Exception as e: - span.set_status(Status(StatusCode.ERROR, str(e))) - span.record_exception(e) - raise - - return sync_wrapper - -class StreamingResultWrapper: - """Wrapper for streaming results that maintains agent span as active throughout iteration.""" - - def __init__(self, original_result, span, agent_id, agent_context): - self.original_result = original_result - self.span = span - self.agent_id = agent_id - self.agent_context = agent_context - self._consumed = False - - def __iter__(self): - """Return iterator that keeps agent span active during iteration.""" - context_token = otel_context.attach(self.agent_context) - try: - # Execute iteration within agent context - for item in self.original_result: - # Each item is yielded within the agent span context - yield item - finally: - # Clean up when iteration is complete - otel_context.detach(context_token) - if not self._consumed: - self._consumed = True - self.span.end() - _streaming_context_manager.remove_context(self.agent_id) - - def __getattr__(self, name): - """Delegate attribute access to the original result.""" - return getattr(self.original_result, name) + return wrapper -def create_streaming_tool_wrapper(original_func): +def create_streaming_tool_wrapper(tracer): """Create a streaming-aware wrapper for tool execution methods.""" - - def wrapper(self, *args, **kwargs): - tracer = trace.get_tracer(__name__) - + + def wrapper(wrapped, instance, args, kwargs): # Try to find the agent or workflow context for proper span hierarchy parent_context = None parent_span = None - + # Try to get context from agent try: - if hasattr(self, '_agent'): - agent = self._agent - agent_id = getattr(agent, 'agent_id', None) or getattr(agent, 'id', None) or id(agent) + if hasattr(instance, "_agent"): + agent = instance._agent + agent_id = getattr(agent, "agent_id", None) or getattr(agent, "id", None) or id(agent) agent_id = str(agent_id) context_info = _streaming_context_manager.get_context(agent_id) 
if context_info: parent_context, parent_span = context_info except Exception: pass # Continue without agent context if not found - + # Try to get context from workflow if agent context not found if not parent_context: try: - if hasattr(self, '_workflow'): - workflow = self._workflow - workflow_id = getattr(workflow, 'workflow_id', None) or getattr(workflow, 'id', None) or id(workflow) + if hasattr(instance, "_workflow"): + workflow = instance._workflow + workflow_id = ( + getattr(workflow, "workflow_id", None) or getattr(workflow, "id", None) or id(workflow) + ) workflow_id = str(workflow_id) context_info = _streaming_context_manager.get_context(workflow_id) if context_info: parent_context, parent_span = context_info except Exception: pass # Continue without workflow context if not found - + # Use parent context if available, otherwise use current context if parent_context: - with otel_context.use_context(parent_context): + context_token = otel_context.attach(parent_context) + try: with tracer.start_as_current_span("agno.tool.execute.tool_usage") as span: try: # Set tool attributes - attributes = get_tool_execution_attributes(args=(self,) + args, kwargs=kwargs) + attributes = get_tool_execution_attributes(args=(instance,) + args, kwargs=kwargs) for key, value in attributes.items(): span.set_attribute(key, value) - + # Execute the original function - result = original_func(self, *args, **kwargs) - + result = wrapped(*args, **kwargs) + # Set result attributes - result_attributes = get_tool_execution_attributes(args=(self,) + args, kwargs=kwargs, return_value=result) + result_attributes = get_tool_execution_attributes( + args=(instance,) + args, kwargs=kwargs, return_value=result + ) for key, value in result_attributes.items(): if key not in attributes: # Avoid duplicates span.set_attribute(key, value) - + span.set_status(Status(StatusCode.OK)) return result - + except Exception as e: span.set_status(Status(StatusCode.ERROR, str(e))) span.record_exception(e) raise + 
finally: + otel_context.detach(context_token) else: # Fallback to normal span creation with tracer.start_as_current_span("agno.tool.execute.tool_usage") as span: try: # Set tool attributes - attributes = get_tool_execution_attributes(args=(self,) + args, kwargs=kwargs) + attributes = get_tool_execution_attributes(args=(instance,) + args, kwargs=kwargs) for key, value in attributes.items(): span.set_attribute(key, value) - + # Execute the original function - result = original_func(self, *args, **kwargs) - + result = wrapped(*args, **kwargs) + # Set result attributes - result_attributes = get_tool_execution_attributes(args=(self,) + args, kwargs=kwargs, return_value=result) + result_attributes = get_tool_execution_attributes( + args=(instance,) + args, kwargs=kwargs, return_value=result + ) for key, value in result_attributes.items(): if key not in attributes: # Avoid duplicates span.set_attribute(key, value) - + span.set_status(Status(StatusCode.OK)) return result - + except Exception as e: span.set_status(Status(StatusCode.ERROR, str(e))) span.record_exception(e) raise - + return wrapper -def get_agent_context_for_llm(): - """Helper function for LLM instrumentation to get current agent context.""" - current_context = otel_context.get_current() - current_span = trace.get_current_span(current_context) - - # Check if we're already in an agent span - if current_span and hasattr(current_span, 'name') and 'agent' in current_span.name: - return current_context, current_span - - # Try to find stored agent context by checking active contexts - # This is a fallback for cases where context isn't properly propagated - return None, None +def create_metrics_wrapper(tracer): + """Create a wrapper for metrics methods with dynamic span naming.""" + def wrapper(wrapped, instance, args, kwargs): + # Extract model ID for dynamic span naming + span_name = "agno.agent.metrics" # fallback + if hasattr(instance, "model") and instance.model and hasattr(instance.model, "id"): + model_id = 
str(instance.model.id) + span_name = f"{model_id}.llm" -class AgnoInstrumentor(BaseInstrumentor): - """Agno instrumentation class.""" + with tracer.start_as_current_span(span_name) as span: + try: + # Set attributes + attributes = get_metrics_attributes(args=(instance,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) - _original_methods = {} # Store original methods for cleanup + # Execute the original function + result = wrapped(*args, **kwargs) - def instrumentation_dependencies(self) -> Collection[str]: - """Returns list of packages required for instrumentation.""" - return [] - - def _instrument(self, **kwargs): - """Install instrumentation for Agno.""" - tracer = get_tracer(__name__) - - try: - # Apply workflow instrumentation - try: - import agno.workflow.workflow - if hasattr(agno.workflow.workflow, 'Workflow'): - # Store original methods for cleanup - self._original_methods['Workflow.run_workflow'] = getattr(agno.workflow.workflow.Workflow, 'run_workflow', None) - self._original_methods['Workflow.arun_workflow'] = getattr(agno.workflow.workflow.Workflow, 'arun_workflow', None) - - # Wrap main workflow execution methods - if self._original_methods['Workflow.run_workflow']: - agno.workflow.workflow.Workflow.run_workflow = create_streaming_workflow_wrapper( - agno.workflow.workflow.Workflow.run_workflow, is_async=False - ) - if self._original_methods['Workflow.arun_workflow']: - agno.workflow.workflow.Workflow.arun_workflow = create_streaming_workflow_wrapper( - agno.workflow.workflow.Workflow.arun_workflow, is_async=True - ) - - # Wrap session management methods - session_methods = ['load_session', 'new_session', 'read_from_storage', 'write_to_storage'] - for method_name in session_methods: - original_method = getattr(agno.workflow.workflow.Workflow, method_name, None) - if original_method: - self._original_methods[f'Workflow.{method_name}'] = original_method - setattr(agno.workflow.workflow.Workflow, 
method_name, - self._create_session_wrapper(original_method, method_name)) - - logger.debug("Successfully wrapped Workflow methods with streaming context support") - except ImportError: - logger.debug("Workflow module not found, skipping workflow instrumentation") - - # Apply streaming-aware agent wrappers - import agno.agent - if hasattr(agno.agent, 'Agent'): - # Store original methods for cleanup - self._original_methods['Agent.run'] = agno.agent.Agent.run - self._original_methods['Agent.arun'] = agno.agent.Agent.arun - - agno.agent.Agent.run = create_streaming_agent_wrapper(agno.agent.Agent.run, is_async=False) - agno.agent.Agent.arun = create_streaming_agent_wrapper(agno.agent.Agent.arun, is_async=True) - - logger.debug("Successfully wrapped Agent.run and Agent.arun with enhanced streaming context support") - - # Apply streaming-aware tool wrappers - import agno.tools.function - if hasattr(agno.tools.function, 'FunctionCall'): - # Store original method for cleanup - self._original_methods['FunctionCall.execute'] = agno.tools.function.FunctionCall.execute - - agno.tools.function.FunctionCall.execute = create_streaming_tool_wrapper(agno.tools.function.FunctionCall.execute) - - logger.debug("Successfully wrapped FunctionCall.execute with streaming context support") - - # Apply standard team and metrics wrappers if needed - try: - import agno.team.team - if hasattr(agno.team.team, 'Team'): - self._original_methods['Team._run'] = getattr(agno.team.team.Team, '_run', None) - self._original_methods['Team._arun'] = getattr(agno.team.team.Team, '_arun', None) - - if self._original_methods['Team._run']: - agno.team.team.Team._run = self._create_standard_wrapper( - agno.team.team.Team._run, "agno.team.run.workflow", get_team_run_attributes, is_async=False - ) - if self._original_methods['Team._arun']: - agno.team.team.Team._arun = self._create_standard_wrapper( - agno.team.team.Team._arun, "agno.team.run.workflow", get_team_run_attributes, is_async=True - ) - - 
logger.debug("Successfully wrapped Team._run and Team._arun") - except ImportError: - logger.debug("Team module not found, skipping team instrumentation") - - # Apply metrics wrapper + # Set result attributes + result_attributes = get_metrics_attributes(args=(instance,) + args, kwargs=kwargs, return_value=result) + for key, value in result_attributes.items(): + if key not in attributes: # Avoid duplicates + span.set_attribute(key, value) + + span.set_status(Status(StatusCode.OK)) + return result + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + raise + + return wrapper + + +def create_team_internal_wrapper(tracer): + """Create a wrapper for Team internal methods (_run/_arun) that manages team span lifecycle.""" + + def wrapper(wrapped, instance, args, kwargs): + # Get team ID for context storage + team_id = getattr(instance, "team_id", None) or getattr(instance, "id", None) or id(instance) + team_id = str(team_id) + + # Check if we already have a team context (from print_response) + existing_context = _streaming_context_manager.get_context(team_id) + + if existing_context: + # We're being called from print_response, use existing context + parent_context, parent_span = existing_context + + # Execute within the existing team context + context_token = otel_context.attach(parent_context) try: - if hasattr(agno.agent.Agent, '_set_session_metrics'): - self._original_methods['Agent._set_session_metrics'] = agno.agent.Agent._set_session_metrics - agno.agent.Agent._set_session_metrics = self._create_llm_metrics_wrapper( - agno.agent.Agent._set_session_metrics, get_metrics_attributes - ) - logger.debug("Successfully wrapped Agent._set_session_metrics") - except AttributeError: - logger.debug("_set_session_metrics method not found, skipping metrics instrumentation") - - logger.info("Agno instrumentation installed successfully with enhanced workflow and streaming context support") - - except Exception as e: - 
logger.error(f"Failed to install Agno instrumentation: {e}") - raise + with tracer.start_as_current_span("agno.team.run.workflow") as span: + try: + # Set workflow attributes + attributes = get_team_run_attributes(args=(instance,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Execute the original function + result = wrapped(*args, **kwargs) - def _create_session_wrapper(self, original_func, method_name): - """Create a wrapper for workflow session management methods.""" - - def wrapper(self, *args, **kwargs): - tracer = trace.get_tracer(__name__) - span_name = f"agno.workflow.session.{method_name}" - - with tracer.start_as_current_span(span_name) as span: + span.set_status(Status(StatusCode.OK)) + return result + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + raise + finally: + # Close the parent team span when workflow completes + if parent_span: + parent_span.end() + _streaming_context_manager.remove_context(team_id) + finally: + otel_context.detach(context_token) + else: + # Direct call to _run, create new team span + with tracer.start_as_current_span("agno.team.run.workflow") as span: try: - # Set session attributes - attributes = get_workflow_session_attributes(args=(self,) + args, kwargs=kwargs) + # Set workflow attributes + attributes = get_team_run_attributes(args=(instance,) + args, kwargs=kwargs) for key, value in attributes.items(): span.set_attribute(key, value) - + # Execute the original function - result = original_func(self, *args, **kwargs) - - # Set result attributes - result_attributes = get_workflow_session_attributes(args=(self,) + args, kwargs=kwargs, return_value=result) - for key, value in result_attributes.items(): - if key not in attributes: # Avoid duplicates - span.set_attribute(key, value) - + result = wrapped(*args, **kwargs) + span.set_status(Status(StatusCode.OK)) return result - + except Exception as e: 
span.set_status(Status(StatusCode.ERROR, str(e))) span.record_exception(e) raise - - return wrapper - - def _create_standard_wrapper(self, original_func, span_name, attributes_handler, is_async=False): - """Create a standard wrapper for non-streaming methods.""" - - if is_async: - async def async_wrapper(self, *args, **kwargs): - tracer = trace.get_tracer(__name__) - with tracer.start_as_current_span(span_name) as span: + + return wrapper + + +def create_team_internal_async_wrapper(tracer): + """Create an async wrapper for Team internal methods (_arun) that manages team span lifecycle.""" + + async def wrapper(wrapped, instance, args, kwargs): + # Get team ID for context storage + team_id = getattr(instance, "team_id", None) or getattr(instance, "id", None) or id(instance) + team_id = str(team_id) + + # Check if we already have a team context (from print_response) + existing_context = _streaming_context_manager.get_context(team_id) + + if existing_context: + # We're being called from print_response, use existing context + parent_context, parent_span = existing_context + + # Execute within the existing team context + context_token = otel_context.attach(parent_context) + try: + with tracer.start_as_current_span("agno.team.run.workflow") as span: try: - # Set attributes - attributes = attributes_handler(args=(self,) + args, kwargs=kwargs) + # Set workflow attributes + attributes = get_team_run_attributes(args=(instance,) + args, kwargs=kwargs) for key, value in attributes.items(): span.set_attribute(key, value) - + # Execute the original function - result = await original_func(self, *args, **kwargs) - - # Set result attributes - result_attributes = attributes_handler(args=(self,) + args, kwargs=kwargs, return_value=result) - for key, value in result_attributes.items(): - if key not in attributes: # Avoid duplicates - span.set_attribute(key, value) - + result = await wrapped(*args, **kwargs) + span.set_status(Status(StatusCode.OK)) return result - + except Exception as 
e: span.set_status(Status(StatusCode.ERROR, str(e))) span.record_exception(e) raise - - return async_wrapper + finally: + # Close the parent team span when workflow completes + if parent_span: + parent_span.end() + _streaming_context_manager.remove_context(team_id) + finally: + otel_context.detach(context_token) else: - def sync_wrapper(self, *args, **kwargs): - tracer = trace.get_tracer(__name__) - with tracer.start_as_current_span(span_name) as span: - try: - # Set attributes - attributes = attributes_handler(args=(self,) + args, kwargs=kwargs) - for key, value in attributes.items(): - span.set_attribute(key, value) - - # Execute the original function - result = original_func(self, *args, **kwargs) - - # Set result attributes - result_attributes = attributes_handler(args=(self,) + args, kwargs=kwargs, return_value=result) - for key, value in result_attributes.items(): - if key not in attributes: # Avoid duplicates - span.set_attribute(key, value) - - span.set_status(Status(StatusCode.OK)) - return result - - except Exception as e: - span.set_status(Status(StatusCode.ERROR, str(e))) - span.record_exception(e) - raise - - return sync_wrapper - - def _create_llm_metrics_wrapper(self, original_func, attributes_handler): - """Create an LLM metrics wrapper with dynamic span naming.""" - - def wrapper(self, *args, **kwargs): - tracer = trace.get_tracer(__name__) - - # Extract model ID for dynamic span naming - span_name = "agno.agent.metrics" # fallback - if hasattr(self, 'model') and self.model and hasattr(self.model, 'id'): - model_id = str(self.model.id) - span_name = f"{model_id}.llm" - - with tracer.start_as_current_span(span_name) as span: + # Direct call to _arun, create new team span + with tracer.start_as_current_span("agno.team.run.workflow") as span: try: - # Set attributes - attributes = attributes_handler(args=(self,) + args, kwargs=kwargs) + # Set workflow attributes + attributes = get_team_run_attributes(args=(instance,) + args, kwargs=kwargs) for key, 
value in attributes.items(): span.set_attribute(key, value) - + # Execute the original function - result = original_func(self, *args, **kwargs) - - # Set result attributes - result_attributes = attributes_handler(args=(self,) + args, kwargs=kwargs, return_value=result) - for key, value in result_attributes.items(): - if key not in attributes: # Avoid duplicates - span.set_attribute(key, value) - + result = await wrapped(*args, **kwargs) + span.set_status(Status(StatusCode.OK)) return result - + except Exception as e: span.set_status(Status(StatusCode.ERROR, str(e))) span.record_exception(e) raise - - return wrapper + + return wrapper + + +def create_team_wrapper(tracer): + """Create a wrapper for Team methods that establishes the team context.""" + + def wrapper(wrapped, instance, args, kwargs): + # Get team ID for context storage + team_id = getattr(instance, "team_id", None) or getattr(instance, "id", None) or id(instance) + team_id = str(team_id) + + # Check if streaming is enabled + is_streaming = kwargs.get("stream", getattr(instance, "stream", False)) + + # For print_response, we need to wrap the internal _run method instead + # because print_response returns immediately + if wrapped.__name__ == "print_response": + # Create team span but don't manage it here + span = tracer.start_span("agno.team.run.agent") + + try: + # Set team attributes + attributes = get_team_run_attributes(args=(instance,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Store context for child spans + current_context = trace.set_span_in_context(span, otel_context.get_current()) + _streaming_context_manager.store_context(team_id, current_context, span) + + # The span will be closed by the internal _run method + # Just execute print_response normally + result = wrapped(*args, **kwargs) + return result + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + span.end() + 
_streaming_context_manager.remove_context(team_id) + raise + else: + # For run/arun methods, use standard span management + span = tracer.start_span("agno.team.run.agent") + + try: + # Set team attributes + attributes = get_team_run_attributes(args=(instance,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Store context for child spans + current_context = trace.set_span_in_context(span, otel_context.get_current()) + _streaming_context_manager.store_context(team_id, current_context, span) + + # Execute the original function within team context + context_token = otel_context.attach(current_context) + try: + result = wrapped(*args, **kwargs) + + # For streaming results, wrap them to keep span alive + if is_streaming and hasattr(result, "__iter__"): + return StreamingResultWrapper(result, span, team_id, current_context) + else: + # Non-streaming, close span + span.end() + _streaming_context_manager.remove_context(team_id) + return result + + finally: + otel_context.detach(context_token) + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + span.end() + _streaming_context_manager.remove_context(team_id) + raise + + return wrapper + + +def create_team_async_wrapper(tracer): + """Create an async wrapper for Team methods that establishes the team context.""" + + async def wrapper(wrapped, instance, args, kwargs): + # Get team ID for context storage + team_id = getattr(instance, "team_id", None) or getattr(instance, "id", None) or id(instance) + team_id = str(team_id) + + # Check if streaming is enabled + is_streaming = kwargs.get("stream", getattr(instance, "stream", False)) + + # Create team span + span = tracer.start_span("agno.team.run.agent") + + try: + # Set team attributes + attributes = get_team_run_attributes(args=(instance,) + args, kwargs=kwargs) + for key, value in attributes.items(): + span.set_attribute(key, value) + + # Store context for child spans - 
capture current context with active span + current_context = trace.set_span_in_context(span, otel_context.get_current()) + _streaming_context_manager.store_context(team_id, current_context, span) + + # Execute the original function within team context + context_token = otel_context.attach(current_context) + try: + result = await wrapped(*args, **kwargs) + + # For non-streaming, close the span + if not is_streaming: + span.end() + _streaming_context_manager.remove_context(team_id) + + return result + finally: + otel_context.detach(context_token) + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + span.end() + _streaming_context_manager.remove_context(team_id) + raise + + return wrapper + + +def get_agent_context_for_llm(): + """Helper function for LLM instrumentation to get current agent context.""" + current_context = otel_context.get_current() + current_span = trace.get_current_span(current_context) + + # Check if we're already in an agent span + if current_span and hasattr(current_span, "name") and "agent" in current_span.name: + return current_context, current_span + + # Try to find stored agent context by checking active contexts + # This is a fallback for cases where context isn't properly propagated + return None, None + + +class AgnoInstrumentor(BaseInstrumentor): + """Agno instrumentation class.""" + + def instrumentation_dependencies(self) -> Collection[str]: + """Returns list of packages required for instrumentation.""" + return ["agno >= 0.1.0"] + + def _instrument(self, **kwargs): + """Install instrumentation for Agno.""" + tracer_provider = kwargs.get("tracer_provider") + tracer = get_tracer(LIBRARY_NAME, LIBRARY_VERSION, tracer_provider) + + meter_provider = kwargs.get("meter_provider") + meter = get_meter(LIBRARY_NAME, LIBRARY_VERSION, meter_provider) + + # Create metrics + meter.create_histogram( + name=Meters.LLM_TOKEN_USAGE, + unit="token", + description="Measures number of input and output 
tokens used with Agno agents", + ) + + meter.create_histogram( + name=Meters.LLM_OPERATION_DURATION, + unit="s", + description="Agno agent operation duration", + ) + + meter.create_counter( + name=Meters.LLM_COMPLETIONS_EXCEPTIONS, + unit="time", + description="Number of exceptions occurred during Agno agent operations", + ) + + # Standard method wrapping using WrapConfig + for wrap_config in WRAPPED_METHODS: + try: + wrap(wrap_config, tracer) + except (AttributeError, ModuleNotFoundError): + logger.debug(f"Could not wrap {wrap_config}") + + # Special handling for streaming methods + # These require custom wrappers due to their streaming nature + try: + # Streaming agent methods + wrap_function_wrapper( + "agno.agent", + "Agent.run", + create_streaming_agent_wrapper(tracer), + ) + wrap_function_wrapper( + "agno.agent", + "Agent.arun", + create_streaming_agent_async_wrapper(tracer), + ) + + # Streaming workflow methods + wrap_function_wrapper( + "agno.workflow.workflow", + "Workflow.run_workflow", + create_streaming_workflow_wrapper(tracer), + ) + wrap_function_wrapper( + "agno.workflow.workflow", + "Workflow.arun_workflow", + create_streaming_workflow_async_wrapper(tracer), + ) + + # Streaming tool execution + wrap_function_wrapper( + "agno.tools.function", + "FunctionCall.execute", + create_streaming_tool_wrapper(tracer), + ) + + # Metrics wrapper + wrap_function_wrapper( + "agno.agent", + "Agent._set_session_metrics", + create_metrics_wrapper(tracer), + ) + + # Team methods + wrap_function_wrapper( + "agno.team.team", + "Team.run", + create_team_wrapper(tracer), + ) + wrap_function_wrapper( + "agno.team.team", + "Team.arun", + create_team_async_wrapper(tracer), + ) + wrap_function_wrapper( + "agno.team.team", + "Team.print_response", + create_team_wrapper(tracer), + ) + + # Team internal methods with special handling + wrap_function_wrapper( + "agno.team.team", + "Team._run", + create_team_internal_wrapper(tracer), + ) + wrap_function_wrapper( + "agno.team.team", 
+ "Team._arun", + create_team_internal_async_wrapper(tracer), + ) + + logger.debug("Successfully wrapped Agno streaming methods") + except (AttributeError, ModuleNotFoundError) as e: + logger.debug(f"Failed to wrap Agno streaming methods: {e}") + + logger.info("Agno instrumentation installed successfully") def _uninstrument(self, **kwargs): """Remove instrumentation for Agno.""" + # Clear streaming contexts + _streaming_context_manager.clear_all() + + # Unwrap standard methods + for wrap_config in WRAPPED_METHODS: + try: + unwrap(wrap_config) + except Exception: + logger.debug(f"Failed to unwrap {wrap_config}") + + # Unwrap streaming methods try: - # Clear streaming contexts - _streaming_context_manager.clear_all() - - # Restore original workflow methods - if 'Workflow.run_workflow' in self._original_methods and self._original_methods['Workflow.run_workflow']: - import agno.workflow.workflow - agno.workflow.workflow.Workflow.run_workflow = self._original_methods['Workflow.run_workflow'] - agno.workflow.workflow.Workflow.arun_workflow = self._original_methods['Workflow.arun_workflow'] - logger.debug("Restored original Workflow.run_workflow and Workflow.arun_workflow methods") - - # Restore workflow session methods - workflow_session_methods = ['load_session', 'new_session', 'read_from_storage', 'write_to_storage'] - for method_name in workflow_session_methods: - key = f'Workflow.{method_name}' - if key in self._original_methods and self._original_methods[key]: - import agno.workflow.workflow - setattr(agno.workflow.workflow.Workflow, method_name, self._original_methods[key]) - logger.debug(f"Restored original Workflow.{method_name} method") - - # Restore original agent methods - if 'Agent.run' in self._original_methods: - import agno.agent - agno.agent.Agent.run = self._original_methods['Agent.run'] - agno.agent.Agent.arun = self._original_methods['Agent.arun'] - logger.debug("Restored original Agent.run and Agent.arun methods") - - # Restore original tool methods - 
if 'FunctionCall.execute' in self._original_methods: - import agno.tools.function - agno.tools.function.FunctionCall.execute = self._original_methods['FunctionCall.execute'] - logger.debug("Restored original FunctionCall.execute method") - - # Restore team methods - if 'Team._run' in self._original_methods and self._original_methods['Team._run']: - import agno.team.team - agno.team.team.Team._run = self._original_methods['Team._run'] - agno.team.team.Team._arun = self._original_methods['Team._arun'] - logger.debug("Restored original Team methods") - - # Restore metrics methods - if 'Agent._set_session_metrics' in self._original_methods: - import agno.agent - agno.agent.Agent._set_session_metrics = self._original_methods['Agent._set_session_metrics'] - logger.debug("Restored original Agent._set_session_metrics method") - - # Clear stored original methods - self._original_methods.clear() - - logger.info("Agno instrumentation removed successfully") - - except Exception as e: - logger.error(f"Failed to remove Agno instrumentation: {e}") - raise \ No newline at end of file + from opentelemetry.instrumentation.utils import unwrap as otel_unwrap + + # Agent methods + otel_unwrap("agno.agent", "Agent.run") + otel_unwrap("agno.agent", "Agent.arun") + + # Workflow methods + otel_unwrap("agno.workflow.workflow", "Workflow.run_workflow") + otel_unwrap("agno.workflow.workflow", "Workflow.arun_workflow") + + # Tool methods + otel_unwrap("agno.tools.function", "FunctionCall.execute") + + # Metrics methods + otel_unwrap("agno.agent", "Agent._set_session_metrics") + + # Team methods + otel_unwrap("agno.team.team", "Team.run") + otel_unwrap("agno.team.team", "Team.arun") + otel_unwrap("agno.team.team", "Team.print_response") + otel_unwrap("agno.team.team", "Team._run") + otel_unwrap("agno.team.team", "Team._arun") + + except (AttributeError, ModuleNotFoundError): + logger.debug("Failed to unwrap Agno streaming methods") + + logger.info("Agno instrumentation removed successfully") 
From 17317717230d558dab5082de1eb742065d636899 Mon Sep 17 00:00:00 2001 From: fenilfaldu Date: Sun, 8 Jun 2025 06:46:47 +0530 Subject: [PATCH 03/14] Code refactored --- agentops/instrumentation/agno/__init__.py | 11 --- .../instrumentation/agno/attributes/agent.py | 22 ++--- .../agno/attributes/metrics.py | 75 +++++++-------- .../instrumentation/agno/attributes/tool.py | 41 +++------ .../agno/attributes/workflow.py | 91 +++++++++++-------- agentops/semconv/workflow.py | 54 +++++++++++ 6 files changed, 162 insertions(+), 132 deletions(-) diff --git a/agentops/instrumentation/agno/__init__.py b/agentops/instrumentation/agno/__init__.py index 7ebf85c09..c6c04a7fc 100644 --- a/agentops/instrumentation/agno/__init__.py +++ b/agentops/instrumentation/agno/__init__.py @@ -4,12 +4,6 @@ from .instrumentor import AgnoInstrumentor -# Export attribute handlers for external use -from .attributes.agent import get_agent_run_attributes -from .attributes.team import get_team_run_attributes, get_team_public_run_attributes -from .attributes.tool import get_tool_execution_attributes -from .attributes.metrics import get_metrics_attributes - logger = logging.getLogger(__name__) __version__ = "1.0.0" @@ -21,9 +15,4 @@ "AgnoInstrumentor", "LIBRARY_NAME", "LIBRARY_VERSION", - "get_agent_run_attributes", - "get_team_run_attributes", - "get_team_public_run_attributes", - "get_tool_execution_attributes", - "get_metrics_attributes", ] diff --git a/agentops/instrumentation/agno/attributes/agent.py b/agentops/instrumentation/agno/attributes/agent.py index 535420ffe..5d0f3181c 100644 --- a/agentops/instrumentation/agno/attributes/agent.py +++ b/agentops/instrumentation/agno/attributes/agent.py @@ -3,7 +3,7 @@ from typing import Optional, Tuple, Dict, Any from agentops.instrumentation.common.attributes import AttributeMap -from agentops.semconv import SpanAttributes, WorkflowAttributes, AgentAttributes +from agentops.semconv import SpanAttributes, WorkflowAttributes, AgentAttributes, ToolAttributes 
from agentops.semconv.span_kinds import SpanKind as AgentOpsSpanKind @@ -109,7 +109,7 @@ def get_agent_run_attributes( agent_config["max_retry_limit"] = str(agent.retries) if hasattr(agent, "response_model") and agent.response_model: - agent_config["response_model"] = str(agent.response_model.__name__) + agent_config[SpanAttributes.LLM_RESPONSE_MODEL] = str(agent.response_model.__name__) if hasattr(agent, "show_tool_calls"): agent_config["show_tool_calls"] = str(agent.show_tool_calls) @@ -153,20 +153,16 @@ def get_agent_run_attributes( # Set tool attributes if tool_names: - attributes["agent.tools_names"] = ",".join(tool_names[:5]) # Limit to first 5 attributes["agno.agent.tools_count"] = str(len(tool_names)) if tools_info: - import json - - try: - # Limit to first 3 tools to avoid overly long attributes - limited_tools = tools_info[:3] - tools_json = json.dumps(limited_tools) - attributes[AgentAttributes.AGENT_TOOLS] = tools_json - except: - # Fallback if JSON serialization fails - attributes[AgentAttributes.AGENT_TOOLS] = str(tools_info) + # Instead of storing as JSON blob, set individual tool attributes + for i, tool in enumerate(tools_info): + prefix = f"agent.tool.{i}" + if "name" in tool: + attributes[f"{prefix}.{ToolAttributes.TOOL_NAME}"] = tool["name"] + if "description" in tool: + attributes[f"{prefix}.{ToolAttributes.TOOL_DESCRIPTION}"] = tool["description"] # Memory and knowledge information if hasattr(agent, "memory") and agent.memory: diff --git a/agentops/instrumentation/agno/attributes/metrics.py b/agentops/instrumentation/agno/attributes/metrics.py index 306d702fa..c3d3eaffc 100644 --- a/agentops/instrumentation/agno/attributes/metrics.py +++ b/agentops/instrumentation/agno/attributes/metrics.py @@ -61,6 +61,11 @@ def get_metrics_attributes( if hasattr(model, "provider"): attributes["agno.model.provider"] = str(model.provider) + # Add model class name for better identification (with null check) + if hasattr(model, "__class__") and 
hasattr(model.__class__, "__name__"): + model_class = model.__class__.__name__ + attributes["agno.model.class"] = model_class + # === EXTRACT CONVERSATION STRUCTURE === if hasattr(run_messages, "messages") and run_messages.messages: messages = run_messages.messages @@ -138,29 +143,46 @@ def get_metrics_attributes( attributes[SpanAttributes.LLM_REQUEST_MODEL] = model_id attributes[SpanAttributes.LLM_RESPONSE_MODEL] = model_id - # Use session metrics for more accurate token counts - session_prompt_tokens = getattr(session_metrics, "prompt_tokens", 0) - session_completion_tokens = getattr(session_metrics, "completion_tokens", 0) - session_output_tokens = getattr(session_metrics, "output_tokens", 0) - session_input_tokens = getattr(session_metrics, "input_tokens", 0) - session_total_tokens = getattr(session_metrics, "total_tokens", 0) + # Only set token variables if the attributes actually exist + session_prompt_tokens = None + session_completion_tokens = None + session_output_tokens = None + session_input_tokens = None + session_total_tokens = None + + if hasattr(session_metrics, "prompt_tokens"): + session_prompt_tokens = session_metrics.prompt_tokens + + if hasattr(session_metrics, "completion_tokens"): + session_completion_tokens = session_metrics.completion_tokens + + if hasattr(session_metrics, "output_tokens"): + session_output_tokens = session_metrics.output_tokens + + if hasattr(session_metrics, "input_tokens"): + session_input_tokens = session_metrics.input_tokens + + if hasattr(session_metrics, "total_tokens"): + session_total_tokens = session_metrics.total_tokens # For Anthropic, output_tokens represents completion tokens - if session_output_tokens > 0 and session_completion_tokens == 0: - session_completion_tokens = session_output_tokens + if session_output_tokens is not None and session_output_tokens > 0: + if session_completion_tokens is None or session_completion_tokens == 0: + session_completion_tokens = session_output_tokens # For some providers, 
input_tokens represents prompt tokens - if session_input_tokens > 0 and session_prompt_tokens == 0: - session_prompt_tokens = session_input_tokens + if session_input_tokens is not None and session_input_tokens > 0: + if session_prompt_tokens is None or session_prompt_tokens == 0: + session_prompt_tokens = session_input_tokens # Only set token attributes if we have actual values - if session_total_tokens > 0: + if session_total_tokens is not None and session_total_tokens > 0: usage_data["total_tokens"] = session_total_tokens # Set breakdown if available - if session_prompt_tokens > 0: + if session_prompt_tokens is not None and session_prompt_tokens > 0: usage_data["prompt_tokens"] = session_prompt_tokens - if session_completion_tokens > 0: + if session_completion_tokens is not None and session_completion_tokens > 0: usage_data["completion_tokens"] = session_completion_tokens # Additional token types from session metrics - only set if present @@ -169,13 +191,6 @@ def get_metrics_attributes( if hasattr(session_metrics, "reasoning_tokens") and session_metrics.reasoning_tokens > 0: usage_data["reasoning_tokens"] = session_metrics.reasoning_tokens - # Success/fail token metrics - only set if we have tokens - if session_total_tokens > 0: - usage_data["success_tokens"] = session_total_tokens - # Only set fail/indeterminate as 0 when we have success tokens - usage_data["fail_tokens"] = 0 - usage_data["indeterminate_tokens"] = 0 - # === FALLBACK TO MESSAGE AGGREGATION IF SESSION METRICS ARE EMPTY === # If we don't have token data from session metrics, try message aggregation if "total_tokens" not in usage_data: @@ -187,26 +202,6 @@ def get_metrics_attributes( if total_tokens > 0: usage_data["total_tokens"] = total_tokens - # Handle case where we have total but no breakdown (common with Anthropic) - if usage_data.get("prompt_tokens", 0) == 0 and usage_data.get("completion_tokens", 0) == 0: - # If we only have completion tokens from output_tokens, assume all are completion - 
if total_output_tokens > 0: - usage_data["completion_tokens"] = total_output_tokens - usage_data["prompt_tokens"] = max(0, total_tokens - total_output_tokens) - # Otherwise try to split reasonably - elif total_tokens > 0: - # For pure generation, most tokens are usually completion - estimated_completion = int(total_tokens * 0.7) # Rough estimate - estimated_prompt = total_tokens - estimated_completion - usage_data["completion_tokens"] = estimated_completion - usage_data["prompt_tokens"] = estimated_prompt - - # Success/fail tokens from message aggregation - only set if we have tokens - if total_tokens > 0: - usage_data["success_tokens"] = total_tokens - usage_data["fail_tokens"] = 0 - usage_data["indeterminate_tokens"] = 0 - # Extract user message info if available if hasattr(run_messages, "user_message") and run_messages.user_message: user_msg = run_messages.user_message diff --git a/agentops/instrumentation/agno/attributes/tool.py b/agentops/instrumentation/agno/attributes/tool.py index 55fa4ad16..92f9e1ad9 100644 --- a/agentops/instrumentation/agno/attributes/tool.py +++ b/agentops/instrumentation/agno/attributes/tool.py @@ -6,6 +6,7 @@ from agentops.instrumentation.common.attributes import AttributeMap from agentops.semconv import SpanAttributes from agentops.semconv.span_kinds import SpanKind as AgentOpsSpanKind +from agentops.semconv.tool import ToolAttributes def get_tool_execution_attributes( @@ -50,17 +51,14 @@ def get_tool_execution_attributes( tool_name = getattr(function, "name", "unknown_tool") # Set span attributes for the tool execution span - attributes["tool.name"] = tool_name + attributes[ToolAttributes.TOOL_NAME] = tool_name attributes["agno.tool.function_name"] = tool_name # Function details and context if hasattr(function, "description"): description = getattr(function, "description", "") if description: - # Truncate long descriptions but keep them readable - if len(description) > 300: - description = description[:297] + "..." 
- attributes["tool.description"] = description + attributes[ToolAttributes.TOOL_DESCRIPTION] = description attributes["agno.tool.function_description"] = description # Function source information @@ -93,15 +91,13 @@ def get_tool_execution_attributes( formatted_args = [] for key, value in args_dict.items(): value_str = str(value) - if len(value_str) > 100: - value_str = value_str[:97] + "..." formatted_args.append(f"{key}={value_str}") - attributes["tool.parameters"] = json.dumps(args_dict) + attributes[ToolAttributes.TOOL_PARAMETERS] = json.dumps(args_dict) attributes["agno.tool.formatted_args"] = ", ".join(formatted_args) attributes["agno.tool.args_count"] = str(len(args_dict)) except Exception as e: - attributes["tool.parameters"] = str(function_call.arguments) + attributes[ToolAttributes.TOOL_PARAMETERS] = str(function_call.arguments) attributes["agno.tool.args_parse_error"] = str(e) # Extract call ID and metadata @@ -146,7 +142,7 @@ def get_tool_execution_attributes( error = getattr(result_value, "error", None) attributes["agno.tool.execution_result_status"] = str(status) - attributes["tool.status"] = str(status) + attributes[ToolAttributes.TOOL_STATUS] = str(status) if error: attributes["agno.tool.execution_error"] = str(error) @@ -185,8 +181,6 @@ def get_tool_execution_attributes( for var_name in context_vars: if var_name in locals_dict: value = str(locals_dict[var_name]) - if len(value) > 100: - value = value[:97] + "..." generator_info.append(f"{var_name}={value}") attributes[f"agno.tool.generator_{var_name}"] = value @@ -207,12 +201,10 @@ def get_tool_execution_attributes( else: result_str = f"Generator<{actual_result_type}> - {str(actual_result)}" else: - # Regular result - safe to convert to string + # Regular result result_str = str(actual_result) - if len(result_str) > 500: - result_str = result_str[:497] + "..." 
else: - result_str = f"FunctionExecutionResult(status={status}, result=None)" + result_str = str(status) else: # Not a FunctionExecutionResult, handle as direct result if hasattr(result_value, "__iter__") and hasattr(result_value, "__next__"): @@ -228,30 +220,21 @@ def get_tool_execution_attributes( else: # Regular result result_str = str(result_value) - if len(result_str) > 500: - result_str = result_str[:497] + "..." else: result_str = "None" # Set the main result attribute - attributes["tool.result"] = result_str + attributes[ToolAttributes.TOOL_RESULT] = result_str # Add additional analysis attributes attributes["agno.tool.result_length"] = str(len(result_str)) - # Provide a preview for long results - if len(result_str) > 100: - preview = result_str[:97] + "..." - attributes["agno.tool.result_preview"] = preview - else: - attributes["agno.tool.result_preview"] = result_str - # Set final execution status - if not attributes.get("tool.status"): - attributes["tool.status"] = "success" + if not attributes.get(ToolAttributes.TOOL_STATUS): + attributes[ToolAttributes.TOOL_STATUS] = "success" # Add execution summary for debugging - tool_name = attributes.get("tool.name", "unknown") + tool_name = attributes.get(ToolAttributes.TOOL_NAME, "unknown") call_type = attributes.get("agno.tool.transfer_type", "unknown") attributes["agno.tool.execution_summary"] = f"Tool '{tool_name}' executed with type '{call_type}'" diff --git a/agentops/instrumentation/agno/attributes/workflow.py b/agentops/instrumentation/agno/attributes/workflow.py index a0b4b1251..384cb616b 100644 --- a/agentops/instrumentation/agno/attributes/workflow.py +++ b/agentops/instrumentation/agno/attributes/workflow.py @@ -5,6 +5,7 @@ from agentops.semconv.instrumentation import InstrumentationAttributes from agentops.semconv.span_kinds import SpanKind as AgentOpsSpanKind +from agentops.semconv.workflow import WorkflowAttributes from agentops.instrumentation.common.attributes import get_common_attributes @@ 
-31,97 +32,109 @@ def get_workflow_run_attributes( # Core workflow attributes if hasattr(workflow, "name") and workflow.name: - attributes["workflow.name"] = str(workflow.name) + attributes[WorkflowAttributes.WORKFLOW_NAME] = str(workflow.name) if hasattr(workflow, "workflow_id") and workflow.workflow_id: - attributes["workflow.workflow_id"] = str(workflow.workflow_id) + attributes[WorkflowAttributes.WORKFLOW_ID] = str(workflow.workflow_id) if hasattr(workflow, "description") and workflow.description: - attributes["workflow.description"] = str(workflow.description) + attributes[WorkflowAttributes.WORKFLOW_DESCRIPTION] = str(workflow.description) if hasattr(workflow, "app_id") and workflow.app_id: - attributes["workflow.app_id"] = str(workflow.app_id) + attributes[WorkflowAttributes.WORKFLOW_APP_ID] = str(workflow.app_id) + + # Set workflow type + attributes[WorkflowAttributes.WORKFLOW_TYPE] = "agno_workflow" # Session and user attributes if hasattr(workflow, "session_id") and workflow.session_id: - attributes["workflow.session_id"] = str(workflow.session_id) + attributes[WorkflowAttributes.WORKFLOW_SESSION_ID] = str(workflow.session_id) if hasattr(workflow, "session_name") and workflow.session_name: - attributes["workflow.session_name"] = str(workflow.session_name) + attributes[WorkflowAttributes.WORKFLOW_SESSION_NAME] = str(workflow.session_name) if hasattr(workflow, "user_id") and workflow.user_id: - attributes["workflow.user_id"] = str(workflow.user_id) + attributes[WorkflowAttributes.WORKFLOW_USER_ID] = str(workflow.user_id) # Run-specific attributes if hasattr(workflow, "run_id") and workflow.run_id: - attributes["workflow.run_id"] = str(workflow.run_id) + attributes[WorkflowAttributes.WORKFLOW_RUN_ID] = str(workflow.run_id) # Configuration attributes if hasattr(workflow, "debug_mode"): - attributes["workflow.debug_mode"] = bool(workflow.debug_mode) + attributes[WorkflowAttributes.WORKFLOW_DEBUG_MODE] = bool(workflow.debug_mode) if hasattr(workflow, 
"monitoring"): - attributes["workflow.monitoring"] = bool(workflow.monitoring) + attributes[WorkflowAttributes.WORKFLOW_MONITORING] = bool(workflow.monitoring) if hasattr(workflow, "telemetry"): - attributes["workflow.telemetry"] = bool(workflow.telemetry) + attributes[WorkflowAttributes.WORKFLOW_TELEMETRY] = bool(workflow.telemetry) # Memory and storage attributes if hasattr(workflow, "memory") and workflow.memory: memory_type = type(workflow.memory).__name__ - attributes["workflow.memory.type"] = memory_type + attributes[WorkflowAttributes.WORKFLOW_MEMORY_TYPE] = memory_type if hasattr(workflow, "storage") and workflow.storage: storage_type = type(workflow.storage).__name__ - attributes["workflow.storage.type"] = storage_type + attributes[WorkflowAttributes.WORKFLOW_STORAGE_TYPE] = storage_type # Input parameters from kwargs if kwargs: + # Store workflow input + attributes[WorkflowAttributes.WORKFLOW_INPUT] = str(kwargs) + # Count and types of input parameters - attributes["workflow.input.parameter_count"] = len(kwargs) + attributes[WorkflowAttributes.WORKFLOW_INPUT_PARAMETER_COUNT] = len(kwargs) param_types = list(set(type(v).__name__ for v in kwargs.values())) if param_types: - attributes["workflow.input.parameter_types"] = str(param_types) + attributes[WorkflowAttributes.WORKFLOW_INPUT_TYPE] = str(param_types) # Store specific input keys (without values for privacy) input_keys = list(kwargs.keys()) if input_keys: - attributes["workflow.input.parameter_keys"] = str(input_keys) + attributes[WorkflowAttributes.WORKFLOW_INPUT_PARAMETER_KEYS] = str(input_keys) # Workflow method parameters if available if hasattr(workflow, "_run_parameters") and workflow._run_parameters: param_count = len(workflow._run_parameters) - attributes["workflow.method.parameter_count"] = param_count + attributes[WorkflowAttributes.WORKFLOW_METHOD_PARAMETER_COUNT] = param_count if hasattr(workflow, "_run_return_type") and workflow._run_return_type: - attributes["workflow.method.return_type"] 
= str(workflow._run_return_type) + attributes[WorkflowAttributes.WORKFLOW_METHOD_RETURN_TYPE] = str(workflow._run_return_type) # Process return value attributes if return_value is not None: return_type = type(return_value).__name__ - attributes["workflow.output.type"] = return_type + attributes[WorkflowAttributes.WORKFLOW_OUTPUT_TYPE] = return_type # Handle RunResponse objects if hasattr(return_value, "content"): + # Store workflow output + if return_value.content: + attributes[WorkflowAttributes.WORKFLOW_OUTPUT] = str(return_value.content) + if hasattr(return_value, "content_type"): - attributes["workflow.output.content_type"] = str(return_value.content_type) + attributes[WorkflowAttributes.WORKFLOW_OUTPUT_CONTENT_TYPE] = str(return_value.content_type) if hasattr(return_value, "event"): - attributes["workflow.output.event"] = str(return_value.event) + attributes[WorkflowAttributes.WORKFLOW_OUTPUT_EVENT] = str(return_value.event) if hasattr(return_value, "model"): - attributes["workflow.output.model"] = str(return_value.model) if return_value.model else "" + attributes[WorkflowAttributes.WORKFLOW_OUTPUT_MODEL] = ( + str(return_value.model) if return_value.model else "" + ) if hasattr(return_value, "model_provider"): - attributes["workflow.output.model_provider"] = ( + attributes[WorkflowAttributes.WORKFLOW_OUTPUT_MODEL_PROVIDER] = ( str(return_value.model_provider) if return_value.model_provider else "" ) # Count various response components if hasattr(return_value, "messages") and return_value.messages: - attributes["workflow.output.message_count"] = len(return_value.messages) + attributes[WorkflowAttributes.WORKFLOW_OUTPUT_MESSAGE_COUNT] = len(return_value.messages) if hasattr(return_value, "tools") and return_value.tools: - attributes["workflow.output.tool_count"] = len(return_value.tools) + attributes[WorkflowAttributes.WORKFLOW_OUTPUT_TOOL_COUNT] = len(return_value.tools) if hasattr(return_value, "images") and return_value.images: - 
attributes["workflow.output.image_count"] = len(return_value.images) + attributes[WorkflowAttributes.WORKFLOW_OUTPUT_IMAGE_COUNT] = len(return_value.images) if hasattr(return_value, "videos") and return_value.videos: - attributes["workflow.output.video_count"] = len(return_value.videos) + attributes[WorkflowAttributes.WORKFLOW_OUTPUT_VIDEO_COUNT] = len(return_value.videos) if hasattr(return_value, "audio") and return_value.audio: - attributes["workflow.output.audio_count"] = len(return_value.audio) + attributes[WorkflowAttributes.WORKFLOW_OUTPUT_AUDIO_COUNT] = len(return_value.audio) # Handle generators/iterators elif hasattr(return_value, "__iter__") and not isinstance(return_value, (str, bytes)): - attributes["workflow.output.is_streaming"] = True + attributes[WorkflowAttributes.WORKFLOW_OUTPUT_IS_STREAMING] = True # Set span kind - AgentOpsSpanKind.WORKFLOW is already a string attributes[InstrumentationAttributes.INSTRUMENTATION_TYPE] = AgentOpsSpanKind.WORKFLOW @@ -152,32 +165,32 @@ def get_workflow_session_attributes( # Session attributes if hasattr(workflow, "session_id") and workflow.session_id: - attributes["workflow.session.session_id"] = str(workflow.session_id) + attributes[WorkflowAttributes.WORKFLOW_SESSION_ID] = str(workflow.session_id) if hasattr(workflow, "session_name") and workflow.session_name: - attributes["workflow.session.session_name"] = str(workflow.session_name) + attributes[WorkflowAttributes.WORKFLOW_SESSION_NAME] = str(workflow.session_name) if hasattr(workflow, "workflow_id") and workflow.workflow_id: - attributes["workflow.session.workflow_id"] = str(workflow.workflow_id) + attributes[WorkflowAttributes.WORKFLOW_SESSION_WORKFLOW_ID] = str(workflow.workflow_id) if hasattr(workflow, "user_id") and workflow.user_id: - attributes["workflow.session.user_id"] = str(workflow.user_id) + attributes[WorkflowAttributes.WORKFLOW_SESSION_USER_ID] = str(workflow.user_id) # Session state attributes if hasattr(workflow, "session_state") and 
workflow.session_state: if isinstance(workflow.session_state, dict): - attributes["workflow.session.state_keys"] = str(list(workflow.session_state.keys())) - attributes["workflow.session.state_size"] = len(workflow.session_state) + attributes[WorkflowAttributes.WORKFLOW_SESSION_STATE_KEYS] = str(list(workflow.session_state.keys())) + attributes[WorkflowAttributes.WORKFLOW_SESSION_STATE_SIZE] = len(workflow.session_state) # Storage attributes if hasattr(workflow, "storage") and workflow.storage: storage_type = type(workflow.storage).__name__ - attributes["workflow.session.storage_type"] = storage_type + attributes[WorkflowAttributes.WORKFLOW_SESSION_STORAGE_TYPE] = storage_type # Process session return value if it's a WorkflowSession if return_value is not None and hasattr(return_value, "session_id"): - attributes["workflow.session.returned_session_id"] = str(return_value.session_id) + attributes[WorkflowAttributes.WORKFLOW_SESSION_RETURNED_SESSION_ID] = str(return_value.session_id) if hasattr(return_value, "created_at") and return_value.created_at: - attributes["workflow.session.created_at"] = int(return_value.created_at) + attributes[WorkflowAttributes.WORKFLOW_SESSION_CREATED_AT] = int(return_value.created_at) if hasattr(return_value, "updated_at") and return_value.updated_at: - attributes["workflow.session.updated_at"] = int(return_value.updated_at) + attributes[WorkflowAttributes.WORKFLOW_SESSION_UPDATED_AT] = int(return_value.updated_at) # Set span kind - AgentOpsSpanKind.WORKFLOW is already a string attributes[InstrumentationAttributes.INSTRUMENTATION_TYPE] = AgentOpsSpanKind.WORKFLOW diff --git a/agentops/semconv/workflow.py b/agentops/semconv/workflow.py index 5d3199e26..e2bdfbaf5 100644 --- a/agentops/semconv/workflow.py +++ b/agentops/semconv/workflow.py @@ -25,3 +25,57 @@ class WorkflowAttributes: WORKFLOW_STEP_STATUS = "workflow.step.status" # Status of the workflow step WORKFLOW_STEP_ERROR = "workflow.step.error" # Error from the workflow step 
WORKFLOW_STEP = "workflow.step" + + # Core workflow identification + WORKFLOW_ID = "workflow.workflow_id" # Unique identifier for the workflow instance + WORKFLOW_DESCRIPTION = "workflow.description" # Description of the workflow + WORKFLOW_APP_ID = "workflow.app_id" # Application ID associated with the workflow + + # Session and user context + WORKFLOW_SESSION_ID = "workflow.session_id" # Session ID for the workflow execution + WORKFLOW_SESSION_NAME = "workflow.session_name" # Name of the workflow session + WORKFLOW_USER_ID = "workflow.user_id" # User ID associated with the workflow + + # Run-specific attributes + WORKFLOW_RUN_ID = "workflow.run_id" # Unique identifier for this workflow run + + # Configuration flags + WORKFLOW_DEBUG_MODE = "workflow.debug_mode" # Whether debug mode is enabled + WORKFLOW_MONITORING = "workflow.monitoring" # Whether monitoring is enabled + WORKFLOW_TELEMETRY = "workflow.telemetry" # Whether telemetry is enabled + + # Memory and storage + WORKFLOW_MEMORY_TYPE = "workflow.memory.type" # Type of memory used by workflow + WORKFLOW_STORAGE_TYPE = "workflow.storage.type" # Type of storage used by workflow + + # Input parameters metadata + WORKFLOW_INPUT_PARAMETER_COUNT = "workflow.input.parameter_count" # Number of input parameters + WORKFLOW_INPUT_PARAMETER_KEYS = "workflow.input.parameter_keys" # Keys of input parameters + + # Method metadata + WORKFLOW_METHOD_PARAMETER_COUNT = "workflow.method.parameter_count" # Number of method parameters + WORKFLOW_METHOD_RETURN_TYPE = "workflow.method.return_type" # Return type of the workflow method + + # Output metadata + WORKFLOW_OUTPUT_CONTENT_TYPE = "workflow.output.content_type" # Content type of the output + WORKFLOW_OUTPUT_EVENT = "workflow.output.event" # Event type in the output + WORKFLOW_OUTPUT_MODEL = "workflow.output.model" # Model used for the output + WORKFLOW_OUTPUT_MODEL_PROVIDER = "workflow.output.model_provider" # Provider of the model + WORKFLOW_OUTPUT_MESSAGE_COUNT = 
"workflow.output.message_count" # Number of messages in output + WORKFLOW_OUTPUT_TOOL_COUNT = "workflow.output.tool_count" # Number of tools in output + WORKFLOW_OUTPUT_IMAGE_COUNT = "workflow.output.image_count" # Number of images in output + WORKFLOW_OUTPUT_VIDEO_COUNT = "workflow.output.video_count" # Number of videos in output + WORKFLOW_OUTPUT_AUDIO_COUNT = "workflow.output.audio_count" # Number of audio items in output + WORKFLOW_OUTPUT_IS_STREAMING = "workflow.output.is_streaming" # Whether output is streaming + + # Session-specific attributes + WORKFLOW_SESSION_SESSION_ID = "workflow.session.session_id" # Session ID in session context + WORKFLOW_SESSION_SESSION_NAME = "workflow.session.session_name" # Session name in session context + WORKFLOW_SESSION_WORKFLOW_ID = "workflow.session.workflow_id" # Workflow ID in session context + WORKFLOW_SESSION_USER_ID = "workflow.session.user_id" # User ID in session context + WORKFLOW_SESSION_STATE_KEYS = "workflow.session.state_keys" # Keys in session state + WORKFLOW_SESSION_STATE_SIZE = "workflow.session.state_size" # Size of session state + WORKFLOW_SESSION_STORAGE_TYPE = "workflow.session.storage_type" # Storage type for session + WORKFLOW_SESSION_RETURNED_SESSION_ID = "workflow.session.returned_session_id" # Session ID returned + WORKFLOW_SESSION_CREATED_AT = "workflow.session.created_at" # Session creation timestamp + WORKFLOW_SESSION_UPDATED_AT = "workflow.session.updated_at" # Session update timestamp From f0ca16d4416bc780369a2b98d626234e876ffd79 Mon Sep 17 00:00:00 2001 From: fenilfaldu Date: Wed, 11 Jun 2025 07:10:55 +0530 Subject: [PATCH 04/14] added v2 docs --- .../agno/attributes/metrics.py | 2 - docs/images/external/agno/agno.png | Bin 0 -> 545 bytes docs/mint.json | 1 + docs/v2/examples/agno.mdx | 490 ++++++++++++++++++ docs/v2/examples/examples.mdx | 4 + docs/v2/integrations/agno.mdx | 344 ++++++++++++ docs/v2/introduction.mdx | 1 + .../comprehensive_agno_example.py | 374 +++++++++++++ 8 files changed, 
1214 insertions(+), 2 deletions(-) create mode 100644 docs/images/external/agno/agno.png create mode 100644 docs/v2/examples/agno.mdx create mode 100644 docs/v2/integrations/agno.mdx create mode 100644 examples/agno_examples/comprehensive_agno_example.py diff --git a/agentops/instrumentation/agno/attributes/metrics.py b/agentops/instrumentation/agno/attributes/metrics.py index c3d3eaffc..b8d3a9ac1 100644 --- a/agentops/instrumentation/agno/attributes/metrics.py +++ b/agentops/instrumentation/agno/attributes/metrics.py @@ -223,10 +223,8 @@ def get_metrics_attributes( if "reasoning_tokens" in usage_data: attributes[SpanAttributes.LLM_USAGE_REASONING_TOKENS] = usage_data["reasoning_tokens"] - # Also keep the nested format and individual gen_ai.usage.* attributes for compatibility # But only if we have any usage data if usage_data: - attributes["gen_ai.usage"] = usage_data for key, value in usage_data.items(): attributes[f"gen_ai.usage.{key}"] = value diff --git a/docs/images/external/agno/agno.png b/docs/images/external/agno/agno.png new file mode 100644 index 0000000000000000000000000000000000000000..5251e720d6c5020b59d0ae6b62289b6064fbf353 GIT binary patch literal 545 zcmV++0^a?JP)!GlNpn06XaD4u+5K5L^=T8Vwxu=QqRtjR>x^B7>3bfI0}2*VdqL{YXgKv~ke2 zvkZ^!Ah_1<3_@Z9>L3ONhHc9bY!+4q*N}mB(9Ts1zkegRo)H7>pbu{tPVYx><<%H; z%?H#$m`U3|ff!A+bkMP#3|~GXxOUzQ{6YjZ;cgGft1)ygCv5A)3Wnpmu}l5>$*^ZF zLqHOg%gV;!63no6Hn|RBWn&O0qCL;Sg->?NQiOv*Njs9DgJ|3@yL*-4&J_e#S&Ko_ zXh0oAdeX)nWv`wx^b`}e@5TiJ3U;ny`1%>a{rLmGa>mg?PXb`nL4?4lg9w392N43J j4k82wor3@Y009603$kf%d|Yh>00000NkvXXu0mjfKL+U` literal 0 HcmV?d00001 diff --git a/docs/mint.json b/docs/mint.json index 1c45eef72..51a8b00a4 100644 --- a/docs/mint.json +++ b/docs/mint.json @@ -168,6 +168,7 @@ "group": "Integrations", "pages": [ "v2/integrations/ag2", + "v2/integrations/agno", "v2/integrations/anthropic", "v2/integrations/autogen", "v2/integrations/crewai", diff --git a/docs/v2/examples/agno.mdx b/docs/v2/examples/agno.mdx new file mode 100644 index 
000000000..cfee0e312 --- /dev/null +++ b/docs/v2/examples/agno.mdx @@ -0,0 +1,490 @@ +--- +title: Agno +description: "Comprehensive examples of using Agno with AgentOps instrumentation" +--- + +This guide provides complete examples of using [Agno](https://docs.agno.com) with AgentOps instrumentation. You'll learn how to create agents, coordinate teams, build workflows, and leverage advanced features like RAG and async operations. + +## Prerequisites + +Before running these examples, ensure you have: + +1. **API Keys**: AgentOps and OpenAI API keys +2. **Environment Setup**: Python environment with required packages +3. **Dependencies**: Install Agno and AgentOps + + + ```bash pip + pip install agentops agno + ``` + ```bash poetry + poetry add agentops agno + ``` + ```bash uv + uv add agentops agno + ``` + + +## Environment Configuration + +Create a `.env` file with your API keys: + +```env +AGENTOPS_API_KEY="your_agentops_api_key_here" +OPENAI_API_KEY="your_openai_api_key_here" +``` + +## Complete Example Structure + +Here's the foundation for all our examples: + +```python +""" +Comprehensive Agno Example with AgentOps Instrumentation + +This example demonstrates key Agno features: +1. Basic Agents and Teams +2. Tool Integration with RAG +3. Workflows with Caching +4. Collaborative Research Teams +5. 
Async Operations +""" + +import os +import asyncio +from typing import Iterator +from textwrap import dedent +from dotenv import load_dotenv + +# Load environment variables +load_dotenv() + +# Import Agno components +from agno.agent import Agent, RunResponse +from agno.team import Team +from agno.models.openai import OpenAIChat +from agno.workflow import Workflow +from agno.tools.reasoning import ReasoningTools +from agno.tools.googlesearch import GoogleSearchTools +from agno.tools.hackernews import HackerNewsTools +from agno.tools.arxiv import ArxivTools +from agno.tools.duckduckgo import DuckDuckGoTools +from agno.knowledge.url import UrlKnowledge +from agno.utils.pprint import pprint_run_response +from agno.utils.log import logger + +# Initialize AgentOps +import agentops +agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) + +# Configuration +MODEL_NAME = "gpt-4o-mini" + +def check_environment(): + """Check if required environment variables are set.""" + required_vars = ["AGENTOPS_API_KEY", "OPENAI_API_KEY"] + missing_vars = [var for var in required_vars if not os.getenv(var)] + + if missing_vars: + print(f"Missing required environment variables: {missing_vars}") + print("Please set these in your .env file or environment") + return False + + print("Environment variables checked successfully") + return True +``` + +## 1. 
Basic Agents and Teams + +### Creating and Running Individual Agents + +```python +def demonstrate_basic_agents(): + """Demonstrate basic agent creation and team coordination.""" + print("\n" + "=" * 60) + print("BASIC AGENTS AND TEAMS") + print("=" * 60) + + try: + # Create individual agents with specific roles + news_agent = Agent( + name="News Agent", + role="Get the latest news", + model=OpenAIChat(id=MODEL_NAME) + ) + + weather_agent = Agent( + name="Weather Agent", + role="Get the weather for the next 7 days", + model=OpenAIChat(id=MODEL_NAME) + ) + + # Create a team with coordination mode + team = Team( + name="News and Weather Team", + mode="coordinate", # Agents work in sequence + members=[news_agent, weather_agent] + ) + + # Run team task - AgentOps will track all interactions + response = team.run("What is the weather in Tokyo?") + print(f"Team Response: {response.content}") + + except Exception as e: + print(f"Basic agents error: {e}") + +# Run the example +demonstrate_basic_agents() +``` + + +## 2. 
Tool Integration with RAG + +### Knowledge Base and Advanced Tools + +```python +def demonstrate_tool_integration(): + """Demonstrate tool integration with RAG and knowledge base.""" + print("\n" + "=" * 60) + print("TOOL INTEGRATION WITH RAG") + print("=" * 60) + + try: + # Create knowledge base with vector database + knowledge_base = UrlKnowledge( + urls=["https://docs.agno.com/introduction/agents.md"], + # Use LanceDB for vector storage + vector_db=LanceDb( + uri="tmp/lancedb", + table_name="agno_docs", + search_type=SearchType.hybrid, + embedder=CohereEmbedder(id="embed-v4.0"), + reranker=CohereReranker(model="rerank-v3.5"), + ), + ) + + # Create agent with knowledge and reasoning tools + agent = Agent( + model=OpenAIChat(id=MODEL_NAME), + # Agentic RAG is enabled with knowledge + knowledge=knowledge_base, + search_knowledge=True, # Enable on-demand search + tools=[ReasoningTools(add_instructions=True)], + instructions=[ + "Include sources in your response.", + "Always search your knowledge before answering the question.", + "Only include the output in your response. No other text.", + ], + markdown=True, + ) + + print("Running RAG agent with knowledge base...") + agent.print_response( + "What are Agents?", + show_full_reasoning=True, + ) + + except Exception as e: + print(f"Tool integration error: {e}") + +# Run the example +demonstrate_tool_integration() +``` + +## 3. 
Workflows with Caching + +### State Management and Optimization + +```python +class CacheWorkflow(Workflow): + """A workflow that demonstrates caching capabilities.""" + + description: str = "A workflow that caches previous outputs" + agent = Agent(model=OpenAIChat(id=MODEL_NAME)) + + def run(self, message: str) -> Iterator[RunResponse]: + logger.info(f"Checking cache for '{message}'") + + # Check if the output is already cached + if self.session_state.get(message): + logger.info(f"Cache hit for '{message}'") + yield RunResponse( + run_id=self.run_id, + content=self.session_state.get(message) + ) + return + + logger.info(f"Cache miss for '{message}'") + + # Run the agent and yield the response + yield from self.agent.run(message, stream=True) + + # Cache the output after response is yielded + self.session_state[message] = self.agent.run_response.content + +def demonstrate_workflows(): + """Demonstrate workflow capabilities with caching.""" + print("\n" + "=" * 60) + print("WORKFLOWS WITH CACHING") + print("=" * 60) + + try: + workflow = CacheWorkflow() + + print("First run (cache miss):") + # This takes ~1s as it generates new content + response = workflow.run(message="Tell me a joke.") + pprint_run_response(response, markdown=True, show_time=True) + + print("\nSecond run (cache hit):") + # This is immediate due to caching + response = workflow.run(message="Tell me a joke.") + pprint_run_response(response, markdown=True, show_time=True) + + except Exception as e: + print(f"Workflow error: {e}") + +# Run the example +demonstrate_workflows() +``` + + +## 4. 
Collaborative Research Teams + +### Multi-Agent Coordination + +```python +def demonstrate_research_team(): + """Demonstrate collaborative research team with multiple specialized agents.""" + print("\n" + "=" * 60) + print("COLLABORATIVE RESEARCH TEAM") + print("=" * 60) + + try: + # Create specialized research agents + reddit_researcher = Agent( + name="Reddit Researcher", + role="Research a topic on Reddit", + model=OpenAIChat(id="gpt-4o"), + tools=[GoogleSearchTools()], + add_name_to_instructions=True, + instructions=dedent(""" + You are a Reddit researcher. + You will be given a topic to research on Reddit. + You will need to find the most relevant posts on Reddit. + """), + ) + + hackernews_researcher = Agent( + name="HackerNews Researcher", + model=OpenAIChat("gpt-4o"), + role="Research a topic on HackerNews.", + tools=[HackerNewsTools()], + add_name_to_instructions=True, + instructions=dedent(""" + You are a HackerNews researcher. + You will be given a topic to research on HackerNews. + You will need to find the most relevant posts on HackerNews. + """), + ) + + academic_paper_researcher = Agent( + name="Academic Paper Researcher", + model=OpenAIChat("gpt-4o"), + role="Research academic papers and scholarly content", + tools=[GoogleSearchTools(), ArxivTools()], + add_name_to_instructions=True, + instructions=dedent(""" + You are an academic paper researcher. + You will be given a topic to research in academic literature. + You will need to find relevant scholarly articles, papers, and academic discussions. + Focus on peer-reviewed content and citations from reputable sources. + Provide brief summaries of key findings and methodologies. + """), + ) + + twitter_researcher = Agent( + name="Twitter Researcher", + model=OpenAIChat("gpt-4o"), + role="Research trending discussions and real-time updates", + tools=[DuckDuckGoTools()], + add_name_to_instructions=True, + instructions=dedent(""" + You are a Twitter/X researcher. 
+ You will be given a topic to research on Twitter/X. + You will need to find trending discussions, influential voices, and real-time updates. + Focus on verified accounts and credible sources when possible. + Track relevant hashtags and ongoing conversations. + """), + ) + + # Create collaborative team + agent_team = Team( + name="Discussion Team", + mode="collaborate", # Agents work together + model=OpenAIChat("gpt-4o"), + members=[ + reddit_researcher, + hackernews_researcher, + academic_paper_researcher, + twitter_researcher, + ], + instructions=[ + "You are a discussion master.", + "You have to stop the discussion when you think the team has reached a consensus.", + ], + success_criteria="The team has reached a consensus.", + enable_agentic_context=True, + add_context=True, + show_tool_calls=True, + markdown=True, + debug_mode=True, + show_members_responses=True, + ) + + print("Running collaborative research team...") + agent_team.print_response( + message="Start the discussion on the topic: 'What is the best way to learn to code?'", + stream=True, + stream_intermediate_steps=True, + ) + + except Exception as e: + print(f"Research team error: {e}") + +# Run the example +demonstrate_research_team() +``` + +## 5. 
Async Operations + +### Concurrent Agent Execution + +```python +async def demonstrate_async_operations(): + """Demonstrate async operations with Agno agents.""" + print("\n" + "=" * 60) + print("ASYNC OPERATIONS") + print("=" * 60) + + try: + # Create agent for async operations + agent = Agent(model=OpenAIChat(id=MODEL_NAME)) + + # Define async tasks + async def task1(): + response = await agent.arun("Explain Python in one paragraph") + return f"Task 1: {response.content}" + + async def task2(): + response = await agent.arun("Explain JavaScript in one paragraph") + return f"Task 2: {response.content}" + + async def task3(): + response = await agent.arun("Compare them briefly") + return f"Task 3: {response.content}" + + # Run tasks concurrently for better performance + print("Running async tasks concurrently...") + results = await asyncio.gather(task1(), task2(), task3()) + + for result in results: + print(result) + print() + + except Exception as e: + print(f"Async operations error: {e}") + +# Run async example +asyncio.run(demonstrate_async_operations()) +``` + +## Complete Running Example + +Here's how to run all examples together: + +```python +async def main(): + """Main function to run all Agno demonstrations.""" + print("Starting Comprehensive Agno Example with AgentOps") + print("=" * 80) + + # Check environment first + if not check_environment(): + return + + print("\nRunning all Agno demonstrations...") + + # Run all demonstrations with error handling + demos = [ + ("Basic Agents", demonstrate_basic_agents), + ("Tool Integration", demonstrate_tool_integration), + ("Workflows", demonstrate_workflows), + ("Research Team", demonstrate_research_team), + ] + + for name, demo_func in demos: + try: + demo_func() + except Exception as e: + print(f"Skipping {name} demo due to: {e}") + + # Run async demo + try: + await demonstrate_async_operations() + except Exception as e: + print(f"Skipping async demo due to: {e}") + + print("\nAll Agno demonstrations 
completed!") + print("Check your AgentOps dashboard for detailed traces and metrics.") + +if __name__ == "__main__": + asyncio.run(main()) +``` + +## Monitoring and Analytics + +### AgentOps Dashboard Features + +After running these examples, visit your [AgentOps Dashboard](https://app.agentops.ai/) to see: + +**Agent Analytics:** +- Individual agent performance metrics +- Token usage and costs per agent +- Response times and success rates +- Agent interaction patterns + +**Team Coordination:** +- Multi-agent collaboration flows +- Team decision-making processes +- Communication patterns between agents +- Task distribution and load balancing + +**Workflow Optimization:** +- Cache efficiency metrics +- Workflow state transitions +- Performance bottlenecks +- Resource utilization + +**Tool Usage:** +- Tool execution frequency and success rates +- RAG query performance +- Knowledge base retrieval accuracy +- Custom tool effectiveness + +## Best Practices from Examples + +1. **Environment Management**: Always validate API keys before running agents +2. **Error Handling**: Wrap each demo in try-catch for robust execution +3. **Resource Optimization**: Use caching workflows for repeated operations +4. **Team Design**: Create specialized agents with clear, distinct roles +5. **Async Operations**: Leverage concurrent execution for improved performance +6. 
**Monitoring**: Use AgentOps dashboard to optimize agent performance + + +## Next Steps + +- Explore the [Agno Integration Guide](/v2/integrations/agno) for more advanced configurations +- Check out additional [AgentOps Examples](/examples) for other frameworks +- Visit the [AgentOps Dashboard](https://app.agentops.ai/) to monitor your agents +- Join the [AgentOps Community](https://discord.gg/agentops) for support and discussions \ No newline at end of file diff --git a/docs/v2/examples/examples.mdx b/docs/v2/examples/examples.mdx index bdf9c2b82..9021e70f6 100644 --- a/docs/v2/examples/examples.mdx +++ b/docs/v2/examples/examples.mdx @@ -44,6 +44,10 @@ description: 'Examples of AgentOps with various integrations' Multi-agent conversations with memory capabilities + + Modern AI agent framework with teams, workflows, and tool integration + + } iconType="image" href="/v2/examples/autogen"> AG2 multi-agent workflow demonstration diff --git a/docs/v2/integrations/agno.mdx b/docs/v2/integrations/agno.mdx new file mode 100644 index 000000000..1fa4db133 --- /dev/null +++ b/docs/v2/integrations/agno.mdx @@ -0,0 +1,344 @@ +--- +title: Agno +description: "Track your Agno agents, teams, and workflows with AgentOps" +--- + +[Agno](https://docs.agno.com) is a modern AI agent framework that provides tools for building intelligent agents, teams, and workflows. AgentOps automatically tracks all Agno operations including agent interactions, team coordination, tool usage, and workflow execution. 
+ +## Installation + +Install AgentOps and Agno: + + + ```bash pip + pip install agentops agno + ``` + ```bash poetry + poetry add agentops agno + ``` + ```bash uv + uv add agentops agno + ``` + + +## Setting Up API Keys + +You'll need API keys for AgentOps and your chosen LLM provider: +- **AGENTOPS_API_KEY**: From your [AgentOps Dashboard](https://app.agentops.ai/) +- **OPENAI_API_KEY**: From the [OpenAI Platform](https://platform.openai.com/api-keys) (if using OpenAI) +- **ANTHROPIC_API_KEY**: From [Anthropic Console](https://console.anthropic.com/) (if using Claude) + +Set these as environment variables or in a `.env` file. + + + ```bash Export to CLI + export AGENTOPS_API_KEY="your_agentops_api_key_here" + export OPENAI_API_KEY="your_openai_api_key_here" + export ANTHROPIC_API_KEY="your_anthropic_api_key_here" # Optional + ``` + ```txt Set in .env file + AGENTOPS_API_KEY="your_agentops_api_key_here" + OPENAI_API_KEY="your_openai_api_key_here" + ANTHROPIC_API_KEY="your_anthropic_api_key_here" # Optional + ``` + + +Then load them in your Python code: +```python +from dotenv import load_dotenv +import os + +load_dotenv() +``` + +## Quick Start + +```python +import os +from dotenv import load_dotenv + +# Load environment variables +load_dotenv() + + +from agno.agent import Agent +from agno.team import Team +from agno.models.openai import OpenAIChat + +# Initialize AgentOps +import agentops +agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) + +# Create and run an agent +agent = Agent( + name="Assistant", + role="Helpful AI assistant", + model=OpenAIChat(id="gpt-4o-mini") +) + +response = agent.run("What are the key benefits of AI agents?") +print(response.content) +``` + +## Features + +### 1. 
Basic Agents and Teams + +Create individual agents and coordinate them in teams: + +```python +import agentops +from agno.agent import Agent +from agno.team import Team +from agno.models.openai import OpenAIChat + +agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) + +# Create individual agents +news_agent = Agent( + name="News Agent", + role="Get the latest news", + model=OpenAIChat(id="gpt-4o-mini") +) + +weather_agent = Agent( + name="Weather Agent", + role="Get the weather for the next 7 days", + model=OpenAIChat(id="gpt-4o-mini") +) + +# Create a team with coordination mode +team = Team( + name="News and Weather Team", + mode="coordinate", + members=[news_agent, weather_agent] +) + +# Run team task +response = team.run("What is the weather in Tokyo?") +print(response.content) +``` + +### 2. Tool Integration with RAG + +Integrate custom tools and knowledge bases for advanced capabilities: + +```python +from agno.tools.reasoning import ReasoningTools +from agno.knowledge.url import UrlKnowledge + + +# Create knowledge base with vector database +knowledge_base = UrlKnowledge( + urls=["https://docs.agno.com/introduction/agents.md"], + vector_db=LanceDb( + uri="tmp/lancedb", + table_name="agno_docs", + search_type=SearchType.hybrid, + embedder=CohereEmbedder(id="embed-v4.0"), + reranker=CohereReranker(model="rerank-v3.5"), + ), +) + +# Create agent with knowledge and reasoning tools +agent = Agent( + model=OpenAIChat(id="gpt-4o-mini"), + knowledge=knowledge_base, + search_knowledge=True, + tools=[ReasoningTools(add_instructions=True)], + instructions=[ + "Include sources in your response.", + "Always search your knowledge before answering the question.", + "Only include the output in your response. No other text.", + ], + markdown=True, +) + +agent.print_response( + "What are Agents?", + show_full_reasoning=True, +) +``` + +### 3. 
Workflows with Caching + +Create workflows that maintain state and cache results: + +```python +from agno.workflow import Workflow +from agno.agent import Agent, RunResponse +from typing import Iterator + +class CacheWorkflow(Workflow): + """A workflow that demonstrates caching capabilities.""" + + description: str = "A workflow that caches previous outputs" + agent = Agent(model=OpenAIChat(id="gpt-4o-mini")) + + def run(self, message: str) -> Iterator[RunResponse]: + # Check if the output is already cached + if self.session_state.get(message): + yield RunResponse(run_id=self.run_id, content=self.session_state.get(message)) + return + + # Run the agent and yield the response + yield from self.agent.run(message, stream=True) + + # Cache the output after response is yielded + self.session_state[message] = self.agent.run_response.content + +# Use the workflow +workflow = CacheWorkflow() +response = workflow.run(message="Tell me a joke.") +``` + +### 4. Collaborative Research Teams + +Create multi-agent teams with specialized roles: + +```python +from agno.tools.googlesearch import GoogleSearchTools +from agno.tools.hackernews import HackerNewsTools +from agno.tools.arxiv import ArxivTools +from agno.tools.duckduckgo import DuckDuckGoTools +from textwrap import dedent + +# Create specialized research agents +reddit_researcher = Agent( + name="Reddit Researcher", + role="Research a topic on Reddit", + model=OpenAIChat(id="gpt-4o"), + tools=[GoogleSearchTools()], + add_name_to_instructions=True, + instructions=dedent(""" + You are a Reddit researcher. + You will be given a topic to research on Reddit. + You will need to find the most relevant posts on Reddit. + """), +) + +academic_paper_researcher = Agent( + name="Academic Paper Researcher", + model=OpenAIChat("gpt-4o"), + role="Research academic papers and scholarly content", + tools=[GoogleSearchTools(), ArxivTools()], + add_name_to_instructions=True, + instructions=dedent(""" + You are an academic paper researcher. 
+ You will be given a topic to research in academic literature. + Focus on peer-reviewed content and citations from reputable sources. + """), +) + +# Create collaborative team +agent_team = Team( + name="Discussion Team", + mode="collaborate", + model=OpenAIChat("gpt-4o"), + members=[reddit_researcher, academic_paper_researcher], + instructions=[ + "You are a discussion master.", + "You have to stop the discussion when you think the team has reached a consensus.", + ], + success_criteria="The team has reached a consensus.", + enable_agentic_context=True, + add_context=True, + show_tool_calls=True, + markdown=True, + debug_mode=True, + show_members_responses=True, +) + +agent_team.print_response( + message="Start the discussion on the topic: 'What is the best way to learn to code?'", + stream=True, + stream_intermediate_steps=True, +) +``` + +### 5. Async Operations + +Run multiple agents concurrently for improved performance: + +```python +import asyncio + +async def demonstrate_async_operations(): + agent = Agent(model=OpenAIChat(id="gpt-4o-mini")) + + # Define async tasks + async def task1(): + response = await agent.arun("Explain Python in one paragraph") + return f"Task 1: {response.content}" + + async def task2(): + response = await agent.arun("Explain JavaScript in one paragraph") + return f"Task 2: {response.content}" + + async def task3(): + response = await agent.arun("Compare them briefly") + return f"Task 3: {response.content}" + + # Run tasks concurrently + results = await asyncio.gather(task1(), task2(), task3()) + + for result in results: + print(result) + +# Run async operations +asyncio.run(demonstrate_async_operations()) +``` + +## What Gets Tracked + +AgentOps automatically tracks: + +### Agent Operations +- **Agent Creation**: Names, roles, and model configurations +- **Agent Execution**: Input prompts, responses, and timing +- **Agent Metrics**: Token usage, costs, and performance + +### Team Coordination +- **Team Formation**: Member agents 
and coordination modes +- **Team Execution**: Task distribution and collaboration patterns +- **Team Results**: Aggregated responses and decision flows + +### Tool Usage +- **Tool Calls**: Function executions and parameters +- **Tool Results**: Return values and success/failure status +- **Tool Performance**: Execution times and resource usage + +### Workflow Management +- **Workflow States**: Session management and caching +- **Workflow Execution**: Step-by-step progression tracking +- **Workflow Optimization**: Cache hits/misses and performance metrics + +### Knowledge Base Operations +- **RAG Queries**: Knowledge searches and retrievals +- **Vector Operations**: Embedding generations and similarity searches +- **Knowledge Usage**: Source citations and relevance scoring + + + +## Dashboard and Monitoring + +Once your Agno agents are running with AgentOps, you can monitor them in the [AgentOps Dashboard](https://app.agentops.ai/): + +- **Real-time Monitoring**: Live agent status and performance +- **Execution Traces**: Detailed logs of agent interactions +- **Performance Analytics**: Token usage, costs, and timing metrics +- **Team Collaboration**: Visual representation of multi-agent workflows +- **Error Tracking**: Comprehensive error logs and debugging information + +## Best Practices + +1. **Initialize AgentOps Early**: Always call `agentops.init()` before creating Agno components +2. **Environment Management**: Use `.env` files for secure API key management +3. **Error Handling**: Wrap agent operations in try-catch blocks for robust applications +4. **Resource Management**: Monitor token usage and costs in the AgentOps dashboard +5. **Team Design**: Design specialized agents with clear roles for better collaboration +6. **Workflow Optimization**: Use caching workflows for repeated operations + +**Workflow State Issues**: Check that workflow sessions are properly managed and cache keys are unique. 
+ +For more detailed examples and advanced usage patterns, check out our [Agno Examples](/v2/examples/agno). \ No newline at end of file diff --git a/docs/v2/introduction.mdx b/docs/v2/introduction.mdx index 2a46e7a03..a0d337276 100644 --- a/docs/v2/introduction.mdx +++ b/docs/v2/introduction.mdx @@ -29,6 +29,7 @@ description: "AgentOps is the developer favorite platform for testing, debugging } iconType="image" href="/v2/integrations/ag2" /> + } iconType="image" href="/v2/integrations/agno" /> } iconType="image" href="/v2/integrations/autogen" /> } iconType="image" href="/v2/integrations/crewai" /> } iconType="image" href="/v2/integrations/google_adk" /> diff --git a/examples/agno_examples/comprehensive_agno_example.py b/examples/agno_examples/comprehensive_agno_example.py new file mode 100644 index 000000000..367168dd8 --- /dev/null +++ b/examples/agno_examples/comprehensive_agno_example.py @@ -0,0 +1,374 @@ +""" +Comprehensive Agno Example with AgentOps Instrumentation + +This example demonstrates key Agno features: +1. Basic Agents and Teams +2. Tool Integration and RAG +3. Workflows with Caching +4. Collaborative Research Teams +5. Async Operations + +Each section shows different Agno capabilities with AgentOps tracking. 
+""" + +import os +import asyncio +from typing import Iterator +from textwrap import dedent +from dotenv import load_dotenv + +# Load environment variables +load_dotenv() + +# Now import agno to trigger instrumentation, then import specific classes +import agno # This triggers AgentOps instrumentation +from agno.agent import Agent, RunResponse +from agno.team import Team +from agno.models.openai import OpenAIChat +from agno.workflow import Workflow +from agno.tools import tool +from agno.tools.duckduckgo import DuckDuckGoTools +from agno.tools.hackernews import HackerNewsTools +from agno.tools.reasoning import ReasoningTools +from agno.tools.arxiv import ArxivTools +from agno.tools.googlesearch import GoogleSearchTools +from agno.knowledge.url import UrlKnowledge +from agno.utils.pprint import pprint_run_response +from agno.utils.log import logger + +import agentops +agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) +# Sample configuration +MODEL_ID = "gpt-4o-mini" + + +def check_environment(): + """Check if required environment variables are set.""" + required_vars = ["AGENTOPS_API_KEY", "OPENAI_API_KEY"] + missing_vars = [var for var in required_vars if not os.getenv(var)] + + if missing_vars: + print(f"Missing required environment variables: {missing_vars}") + print("Please set these in your .env file or environment") + return False + + print("Environment variables checked successfully") + return True + + +def demonstrate_basic_agents(): + """Demonstrate basic agent creation and team coordination.""" + print("\n" + "=" * 60) + print("BASIC AGENTS AND TEAMS") + print("=" * 60) + + try: + # Create individual agents + news_agent = Agent( + name="News Agent", + role="Get the latest news", + model=OpenAIChat(id=MODEL_ID) + ) + + weather_agent = Agent( + name="Weather Agent", + role="Get the weather for the next 7 days", + model=OpenAIChat(id=MODEL_ID) + ) + + # Create a team with coordination mode + team = Team( + name="News and Weather Team", + mode="coordinate", 
+ members=[news_agent, weather_agent] + ) + + # Run team task + response = team.run("What is the weather in Tokyo?") + print(f"Team Response: {response.content}") + + except Exception as e: + print(f"Basic agents error: {e}") + + +def demonstrate_tool_integration(): + """Demonstrate tool integration with RAG and knowledge base.""" + print("\n" + "=" * 60) + print("TOOL INTEGRATION WITH RAG") + print("=" * 60) + + try: + # Create knowledge base with vector database + knowledge_base = UrlKnowledge( + urls=["https://docs.agno.com/introduction/agents.md"], + # Use LanceDB as the vector database, store embeddings in the `agno_docs` table + vector_db=LanceDb( + uri="tmp/lancedb", + table_name="agno_docs", + search_type=SearchType.hybrid, + embedder=CohereEmbedder(id="embed-v4.0"), + reranker=CohereReranker(model="rerank-v3.5"), + ), + ) + + # Create agent with knowledge and reasoning tools + agent = Agent( + model=OpenAIChat(id=MODEL_ID), + # Agentic RAG is enabled by default when `knowledge` is provided to the Agent. + knowledge=knowledge_base, + # search_knowledge=True gives the Agent the ability to search on demand + search_knowledge=True, + tools=[ReasoningTools(add_instructions=True)], + instructions=[ + "Include sources in your response.", + "Always search your knowledge before answering the question.", + "Only include the output in your response. 
No other text.", + ], + markdown=True, + ) + + print("Running RAG agent with knowledge base...") + agent.print_response( + "What are Agents?", + show_full_reasoning=True, + ) + + except Exception as e: + print(f"Tool integration error: {e}") + + +class CacheWorkflow(Workflow): + """A workflow that demonstrates caching capabilities.""" + + # Purely descriptive, not used by the workflow + description: str = "A workflow that caches previous outputs" + + # Add agents or teams as attributes on the workflow + agent = Agent(model=OpenAIChat(id=MODEL_ID)) + + # Write the logic in the `run()` method + def run(self, message: str) -> Iterator[RunResponse]: + logger.info(f"Checking cache for '{message}'") + # Check if the output is already cached + if self.session_state.get(message): + logger.info(f"Cache hit for '{message}'") + yield RunResponse(run_id=self.run_id, content=self.session_state.get(message)) + return + + logger.info(f"Cache miss for '{message}'") + # Run the agent and yield the response + yield from self.agent.run(message, stream=True) + + # Cache the output after response is yielded + self.session_state[message] = self.agent.run_response.content + + +def demonstrate_workflows(): + """Demonstrate workflow capabilities with caching.""" + print("\n" + "=" * 60) + print("WORKFLOWS WITH CACHING") + print("=" * 60) + + try: + workflow = CacheWorkflow() + + print("First run (cache miss):") + # Run workflow (this takes ~1s) + response: Iterator[RunResponse] = workflow.run(message="Tell me a joke.") + # Print the response + pprint_run_response(response, markdown=True, show_time=True) + + print("\nSecond run (cache hit):") + # Run workflow again (this is immediate because of caching) + response: Iterator[RunResponse] = workflow.run(message="Tell me a joke.") + # Print the response + pprint_run_response(response, markdown=True, show_time=True) + + except Exception as e: + print(f"Workflow error: {e}") + + +def demonstrate_research_team(): + """Demonstrate collaborative 
research team with multiple specialized agents.""" + print("\n" + "=" * 60) + print("COLLABORATIVE RESEARCH TEAM") + print("=" * 60) + + try: + # Create specialized research agents + reddit_researcher = Agent( + name="Reddit Researcher", + role="Research a topic on Reddit", + model=OpenAIChat(id="gpt-4o"), + tools=[GoogleSearchTools()], + add_name_to_instructions=True, + instructions=dedent(""" + You are a Reddit researcher. + You will be given a topic to research on Reddit. + You will need to find the most relevant posts on Reddit. + """), + ) + + hackernews_researcher = Agent( + name="HackerNews Researcher", + model=OpenAIChat("gpt-4o"), + role="Research a topic on HackerNews.", + tools=[HackerNewsTools()], + add_name_to_instructions=True, + instructions=dedent(""" + You are a HackerNews researcher. + You will be given a topic to research on HackerNews. + You will need to find the most relevant posts on HackerNews. + """), + ) + + academic_paper_researcher = Agent( + name="Academic Paper Researcher", + model=OpenAIChat("gpt-4o"), + role="Research academic papers and scholarly content", + tools=[GoogleSearchTools(), ArxivTools()], + add_name_to_instructions=True, + instructions=dedent(""" + You are an academic paper researcher. + You will be given a topic to research in academic literature. + You will need to find relevant scholarly articles, papers, and academic discussions. + Focus on peer-reviewed content and citations from reputable sources. + Provide brief summaries of key findings and methodologies. + """), + ) + + twitter_researcher = Agent( + name="Twitter Researcher", + model=OpenAIChat("gpt-4o"), + role="Research trending discussions and real-time updates", + tools=[DuckDuckGoTools()], + add_name_to_instructions=True, + instructions=dedent(""" + You are a Twitter/X researcher. + You will be given a topic to research on Twitter/X. + You will need to find trending discussions, influential voices, and real-time updates. 
+ Focus on verified accounts and credible sources when possible. + Track relevant hashtags and ongoing conversations. + """), + ) + + # Create collaborative team + agent_team = Team( + name="Discussion Team", + mode="collaborate", + model=OpenAIChat("gpt-4o"), + members=[ + reddit_researcher, + hackernews_researcher, + academic_paper_researcher, + twitter_researcher, + ], + instructions=[ + "You are a discussion master.", + "You have to stop the discussion when you think the team has reached a consensus.", + ], + success_criteria="The team has reached a consensus.", + enable_agentic_context=True, + add_context=True, + show_tool_calls=True, + markdown=True, + debug_mode=True, + show_members_responses=True, + ) + + print("Running collaborative research team...") + agent_team.print_response( + message="Start the discussion on the topic: 'What is the best way to learn to code?'", + stream=True, + stream_intermediate_steps=True, + ) + + except Exception as e: + print(f"Research team error: {e}") + + +async def demonstrate_async_operations(): + """Demonstrate async operations with Agno agents.""" + print("\n" + "=" * 60) + print("ASYNC OPERATIONS") + print("=" * 60) + + try: + # Create async tasks with different agents + agent = Agent(model=OpenAIChat(id=MODEL_ID)) + + # Define async tasks + async def task1(): + response = await agent.arun("Explain Python in one paragraph") + return f"Task 1: {response.content}" + + async def task2(): + response = await agent.arun("Explain JavaScript in one paragraph") + return f"Task 2: {response.content}" + + async def task3(): + response = await agent.arun("Compare them briefly") + return f"Task 3: {response.content}" + + # Run tasks concurrently + print("Running async tasks concurrently...") + results = await asyncio.gather(task1(), task2(), task3()) + + for result in results: + print(result) + print() + + except Exception as e: + print(f"Async operations error: {e}") + + +async def main(): + """Main function to run all Agno 
demonstrations.""" + print("Starting Comprehensive Agno Example with AgentOps") + print("=" * 80) + + # Check environment + if not check_environment(): + return + + + + # Run all demonstrations + print("\nRunning all Agno demonstrations...") + + + # Research teams + try: + demonstrate_research_team() + except Exception as e: + print(f"Skipping research team demo due to: {e}") + + # Basic functionality + demonstrate_basic_agents() + + # Tool integration + try: + demonstrate_tool_integration() + except Exception as e: + print(f"Skipping tool integration demo due to: {e}") + + # Workflows + try: + demonstrate_workflows() + except Exception as e: + print(f"Skipping workflow demo due to: {e}") + + + + # Async operations + try: + await demonstrate_async_operations() + except Exception as e: + print(f"Skipping async demo due to: {e}") + + print("\nAll Agno demonstrations completed!") + print("Check your AgentOps dashboard for detailed traces and metrics.") + + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file From b9ff5292a531436c834dc4a7f949994299c3b912 Mon Sep 17 00:00:00 2001 From: fenilfaldu Date: Wed, 11 Jun 2025 07:21:19 +0530 Subject: [PATCH 05/14] ruff checks --- .../comprehensive_agno_example.py | 128 +++++++++--------- 1 file changed, 62 insertions(+), 66 deletions(-) diff --git a/examples/agno_examples/comprehensive_agno_example.py b/examples/agno_examples/comprehensive_agno_example.py index 367168dd8..7a6519971 100644 --- a/examples/agno_examples/comprehensive_agno_example.py +++ b/examples/agno_examples/comprehensive_agno_example.py @@ -21,22 +21,25 @@ load_dotenv() # Now import agno to trigger instrumentation, then import specific classes -import agno # This triggers AgentOps instrumentation -from agno.agent import Agent, RunResponse -from agno.team import Team -from agno.models.openai import OpenAIChat -from agno.workflow import Workflow -from agno.tools import tool -from agno.tools.duckduckgo import DuckDuckGoTools -from 
agno.tools.hackernews import HackerNewsTools -from agno.tools.reasoning import ReasoningTools -from agno.tools.arxiv import ArxivTools -from agno.tools.googlesearch import GoogleSearchTools -from agno.knowledge.url import UrlKnowledge -from agno.utils.pprint import pprint_run_response -from agno.utils.log import logger - -import agentops +from agno.agent import Agent, RunResponse # noqa: E402 +from agno.team import Team # noqa: E402 +from agno.models.openai import OpenAIChat # noqa: E402 +from agno.workflow import Workflow # noqa: E402 +from agno.tools.duckduckgo import DuckDuckGoTools # noqa: E402 +from agno.tools.hackernews import HackerNewsTools # noqa: E402 +from agno.tools.reasoning import ReasoningTools # noqa: E402 +from agno.tools.arxiv import ArxivTools # noqa: E402 +from agno.tools.googlesearch import GoogleSearchTools # noqa: E402 +from agno.knowledge.url import UrlKnowledge # noqa: E402 +from agno.utils.pprint import pprint_run_response # noqa: E402 +from agno.utils.log import logger # noqa: E402 +from agno.vectordb.lancedb import LanceDb # noqa: E402 +from agno.vectordb.search import SearchType # noqa: E402 +from agno.embedder.cohere import CohereEmbedder # noqa: E402 +from agno.reranker.cohere import CohereReranker # noqa: E402 + +import agentops # noqa: E402 + agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) # Sample configuration MODEL_ID = "gpt-4o-mini" @@ -46,12 +49,12 @@ def check_environment(): """Check if required environment variables are set.""" required_vars = ["AGENTOPS_API_KEY", "OPENAI_API_KEY"] missing_vars = [var for var in required_vars if not os.getenv(var)] - + if missing_vars: print(f"Missing required environment variables: {missing_vars}") print("Please set these in your .env file or environment") return False - + print("Environment variables checked successfully") return True @@ -64,29 +67,19 @@ def demonstrate_basic_agents(): try: # Create individual agents - news_agent = Agent( - name="News Agent", - role="Get the latest 
news", - model=OpenAIChat(id=MODEL_ID) - ) + news_agent = Agent(name="News Agent", role="Get the latest news", model=OpenAIChat(id=MODEL_ID)) weather_agent = Agent( - name="Weather Agent", - role="Get the weather for the next 7 days", - model=OpenAIChat(id=MODEL_ID) + name="Weather Agent", role="Get the weather for the next 7 days", model=OpenAIChat(id=MODEL_ID) ) # Create a team with coordination mode - team = Team( - name="News and Weather Team", - mode="coordinate", - members=[news_agent, weather_agent] - ) + team = Team(name="News and Weather Team", mode="coordinate", members=[news_agent, weather_agent]) # Run team task response = team.run("What is the weather in Tokyo?") print(f"Team Response: {response.content}") - + except Exception as e: print(f"Basic agents error: {e}") @@ -106,9 +99,9 @@ def demonstrate_tool_integration(): uri="tmp/lancedb", table_name="agno_docs", search_type=SearchType.hybrid, - embedder=CohereEmbedder(id="embed-v4.0"), - reranker=CohereReranker(model="rerank-v3.5"), - ), + embedder=CohereEmbedder(id="embed-v4.0"), # noqa: E821 + reranker=CohereReranker(model="rerank-v3.5"), # noqa: E821 + ), # noqa: E821 ) # Create agent with knowledge and reasoning tools @@ -126,20 +119,20 @@ def demonstrate_tool_integration(): ], markdown=True, ) - + print("Running RAG agent with knowledge base...") agent.print_response( "What are Agents?", show_full_reasoning=True, ) - + except Exception as e: print(f"Tool integration error: {e}") class CacheWorkflow(Workflow): """A workflow that demonstrates caching capabilities.""" - + # Purely descriptive, not used by the workflow description: str = "A workflow that caches previous outputs" @@ -177,13 +170,13 @@ def demonstrate_workflows(): response: Iterator[RunResponse] = workflow.run(message="Tell me a joke.") # Print the response pprint_run_response(response, markdown=True, show_time=True) - + print("\nSecond run (cache hit):") # Run workflow again (this is immediate because of caching) response: 
Iterator[RunResponse] = workflow.run(message="Tell me a joke.") # Print the response pprint_run_response(response, markdown=True, show_time=True) - + except Exception as e: print(f"Workflow error: {e}") @@ -202,11 +195,13 @@ def demonstrate_research_team(): model=OpenAIChat(id="gpt-4o"), tools=[GoogleSearchTools()], add_name_to_instructions=True, - instructions=dedent(""" + instructions=dedent( + """ You are a Reddit researcher. You will be given a topic to research on Reddit. You will need to find the most relevant posts on Reddit. - """), + """ + ), ) hackernews_researcher = Agent( @@ -215,11 +210,13 @@ def demonstrate_research_team(): role="Research a topic on HackerNews.", tools=[HackerNewsTools()], add_name_to_instructions=True, - instructions=dedent(""" + instructions=dedent( + """ You are a HackerNews researcher. You will be given a topic to research on HackerNews. You will need to find the most relevant posts on HackerNews. - """), + """ + ), ) academic_paper_researcher = Agent( @@ -228,13 +225,15 @@ def demonstrate_research_team(): role="Research academic papers and scholarly content", tools=[GoogleSearchTools(), ArxivTools()], add_name_to_instructions=True, - instructions=dedent(""" + instructions=dedent( + """ You are an academic paper researcher. You will be given a topic to research in academic literature. You will need to find relevant scholarly articles, papers, and academic discussions. Focus on peer-reviewed content and citations from reputable sources. Provide brief summaries of key findings and methodologies. - """), + """ + ), ) twitter_researcher = Agent( @@ -243,13 +242,15 @@ def demonstrate_research_team(): role="Research trending discussions and real-time updates", tools=[DuckDuckGoTools()], add_name_to_instructions=True, - instructions=dedent(""" + instructions=dedent( + """ You are a Twitter/X researcher. You will be given a topic to research on Twitter/X. 
You will need to find trending discussions, influential voices, and real-time updates. Focus on verified accounts and credible sources when possible. Track relevant hashtags and ongoing conversations. - """), + """ + ), ) # Create collaborative team @@ -282,7 +283,7 @@ def demonstrate_research_team(): stream=True, stream_intermediate_steps=True, ) - + except Exception as e: print(f"Research team error: {e}") @@ -296,28 +297,28 @@ async def demonstrate_async_operations(): try: # Create async tasks with different agents agent = Agent(model=OpenAIChat(id=MODEL_ID)) - + # Define async tasks async def task1(): response = await agent.arun("Explain Python in one paragraph") return f"Task 1: {response.content}" - + async def task2(): response = await agent.arun("Explain JavaScript in one paragraph") return f"Task 2: {response.content}" - + async def task3(): response = await agent.arun("Compare them briefly") return f"Task 3: {response.content}" - + # Run tasks concurrently print("Running async tasks concurrently...") results = await asyncio.gather(task1(), task2(), task3()) - + for result in results: print(result) print() - + except Exception as e: print(f"Async operations error: {e}") @@ -326,49 +327,44 @@ async def main(): """Main function to run all Agno demonstrations.""" print("Starting Comprehensive Agno Example with AgentOps") print("=" * 80) - + # Check environment if not check_environment(): return - - # Run all demonstrations print("\nRunning all Agno demonstrations...") - # Research teams try: demonstrate_research_team() except Exception as e: print(f"Skipping research team demo due to: {e}") - + # Basic functionality demonstrate_basic_agents() - + # Tool integration try: demonstrate_tool_integration() except Exception as e: print(f"Skipping tool integration demo due to: {e}") - + # Workflows try: demonstrate_workflows() except Exception as e: print(f"Skipping workflow demo due to: {e}") - - # Async operations try: await demonstrate_async_operations() except 
Exception as e: print(f"Skipping async demo due to: {e}") - + print("\nAll Agno demonstrations completed!") print("Check your AgentOps dashboard for detailed traces and metrics.") if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file + asyncio.run(main()) From eca4d709e05ee93169b2d1b083466c9d22af5efd Mon Sep 17 00:00:00 2001 From: fenilfaldu Date: Thu, 12 Jun 2025 04:25:33 +0530 Subject: [PATCH 06/14] added docs --- docs/v2/examples/agno.mdx | 340 ++++++------ .../agno/agno_comprehensive_tutorial.ipynb | 524 ++++++++++++++++++ .../agno_comprehensive_tutorial.py} | 5 +- 3 files changed, 682 insertions(+), 187 deletions(-) create mode 100644 examples/agno/agno_comprehensive_tutorial.ipynb rename examples/{agno_examples/comprehensive_agno_example.py => agno/agno_comprehensive_tutorial.py} (98%) diff --git a/docs/v2/examples/agno.mdx b/docs/v2/examples/agno.mdx index cfee0e312..6fab0b164 100644 --- a/docs/v2/examples/agno.mdx +++ b/docs/v2/examples/agno.mdx @@ -1,55 +1,40 @@ --- -title: Agno -description: "Comprehensive examples of using Agno with AgentOps instrumentation" +title: 'Agno' +description: 'Comprehensive Agno Tutorial with AgentOps' --- +{/* SOURCE_FILE: examples/agno/agno_comprehensive_tutorial.ipynb */} -This guide provides complete examples of using [Agno](https://docs.agno.com) with AgentOps instrumentation. You'll learn how to create agents, coordinate teams, build workflows, and leverage advanced features like RAG and async operations. +_View Notebook on Github_ -## Prerequisites +# Comprehensive Agno Tutorial with AgentOps -Before running these examples, ensure you have: +This tutorial demonstrates key Agno features with AgentOps instrumentation: -1. **API Keys**: AgentOps and OpenAI API keys -2. **Environment Setup**: Python environment with required packages -3. **Dependencies**: Install Agno and AgentOps +1. **Basic Agents and Teams** - Creating individual agents and coordinating them +2. 
**Tool Integration and RAG** - Integrating tools with knowledge bases +3. **Workflows with Caching** - Building workflows that cache results +4. **Collaborative Research Teams** - Multi-agent research coordination +5. **Async Operations** - Concurrent agent operations +Each section shows different Agno capabilities with comprehensive AgentOps tracking. + + + +## Installation ```bash pip - pip install agentops agno + pip install agentops agno cohere openai python-dotenv ``` ```bash poetry - poetry add agentops agno + poetry add agentops agno cohere openai python-dotenv ``` ```bash uv - uv add agentops agno + uv add agentops agno cohere openai python-dotenv ``` -## Environment Configuration - -Create a `.env` file with your API keys: - -```env -AGENTOPS_API_KEY="your_agentops_api_key_here" -OPENAI_API_KEY="your_openai_api_key_here" -``` - -## Complete Example Structure - -Here's the foundation for all our examples: ```python -""" -Comprehensive Agno Example with AgentOps Instrumentation - -This example demonstrates key Agno features: -1. Basic Agents and Teams -2. Tool Integration with RAG -3. Workflows with Caching -4. Collaborative Research Teams -5. 
Async Operations -""" - import os import asyncio from typing import Iterator @@ -59,44 +44,57 @@ from dotenv import load_dotenv # Load environment variables load_dotenv() -# Import Agno components +# Import agno components from agno.agent import Agent, RunResponse from agno.team import Team from agno.models.openai import OpenAIChat from agno.workflow import Workflow -from agno.tools.reasoning import ReasoningTools -from agno.tools.googlesearch import GoogleSearchTools +from agno.tools.duckduckgo import DuckDuckGoTools from agno.tools.hackernews import HackerNewsTools +from agno.tools.reasoning import ReasoningTools from agno.tools.arxiv import ArxivTools -from agno.tools.duckduckgo import DuckDuckGoTools +from agno.tools.googlesearch import GoogleSearchTools from agno.knowledge.url import UrlKnowledge from agno.utils.pprint import pprint_run_response from agno.utils.log import logger +from agno.vectordb.lancedb import LanceDb +from agno.vectordb.search import SearchType +from agno.embedder.cohere import CohereEmbedder +from agno.reranker.cohere import CohereReranker -# Initialize AgentOps import agentops + +# Initialize AgentOps agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) # Configuration -MODEL_NAME = "gpt-4o-mini" +MODEL_ID = "gpt-4o-mini" + +``` + +```python def check_environment(): """Check if required environment variables are set.""" required_vars = ["AGENTOPS_API_KEY", "OPENAI_API_KEY"] missing_vars = [var for var in required_vars if not os.getenv(var)] - + if missing_vars: print(f"Missing required environment variables: {missing_vars}") print("Please set these in your .env file or environment") return False - + print("Environment variables checked successfully") return True -``` +# Check environment +check_environment() + +``` ## 1. Basic Agents and Teams -### Creating and Running Individual Agents +Let's start with creating individual agents and organizing them into teams. 
+ ```python def demonstrate_basic_agents(): @@ -106,41 +104,31 @@ def demonstrate_basic_agents(): print("=" * 60) try: - # Create individual agents with specific roles - news_agent = Agent( - name="News Agent", - role="Get the latest news", - model=OpenAIChat(id=MODEL_NAME) - ) + # Create individual agents + news_agent = Agent(name="News Agent", role="Get the latest news", model=OpenAIChat(id=MODEL_ID)) weather_agent = Agent( - name="Weather Agent", - role="Get the weather for the next 7 days", - model=OpenAIChat(id=MODEL_NAME) + name="Weather Agent", role="Get the weather for the next 7 days", model=OpenAIChat(id=MODEL_ID) ) # Create a team with coordination mode - team = Team( - name="News and Weather Team", - mode="coordinate", # Agents work in sequence - members=[news_agent, weather_agent] - ) + team = Team(name="News and Weather Team", mode="coordinate", members=[news_agent, weather_agent]) - # Run team task - AgentOps will track all interactions + # Run team task response = team.run("What is the weather in Tokyo?") print(f"Team Response: {response.content}") - + except Exception as e: print(f"Basic agents error: {e}") -# Run the example +# Run the demonstration demonstrate_basic_agents() -``` -/v2/introduction +``` ## 2. Tool Integration with RAG -### Knowledge Base and Advanced Tools +This section demonstrates how to integrate tools with knowledge bases for Retrieval-Augmented Generation (RAG). 
+ ```python def demonstrate_tool_integration(): @@ -153,7 +141,7 @@ def demonstrate_tool_integration(): # Create knowledge base with vector database knowledge_base = UrlKnowledge( urls=["https://docs.agno.com/introduction/agents.md"], - # Use LanceDB for vector storage + # Use LanceDB as the vector database, store embeddings in the `agno_docs` table vector_db=LanceDb( uri="tmp/lancedb", table_name="agno_docs", @@ -165,10 +153,11 @@ def demonstrate_tool_integration(): # Create agent with knowledge and reasoning tools agent = Agent( - model=OpenAIChat(id=MODEL_NAME), - # Agentic RAG is enabled with knowledge + model=OpenAIChat(id=MODEL_ID), + # Agentic RAG is enabled by default when `knowledge` is provided to the Agent. knowledge=knowledge_base, - search_knowledge=True, # Enable on-demand search + # search_knowledge=True gives the Agent the ability to search on demand + search_knowledge=True, tools=[ReasoningTools(add_instructions=True)], instructions=[ "Include sources in your response.", @@ -177,51 +166,52 @@ def demonstrate_tool_integration(): ], markdown=True, ) - + print("Running RAG agent with knowledge base...") agent.print_response( "What are Agents?", show_full_reasoning=True, ) - + except Exception as e: print(f"Tool integration error: {e}") -# Run the example +# Run the demonstration demonstrate_tool_integration() -``` +``` ## 3. Workflows with Caching -### State Management and Optimization +This section demonstrates how to create workflows that cache previous outputs for improved performance. 
+ ```python class CacheWorkflow(Workflow): """A workflow that demonstrates caching capabilities.""" - + + # Purely descriptive, not used by the workflow description: str = "A workflow that caches previous outputs" - agent = Agent(model=OpenAIChat(id=MODEL_NAME)) + # Add agents or teams as attributes on the workflow + agent = Agent(model=OpenAIChat(id=MODEL_ID)) + + # Write the logic in the `run()` method def run(self, message: str) -> Iterator[RunResponse]: logger.info(f"Checking cache for '{message}'") - # Check if the output is already cached if self.session_state.get(message): logger.info(f"Cache hit for '{message}'") - yield RunResponse( - run_id=self.run_id, - content=self.session_state.get(message) - ) + yield RunResponse(run_id=self.run_id, content=self.session_state.get(message)) return logger.info(f"Cache miss for '{message}'") - # Run the agent and yield the response yield from self.agent.run(message, stream=True) - + # Cache the output after response is yielded self.session_state[message] = self.agent.run_response.content + def demonstrate_workflows(): """Demonstrate workflow capabilities with caching.""" print("\n" + "=" * 60) @@ -232,26 +222,28 @@ def demonstrate_workflows(): workflow = CacheWorkflow() print("First run (cache miss):") - # This takes ~1s as it generates new content - response = workflow.run(message="Tell me a joke.") + # Run workflow (this takes ~1s) + response: Iterator[RunResponse] = workflow.run(message="Tell me a joke.") + # Print the response pprint_run_response(response, markdown=True, show_time=True) - + print("\nSecond run (cache hit):") - # This is immediate due to caching - response = workflow.run(message="Tell me a joke.") + # Run workflow again (this is immediate because of caching) + response: Iterator[RunResponse] = workflow.run(message="Tell me a joke.") + # Print the response pprint_run_response(response, markdown=True, show_time=True) - + except Exception as e: print(f"Workflow error: {e}") -# Run the example +# Run the 
demonstration demonstrate_workflows() -``` - +``` ## 4. Collaborative Research Teams -### Multi-Agent Coordination +This section demonstrates how to create teams of specialized research agents that collaborate on complex research tasks. + ```python def demonstrate_research_team(): @@ -268,11 +260,13 @@ def demonstrate_research_team(): model=OpenAIChat(id="gpt-4o"), tools=[GoogleSearchTools()], add_name_to_instructions=True, - instructions=dedent(""" + instructions=dedent( + """ You are a Reddit researcher. You will be given a topic to research on Reddit. You will need to find the most relevant posts on Reddit. - """), + """ + ), ) hackernews_researcher = Agent( @@ -281,11 +275,13 @@ def demonstrate_research_team(): role="Research a topic on HackerNews.", tools=[HackerNewsTools()], add_name_to_instructions=True, - instructions=dedent(""" + instructions=dedent( + """ You are a HackerNews researcher. You will be given a topic to research on HackerNews. You will need to find the most relevant posts on HackerNews. - """), + """ + ), ) academic_paper_researcher = Agent( @@ -294,13 +290,15 @@ def demonstrate_research_team(): role="Research academic papers and scholarly content", tools=[GoogleSearchTools(), ArxivTools()], add_name_to_instructions=True, - instructions=dedent(""" + instructions=dedent( + """ You are an academic paper researcher. You will be given a topic to research in academic literature. You will need to find relevant scholarly articles, papers, and academic discussions. Focus on peer-reviewed content and citations from reputable sources. Provide brief summaries of key findings and methodologies. - """), + """ + ), ) twitter_researcher = Agent( @@ -309,19 +307,21 @@ def demonstrate_research_team(): role="Research trending discussions and real-time updates", tools=[DuckDuckGoTools()], add_name_to_instructions=True, - instructions=dedent(""" + instructions=dedent( + """ You are a Twitter/X researcher. You will be given a topic to research on Twitter/X. 
You will need to find trending discussions, influential voices, and real-time updates. Focus on verified accounts and credible sources when possible. Track relevant hashtags and ongoing conversations. - """), + """ + ), ) # Create collaborative team agent_team = Team( name="Discussion Team", - mode="collaborate", # Agents work together + mode="collaborate", model=OpenAIChat("gpt-4o"), members=[ reddit_researcher, @@ -348,17 +348,18 @@ def demonstrate_research_team(): stream=True, stream_intermediate_steps=True, ) - + except Exception as e: print(f"Research team error: {e}") -# Run the example +# Run the demonstration demonstrate_research_team() -``` +``` ## 5. Async Operations -### Concurrent Agent Execution +This section demonstrates how to run multiple agent operations concurrently using async/await patterns. + ```python async def demonstrate_async_operations(): @@ -368,123 +369,92 @@ async def demonstrate_async_operations(): print("=" * 60) try: - # Create agent for async operations - agent = Agent(model=OpenAIChat(id=MODEL_NAME)) - + # Create async tasks with different agents + agent = Agent(model=OpenAIChat(id=MODEL_ID)) + # Define async tasks async def task1(): response = await agent.arun("Explain Python in one paragraph") return f"Task 1: {response.content}" - + async def task2(): response = await agent.arun("Explain JavaScript in one paragraph") return f"Task 2: {response.content}" - + async def task3(): response = await agent.arun("Compare them briefly") return f"Task 3: {response.content}" - - # Run tasks concurrently for better performance + + # Run tasks concurrently print("Running async tasks concurrently...") results = await asyncio.gather(task1(), task2(), task3()) - + for result in results: print(result) print() - + except Exception as e: print(f"Async operations error: {e}") -# Run async example -asyncio.run(demonstrate_async_operations()) +# Run the async demonstration +await demonstrate_async_operations() + ``` +## Complete Tutorial -## Complete 
Running Example +Run all demonstrations in sequence to see the full capabilities of Agno with AgentOps tracking. -Here's how to run all examples together: ```python async def main(): """Main function to run all Agno demonstrations.""" - print("Starting Comprehensive Agno Example with AgentOps") + print("Starting Comprehensive Agno Tutorial with AgentOps") print("=" * 80) - - # Check environment first + + # Check environment if not check_environment(): return - + + # Run all demonstrations print("\nRunning all Agno demonstrations...") - - # Run all demonstrations with error handling - demos = [ - ("Basic Agents", demonstrate_basic_agents), - ("Tool Integration", demonstrate_tool_integration), - ("Workflows", demonstrate_workflows), - ("Research Team", demonstrate_research_team), - ] - - for name, demo_func in demos: - try: - demo_func() - except Exception as e: - print(f"Skipping {name} demo due to: {e}") - - # Run async demo + + # Research teams try: - await demonstrate_async_operations() + demonstrate_research_team() except Exception as e: - print(f"Skipping async demo due to: {e}") - - print("\nAll Agno demonstrations completed!") - print("Check your AgentOps dashboard for detailed traces and metrics.") - -if __name__ == "__main__": - asyncio.run(main()) -``` - -## Monitoring and Analytics - -### AgentOps Dashboard Features - -After running these examples, visit your [AgentOps Dashboard](https://app.agentops.ai/) to see: + print(f"Skipping research team demo due to: {e}") -**Agent Analytics:** -- Individual agent performance metrics -- Token usage and costs per agent -- Response times and success rates -- Agent interaction patterns + # Basic functionality + demonstrate_basic_agents() -**Team Coordination:** -- Multi-agent collaboration flows -- Team decision-making processes -- Communication patterns between agents -- Task distribution and load balancing + # Tool integration + try: + demonstrate_tool_integration() + except Exception as e: + print(f"Skipping tool 
integration demo due to: {e}") -**Workflow Optimization:** -- Cache efficiency metrics -- Workflow state transitions -- Performance bottlenecks -- Resource utilization + # Workflows + try: + demonstrate_workflows() + except Exception as e: + print(f"Skipping workflow demo due to: {e}") -**Tool Usage:** -- Tool execution frequency and success rates -- RAG query performance -- Knowledge base retrieval accuracy -- Custom tool effectiveness + # Async operations + try: + await demonstrate_async_operations() + except Exception as e: + print(f"Skipping async demo due to: {e}") -## Best Practices from Examples + print("\nAll Agno demonstrations completed!") + print("Check your AgentOps dashboard for detailed traces and metrics.") -1. **Environment Management**: Always validate API keys before running agents -2. **Error Handling**: Wrap each demo in try-catch for robust execution -3. **Resource Optimization**: Use caching workflows for repeated operations -4. **Team Design**: Create specialized agents with clear, distinct roles -5. **Async Operations**: Leverage concurrent execution for improved performance -6. 
**Monitoring**: Use AgentOps dashboard to optimize agent performance +# Uncomment the line below to run the complete tutorial +# await main() +``` -## Next Steps -- Explore the [Agno Integration Guide](/v2/integrations/agno) for more advanced configurations -- Check out additional [AgentOps Examples](/examples) for other frameworks -- Visit the [AgentOps Dashboard](https://app.agentops.ai/) to monitor your agents -- Join the [AgentOps Community](https://discord.gg/agentops) for support and discussions \ No newline at end of file + + + + \ No newline at end of file diff --git a/examples/agno/agno_comprehensive_tutorial.ipynb b/examples/agno/agno_comprehensive_tutorial.ipynb new file mode 100644 index 000000000..1a92d7f68 --- /dev/null +++ b/examples/agno/agno_comprehensive_tutorial.ipynb @@ -0,0 +1,524 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install agno agentops python-dotenv openai cohere\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import asyncio\n", + "from typing import Iterator\n", + "from textwrap import dedent\n", + "from dotenv import load_dotenv\n", + "\n", + "# Load environment variables\n", + "load_dotenv()\n", + "\n", + "# Import agno components\n", + "from agno.agent import Agent, RunResponse\n", + "from agno.team import Team\n", + "from agno.models.openai import OpenAIChat\n", + "from agno.workflow import Workflow\n", + "from agno.tools.duckduckgo import DuckDuckGoTools\n", + "from agno.tools.hackernews import HackerNewsTools\n", + "from agno.tools.reasoning import ReasoningTools\n", + "from agno.tools.arxiv import ArxivTools\n", + "from agno.tools.googlesearch import GoogleSearchTools\n", + "from agno.knowledge.url import UrlKnowledge\n", + "from agno.utils.pprint import pprint_run_response\n", + "from agno.utils.log import logger\n", + "from agno.vectordb.lancedb import 
LanceDb\n", + "from agno.vectordb.search import SearchType\n", + "from agno.embedder.cohere import CohereEmbedder\n", + "from agno.reranker.cohere import CohereReranker\n", + "\n", + "import agentops\n", + "\n", + "# Initialize AgentOps\n", + "agentops.init(api_key=os.getenv(\"AGENTOPS_API_KEY\"))\n", + "\n", + "# Configuration\n", + "MODEL_ID = \"gpt-4o-mini\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def check_environment():\n", + " \"\"\"Check if required environment variables are set.\"\"\"\n", + " required_vars = [\"AGENTOPS_API_KEY\", \"OPENAI_API_KEY\"]\n", + " missing_vars = [var for var in required_vars if not os.getenv(var)]\n", + "\n", + " if missing_vars:\n", + " print(f\"Missing required environment variables: {missing_vars}\")\n", + " print(\"Please set these in your .env file or environment\")\n", + " return False\n", + "\n", + " print(\"Environment variables checked successfully\")\n", + " return True\n", + "\n", + "# Check environment\n", + "check_environment()\n" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## 1. 
Basic Agents and Teams\n", + "\n", + "Let's start with creating individual agents and organizing them into teams.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def demonstrate_basic_agents():\n", + " \"\"\"Demonstrate basic agent creation and team coordination.\"\"\"\n", + " print(\"\\n\" + \"=\" * 60)\n", + " print(\"BASIC AGENTS AND TEAMS\")\n", + " print(\"=\" * 60)\n", + "\n", + " try:\n", + " # Create individual agents\n", + " news_agent = Agent(name=\"News Agent\", role=\"Get the latest news\", model=OpenAIChat(id=MODEL_ID))\n", + "\n", + " weather_agent = Agent(\n", + " name=\"Weather Agent\", role=\"Get the weather for the next 7 days\", model=OpenAIChat(id=MODEL_ID)\n", + " )\n", + "\n", + " # Create a team with coordination mode\n", + " team = Team(name=\"News and Weather Team\", mode=\"coordinate\", members=[news_agent, weather_agent])\n", + "\n", + " # Run team task\n", + " response = team.run(\"What is the weather in Tokyo?\")\n", + " print(f\"Team Response: {response.content}\")\n", + "\n", + " except Exception as e:\n", + " print(f\"Basic agents error: {e}\")\n", + "\n", + "# Run the demonstration\n", + "demonstrate_basic_agents()\n" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## 2. 
Tool Integration with RAG\n", + "\n", + "This section demonstrates how to integrate tools with knowledge bases for Retrieval-Augmented Generation (RAG).\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def demonstrate_tool_integration():\n", + " \"\"\"Demonstrate tool integration with RAG and knowledge base.\"\"\"\n", + " print(\"\\n\" + \"=\" * 60)\n", + " print(\"TOOL INTEGRATION WITH RAG\")\n", + " print(\"=\" * 60)\n", + "\n", + " try:\n", + " # Create knowledge base with vector database\n", + " knowledge_base = UrlKnowledge(\n", + " urls=[\"https://docs.agno.com/introduction/agents.md\"],\n", + " # Use LanceDB as the vector database, store embeddings in the `agno_docs` table\n", + " vector_db=LanceDb(\n", + " uri=\"tmp/lancedb\",\n", + " table_name=\"agno_docs\",\n", + " search_type=SearchType.hybrid,\n", + " embedder=CohereEmbedder(id=\"embed-v4.0\"),\n", + " reranker=CohereReranker(model=\"rerank-v3.5\"),\n", + " ),\n", + " )\n", + "\n", + " # Create agent with knowledge and reasoning tools\n", + " agent = Agent(\n", + " model=OpenAIChat(id=MODEL_ID),\n", + " # Agentic RAG is enabled by default when `knowledge` is provided to the Agent.\n", + " knowledge=knowledge_base,\n", + " # search_knowledge=True gives the Agent the ability to search on demand\n", + " search_knowledge=True,\n", + " tools=[ReasoningTools(add_instructions=True)],\n", + " instructions=[\n", + " \"Include sources in your response.\",\n", + " \"Always search your knowledge before answering the question.\",\n", + " \"Only include the output in your response. 
No other text.\",\n", + " ],\n", + " markdown=True,\n", + " )\n", + "\n", + " print(\"Running RAG agent with knowledge base...\")\n", + " agent.print_response(\n", + " \"What are Agents?\",\n", + " show_full_reasoning=True,\n", + " )\n", + "\n", + " except Exception as e:\n", + " print(f\"Tool integration error: {e}\")\n", + "\n", + "# Run the demonstration\n", + "demonstrate_tool_integration()\n" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## 3. Workflows with Caching\n", + "\n", + "This section demonstrates how to create workflows that cache previous outputs for improved performance.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class CacheWorkflow(Workflow):\n", + " \"\"\"A workflow that demonstrates caching capabilities.\"\"\"\n", + "\n", + " # Purely descriptive, not used by the workflow\n", + " description: str = \"A workflow that caches previous outputs\"\n", + "\n", + " # Add agents or teams as attributes on the workflow\n", + " agent = Agent(model=OpenAIChat(id=MODEL_ID))\n", + "\n", + " # Write the logic in the `run()` method\n", + " def run(self, message: str) -> Iterator[RunResponse]:\n", + " logger.info(f\"Checking cache for '{message}'\")\n", + " # Check if the output is already cached\n", + " if self.session_state.get(message):\n", + " logger.info(f\"Cache hit for '{message}'\")\n", + " yield RunResponse(run_id=self.run_id, content=self.session_state.get(message))\n", + " return\n", + "\n", + " logger.info(f\"Cache miss for '{message}'\")\n", + " # Run the agent and yield the response\n", + " yield from self.agent.run(message, stream=True)\n", + "\n", + " # Cache the output after response is yielded\n", + " self.session_state[message] = self.agent.run_response.content\n", + "\n", + "\n", + "def demonstrate_workflows():\n", + " \"\"\"Demonstrate workflow capabilities with caching.\"\"\"\n", + " print(\"\\n\" + 
\"=\" * 60)\n", + " print(\"WORKFLOWS WITH CACHING\")\n", + " print(\"=\" * 60)\n", + "\n", + " try:\n", + " workflow = CacheWorkflow()\n", + "\n", + " print(\"First run (cache miss):\")\n", + " # Run workflow (this takes ~1s)\n", + " response: Iterator[RunResponse] = workflow.run(message=\"Tell me a joke.\")\n", + " # Print the response\n", + " pprint_run_response(response, markdown=True, show_time=True)\n", + "\n", + " print(\"\\nSecond run (cache hit):\")\n", + " # Run workflow again (this is immediate because of caching)\n", + " response: Iterator[RunResponse] = workflow.run(message=\"Tell me a joke.\")\n", + " # Print the response\n", + " pprint_run_response(response, markdown=True, show_time=True)\n", + "\n", + " except Exception as e:\n", + " print(f\"Workflow error: {e}\")\n", + "\n", + "# Run the demonstration\n", + "demonstrate_workflows()\n" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## 4. Collaborative Research Teams\n", + "\n", + "This section demonstrates how to create teams of specialized research agents that collaborate on complex research tasks.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def demonstrate_research_team():\n", + " \"\"\"Demonstrate collaborative research team with multiple specialized agents.\"\"\"\n", + " print(\"\\n\" + \"=\" * 60)\n", + " print(\"COLLABORATIVE RESEARCH TEAM\")\n", + " print(\"=\" * 60)\n", + "\n", + " try:\n", + " # Create specialized research agents\n", + " reddit_researcher = Agent(\n", + " name=\"Reddit Researcher\",\n", + " role=\"Research a topic on Reddit\",\n", + " model=OpenAIChat(id=\"gpt-4o\"),\n", + " tools=[GoogleSearchTools()],\n", + " add_name_to_instructions=True,\n", + " instructions=dedent(\n", + " \"\"\"\n", + " You are a Reddit researcher.\n", + " You will be given a topic to research on Reddit.\n", + " You will need to find the most relevant posts on 
Reddit.\n", + " \"\"\"\n", + " ),\n", + " )\n", + "\n", + " hackernews_researcher = Agent(\n", + " name=\"HackerNews Researcher\",\n", + " model=OpenAIChat(\"gpt-4o\"),\n", + " role=\"Research a topic on HackerNews.\",\n", + " tools=[HackerNewsTools()],\n", + " add_name_to_instructions=True,\n", + " instructions=dedent(\n", + " \"\"\"\n", + " You are a HackerNews researcher.\n", + " You will be given a topic to research on HackerNews.\n", + " You will need to find the most relevant posts on HackerNews.\n", + " \"\"\"\n", + " ),\n", + " )\n", + "\n", + " academic_paper_researcher = Agent(\n", + " name=\"Academic Paper Researcher\",\n", + " model=OpenAIChat(\"gpt-4o\"),\n", + " role=\"Research academic papers and scholarly content\",\n", + " tools=[GoogleSearchTools(), ArxivTools()],\n", + " add_name_to_instructions=True,\n", + " instructions=dedent(\n", + " \"\"\"\n", + " You are an academic paper researcher.\n", + " You will be given a topic to research in academic literature.\n", + " You will need to find relevant scholarly articles, papers, and academic discussions.\n", + " Focus on peer-reviewed content and citations from reputable sources.\n", + " Provide brief summaries of key findings and methodologies.\n", + " \"\"\"\n", + " ),\n", + " )\n", + "\n", + " twitter_researcher = Agent(\n", + " name=\"Twitter Researcher\",\n", + " model=OpenAIChat(\"gpt-4o\"),\n", + " role=\"Research trending discussions and real-time updates\",\n", + " tools=[DuckDuckGoTools()],\n", + " add_name_to_instructions=True,\n", + " instructions=dedent(\n", + " \"\"\"\n", + " You are a Twitter/X researcher.\n", + " You will be given a topic to research on Twitter/X.\n", + " You will need to find trending discussions, influential voices, and real-time updates.\n", + " Focus on verified accounts and credible sources when possible.\n", + " Track relevant hashtags and ongoing conversations.\n", + " \"\"\"\n", + " ),\n", + " )\n", + "\n", + " # Create collaborative team\n", + " agent_team = 
Team(\n", + " name=\"Discussion Team\",\n", + " mode=\"collaborate\",\n", + " model=OpenAIChat(\"gpt-4o\"),\n", + " members=[\n", + " reddit_researcher,\n", + " hackernews_researcher,\n", + " academic_paper_researcher,\n", + " twitter_researcher,\n", + " ],\n", + " instructions=[\n", + " \"You are a discussion master.\",\n", + " \"You have to stop the discussion when you think the team has reached a consensus.\",\n", + " ],\n", + " success_criteria=\"The team has reached a consensus.\",\n", + " enable_agentic_context=True,\n", + " add_context=True,\n", + " show_tool_calls=True,\n", + " markdown=True,\n", + " debug_mode=True,\n", + " show_members_responses=True,\n", + " )\n", + "\n", + " print(\"Running collaborative research team...\")\n", + " agent_team.print_response(\n", + " message=\"Start the discussion on the topic: 'What is the best way to learn to code?'\",\n", + " stream=True,\n", + " stream_intermediate_steps=True,\n", + " )\n", + "\n", + " except Exception as e:\n", + " print(f\"Research team error: {e}\")\n", + "\n", + "# Run the demonstration\n", + "demonstrate_research_team()\n" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## 5. 
Async Operations\n", + "\n", + "This section demonstrates how to run multiple agent operations concurrently using async/await patterns.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "async def demonstrate_async_operations():\n", + " \"\"\"Demonstrate async operations with Agno agents.\"\"\"\n", + " print(\"\\n\" + \"=\" * 60)\n", + " print(\"ASYNC OPERATIONS\")\n", + " print(\"=\" * 60)\n", + "\n", + " try:\n", + " # Create async tasks with different agents\n", + " agent = Agent(model=OpenAIChat(id=MODEL_ID))\n", + "\n", + " # Define async tasks\n", + " async def task1():\n", + " response = await agent.arun(\"Explain Python in one paragraph\")\n", + " return f\"Task 1: {response.content}\"\n", + "\n", + " async def task2():\n", + " response = await agent.arun(\"Explain JavaScript in one paragraph\")\n", + " return f\"Task 2: {response.content}\"\n", + "\n", + " async def task3():\n", + " response = await agent.arun(\"Compare them briefly\")\n", + " return f\"Task 3: {response.content}\"\n", + "\n", + " # Run tasks concurrently\n", + " print(\"Running async tasks concurrently...\")\n", + " results = await asyncio.gather(task1(), task2(), task3())\n", + "\n", + " for result in results:\n", + " print(result)\n", + " print()\n", + "\n", + " except Exception as e:\n", + " print(f\"Async operations error: {e}\")\n", + "\n", + "# Run the async demonstration\n", + "await demonstrate_async_operations()\n" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Complete Tutorial\n", + "\n", + "Run all demonstrations in sequence to see the full capabilities of Agno with AgentOps tracking.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "async def main():\n", + " \"\"\"Main function to run all Agno demonstrations.\"\"\"\n", + " print(\"Starting Comprehensive Agno Tutorial with 
AgentOps\")\n", + " print(\"=\" * 80)\n", + "\n", + " # Check environment\n", + " if not check_environment():\n", + " return\n", + "\n", + " # Run all demonstrations\n", + " print(\"\\nRunning all Agno demonstrations...\")\n", + "\n", + " # Research teams\n", + " try:\n", + " demonstrate_research_team()\n", + " except Exception as e:\n", + " print(f\"Skipping research team demo due to: {e}\")\n", + "\n", + " # Basic functionality\n", + " demonstrate_basic_agents()\n", + "\n", + " # Tool integration\n", + " try:\n", + " demonstrate_tool_integration()\n", + " except Exception as e:\n", + " print(f\"Skipping tool integration demo due to: {e}\")\n", + "\n", + " # Workflows\n", + " try:\n", + " demonstrate_workflows()\n", + " except Exception as e:\n", + " print(f\"Skipping workflow demo due to: {e}\")\n", + "\n", + " # Async operations\n", + " try:\n", + " await demonstrate_async_operations()\n", + " except Exception as e:\n", + " print(f\"Skipping async demo due to: {e}\")\n", + "\n", + " print(\"\\nAll Agno demonstrations completed!\")\n", + " print(\"Check your AgentOps dashboard for detailed traces and metrics.\")\n", + "\n", + "# Uncomment the line below to run the complete tutorial\n", + "# await main()\n" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/agno_examples/comprehensive_agno_example.py b/examples/agno/agno_comprehensive_tutorial.py similarity index 98% rename from examples/agno_examples/comprehensive_agno_example.py rename to examples/agno/agno_comprehensive_tutorial.py index 7a6519971..032e3df33 100644 --- a/examples/agno_examples/comprehensive_agno_example.py +++ b/examples/agno/agno_comprehensive_tutorial.py @@ -1,7 +1,7 @@ """ -Comprehensive Agno Example with AgentOps Instrumentation +Comprehensive Agno Tutorial with AgentOps Instrumentation -This example demonstrates key Agno features: +This tutorial demonstrates key Agno features: 1. 
Basic Agents and Teams 2. Tool Integration and RAG 3. Workflows with Caching @@ -9,6 +9,7 @@ 5. Async Operations Each section shows different Agno capabilities with AgentOps tracking. +Run this script to see all examples in action. """ import os From 5e25523ca32405f04e0759a38c9c7d2711c48e64 Mon Sep 17 00:00:00 2001 From: fenilfaldu Date: Thu, 12 Jun 2025 06:30:06 +0530 Subject: [PATCH 07/14] enhanced docs :) --- docs/v2/examples/agno.mdx | 218 +++++++----- docs/v2/integrations/agno.mdx | 261 ++------------ .../agno/agno_comprehensive_tutorial.ipynb | 329 ++++++++++++++---- 3 files changed, 419 insertions(+), 389 deletions(-) diff --git a/docs/v2/examples/agno.mdx b/docs/v2/examples/agno.mdx index 6fab0b164..bc83dcaaa 100644 --- a/docs/v2/examples/agno.mdx +++ b/docs/v2/examples/agno.mdx @@ -1,37 +1,27 @@ --- title: 'Agno' -description: 'Comprehensive Agno Tutorial with AgentOps' +description: 'Agno Framework Comprehensive Tutorial with AgentOps' --- {/* SOURCE_FILE: examples/agno/agno_comprehensive_tutorial.ipynb */} _View Notebook on Github_ -# Comprehensive Agno Tutorial with AgentOps +# Agno Framework Comprehensive Tutorial with AgentOps -This tutorial demonstrates key Agno features with AgentOps instrumentation: +This tutorial demonstrates the powerful capabilities of the Agno framework for building AI agents and teams. Agno provides a flexible and intuitive way to create intelligent agents that can collaborate, use tools, and perform complex tasks. -1. **Basic Agents and Teams** - Creating individual agents and coordinating them -2. **Tool Integration and RAG** - Integrating tools with knowledge bases -3. **Workflows with Caching** - Building workflows that cache results -4. **Collaborative Research Teams** - Multi-agent research coordination -5. **Async Operations** - Concurrent agent operations +## What You'll Learn -Each section shows different Agno capabilities with comprehensive AgentOps tracking. +1. 
**Basic Agents and Teams** - Creating individual agents and coordinating them in teams +2. **Tool Integration with RAG** - Integrating tools and knowledge bases for enhanced capabilities +3. **Workflows with Caching** - Building efficient workflows with state management +4. **Collaborative Research Teams** - Creating specialized agents that work together +5. **Async Operations** - Running agents asynchronously for better performance +Throughout this tutorial, AgentOps will track all agent activities, providing detailed insights into your AI system's behavior. +## Getting Started - -## Installation - - ```bash pip - pip install agentops agno cohere openai python-dotenv - ``` - ```bash poetry - poetry add agentops agno cohere openai python-dotenv - ``` - ```bash uv - uv add agentops agno cohere openai python-dotenv - ``` - +First, let's import the necessary libraries and set up our environment. We'll need Agno for building agents and AgentOps for monitoring and tracking. ```python @@ -40,36 +30,48 @@ import asyncio from typing import Iterator from textwrap import dedent from dotenv import load_dotenv +``` + +```python # Load environment variables load_dotenv() +``` +## Environment Setup + +Now we'll load environment variables and import the Agno framework components. The order of imports is important - we import agno first to ensure proper instrumentation, then import specific classes we'll use. 
-# Import agno components -from agno.agent import Agent, RunResponse -from agno.team import Team -from agno.models.openai import OpenAIChat -from agno.workflow import Workflow -from agno.tools.duckduckgo import DuckDuckGoTools -from agno.tools.hackernews import HackerNewsTools -from agno.tools.reasoning import ReasoningTools -from agno.tools.arxiv import ArxivTools -from agno.tools.googlesearch import GoogleSearchTools -from agno.knowledge.url import UrlKnowledge -from agno.utils.pprint import pprint_run_response -from agno.utils.log import logger -from agno.vectordb.lancedb import LanceDb -from agno.vectordb.search import SearchType -from agno.embedder.cohere import CohereEmbedder + +```python +# Now import agno to trigger instrumentation, then import specific classes +from agno.agent import Agent, RunResponse +from agno.team import Team +from agno.models.openai import OpenAIChat +from agno.workflow import Workflow +from agno.tools.duckduckgo import DuckDuckGoTools +from agno.tools.hackernews import HackerNewsTools +from agno.tools.reasoning import ReasoningTools +from agno.tools.arxiv import ArxivTools +from agno.tools.googlesearch import GoogleSearchTools +from agno.knowledge.url import UrlKnowledge +from agno.utils.pprint import pprint_run_response +from agno.utils.log import logger +from agno.vectordb.lancedb import LanceDb +from agno.vectordb.search import SearchType +from agno.embedder.cohere import CohereEmbedder from agno.reranker.cohere import CohereReranker +import agentops + +``` +## Initialize AgentOps -import agentops +Let's initialize AgentOps to start tracking our agent activities. Make sure you have your AGENTOPS_API_KEY set in your environment variables. 
-# Initialize AgentOps -agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) -# Configuration +```python +agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) +# Sample configuration MODEL_ID = "gpt-4o-mini" - ``` @@ -86,14 +88,15 @@ def check_environment(): print("Environment variables checked successfully") return True - -# Check environment -check_environment() - ``` ## 1. Basic Agents and Teams -Let's start with creating individual agents and organizing them into teams. +Let's start with the fundamentals - creating individual agents and organizing them into teams. In Agno, agents are AI-powered entities with specific roles, and teams coordinate multiple agents to accomplish complex tasks. + +### Key Concepts: +- **Agent**: An individual AI entity with a specific role and model +- **Team**: A group of agents that can work together in different modes +- **Coordination Mode**: How agents interact (e.g., 'coordinate' mode) ```python @@ -120,14 +123,17 @@ def demonstrate_basic_agents(): except Exception as e: print(f"Basic agents error: {e}") - -# Run the demonstration -demonstrate_basic_agents() - ``` -## 2. Tool Integration with RAG +## 2. Tool Integration with RAG (Retrieval-Augmented Generation) + +One of Agno's most powerful features is the ability to integrate tools and knowledge bases. This example shows how to create an agent with: +- **Knowledge Base**: Using URLs as sources of information +- **Vector Database**: LanceDB for efficient similarity search +- **Embeddings**: Cohere embeddings for semantic understanding +- **Reranking**: Improving search results with Cohere reranker +- **Reasoning Tools**: Adding logical reasoning capabilities -This section demonstrates how to integrate tools with knowledge bases for Retrieval-Augmented Generation (RAG). +This creates an agent that can search through documentation and provide informed answers with sources. 
```python @@ -146,9 +152,9 @@ def demonstrate_tool_integration(): uri="tmp/lancedb", table_name="agno_docs", search_type=SearchType.hybrid, - embedder=CohereEmbedder(id="embed-v4.0"), - reranker=CohereReranker(model="rerank-v3.5"), - ), + embedder=CohereEmbedder(id="embed-v4.0"), # noqa: E821 + reranker=CohereReranker(model="rerank-v3.5"), # noqa: E821 + ), # noqa: E821 ) # Create agent with knowledge and reasoning tools @@ -175,14 +181,16 @@ def demonstrate_tool_integration(): except Exception as e: print(f"Tool integration error: {e}") - -# Run the demonstration -demonstrate_tool_integration() - ``` ## 3. Workflows with Caching -This section demonstrates how to create workflows that cache previous outputs for improved performance. +Workflows in Agno allow you to create reusable, stateful processes. This example demonstrates a caching workflow that: +- Stores previous responses in session state +- Checks cache before running expensive operations +- Yields responses in a streaming fashion +- Improves performance by avoiding redundant computations + +This is particularly useful for applications where users might ask similar questions repeatedly. ```python @@ -210,8 +218,10 @@ class CacheWorkflow(Workflow): # Cache the output after response is yielded self.session_state[message] = self.agent.run_response.content +``` +```python def demonstrate_workflows(): """Demonstrate workflow capabilities with caching.""" print("\n" + "=" * 60) @@ -235,14 +245,24 @@ def demonstrate_workflows(): except Exception as e: print(f"Workflow error: {e}") - -# Run the demonstration -demonstrate_workflows() - ``` ## 4. Collaborative Research Teams -This section demonstrates how to create teams of specialized research agents that collaborate on complex research tasks. +This is where Agno truly shines - creating teams of specialized agents that collaborate to solve complex problems. 
In this example, we create: + +### Specialized Agents: +- **Reddit Researcher**: Searches Reddit for community discussions +- **HackerNews Researcher**: Finds technical discussions on HackerNews +- **Academic Paper Researcher**: Searches scholarly articles and papers +- **Twitter Researcher**: Tracks real-time discussions and trends + +### Team Configuration: +- **Collaborate Mode**: Agents work together and discuss findings +- **Discussion Master**: The team lead that manages the conversation +- **Success Criteria**: Defines when the team has reached consensus +- **Agentic Context**: Agents maintain context throughout the discussion + +This demonstrates how multiple AI agents can work together like a human research team! ```python @@ -351,14 +371,16 @@ def demonstrate_research_team(): except Exception as e: print(f"Research team error: {e}") - -# Run the demonstration -demonstrate_research_team() - ``` ## 5. Async Operations -This section demonstrates how to run multiple agent operations concurrently using async/await patterns. +Modern applications often need to run multiple AI tasks concurrently. Agno supports async operations out of the box, allowing you to: +- Run multiple agent tasks in parallel +- Improve response times for complex queries +- Handle multiple user requests simultaneously +- Build scalable AI applications + +This example shows how to run three different agent tasks concurrently using Python's asyncio. ```python @@ -395,20 +417,31 @@ async def demonstrate_async_operations(): except Exception as e: print(f"Async operations error: {e}") +``` +## Running the Complete Tutorial -# Run the async demonstration -await demonstrate_async_operations() +The main function orchestrates all the demonstrations, showing how different Agno features work together. Each section is wrapped in error handling to ensure the tutorial continues even if one section encounters issues. -``` -## Complete Tutorial +### What Happens When You Run This: +1. 
Environment variables are checked
+2. AgentOps begins tracking all agent activities
+3. Each demonstration runs in sequence
+4. Results are displayed in the console
+5. All activities are logged to your AgentOps dashboard
 
-Run all demonstrations in sequence to see the full capabilities of Agno with AgentOps tracking.
+### Viewing Results in AgentOps:
+After running this tutorial, visit your AgentOps dashboard to see:
+- Agent conversation traces
+- Tool usage analytics
+- Performance metrics
+- Error tracking
+- Team collaboration patterns
 
 
 ```python
 async def main():
     """Main function to run all Agno demonstrations."""
-    print("Starting Comprehensive Agno Tutorial with AgentOps")
+    print("Starting Comprehensive Agno Tutorial with AgentOps")
     print("=" * 80)
 
     # Check environment
@@ -447,11 +480,36 @@ async def main():
     print("\nAll Agno demonstrations completed!")
     print("Check your AgentOps dashboard for detailed traces and metrics.")
+```
 
-# Uncomment the line below to run the complete tutorial
-# await main()
+```python
+if __name__ == "__main__":
+    asyncio.run(main())
 ```
 
+## Summary and Next Steps
+
+Congratulations! You've explored the key features of the Agno framework:
+
+### What We Covered:
+- ✅ Creating and coordinating basic agents and teams
+- ✅ Integrating tools and knowledge bases with RAG
+- ✅ Building efficient workflows with caching
+- ✅ Creating collaborative research teams with specialized agents
+- ✅ Running agents asynchronously for better performance
+
+### Next Steps:
+1. **Experiment with Different Models**: Try using different LLMs (GPT-4, Claude, etc.)
+2. **Add More Tools**: Explore Agno's extensive tool library
+3. **Build Custom Workflows**: Create workflows for your specific use cases
+4. **Scale Your Teams**: Add more agents with specialized roles
+5. 
**Monitor with AgentOps**: Use the dashboard to optimize your agents + +### Resources: +- [Agno Documentation](https://docs.agno.com) +- [AgentOps Dashboard](https://app.agentops.ai) + +Happy building with Agno! 🚀 diff --git a/docs/v2/integrations/agno.mdx b/docs/v2/integrations/agno.mdx index 1fa4db133..039fefa7b 100644 --- a/docs/v2/integrations/agno.mdx +++ b/docs/v2/integrations/agno.mdx @@ -3,7 +3,7 @@ title: Agno description: "Track your Agno agents, teams, and workflows with AgentOps" --- -[Agno](https://docs.agno.com) is a modern AI agent framework that provides tools for building intelligent agents, teams, and workflows. AgentOps automatically tracks all Agno operations including agent interactions, team coordination, tool usage, and workflow execution. +[Agno](https://docs.agno.com) is a modern AI agent framework for building intelligent agents, teams, and workflows. AgentOps provides automatic instrumentation to track all Agno operations including agent interactions, team coordination, tool usage, and workflow execution. ## Installation @@ -43,14 +43,6 @@ Set these as environment variables or in a `.env` file. ``` -Then load them in your Python code: -```python -from dotenv import load_dotenv -import os - -load_dotenv() -``` - ## Quick Start ```python @@ -80,11 +72,11 @@ response = agent.run("What are the key benefits of AI agents?") print(response.content) ``` -## Features +## AgentOps Integration -### 1. 
Basic Agents and Teams +### Basic Agent Tracking -Create individual agents and coordinate them in teams: +AgentOps automatically instruments Agno agents and teams: ```python import agentops @@ -92,232 +84,39 @@ from agno.agent import Agent from agno.team import Team from agno.models.openai import OpenAIChat +# Initialize AgentOps - this enables automatic tracking agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) -# Create individual agents -news_agent = Agent( - name="News Agent", - role="Get the latest news", - model=OpenAIChat(id="gpt-4o-mini") -) - -weather_agent = Agent( - name="Weather Agent", - role="Get the weather for the next 7 days", +# Create agents - automatically tracked by AgentOps +agent = Agent( + name="Assistant", + role="Helpful AI assistant", model=OpenAIChat(id="gpt-4o-mini") ) -# Create a team with coordination mode +# Create teams - coordination automatically tracked team = Team( - name="News and Weather Team", + name="Research Team", mode="coordinate", - members=[news_agent, weather_agent] + members=[agent] ) -# Run team task -response = team.run("What is the weather in Tokyo?") +# All operations are automatically logged to AgentOps +response = team.run("Analyze the current AI market trends") print(response.content) ``` -### 2. 
Tool Integration with RAG - -Integrate custom tools and knowledge bases for advanced capabilities: - -```python -from agno.tools.reasoning import ReasoningTools -from agno.knowledge.url import UrlKnowledge - - -# Create knowledge base with vector database -knowledge_base = UrlKnowledge( - urls=["https://docs.agno.com/introduction/agents.md"], - vector_db=LanceDb( - uri="tmp/lancedb", - table_name="agno_docs", - search_type=SearchType.hybrid, - embedder=CohereEmbedder(id="embed-v4.0"), - reranker=CohereReranker(model="rerank-v3.5"), - ), -) - -# Create agent with knowledge and reasoning tools -agent = Agent( - model=OpenAIChat(id="gpt-4o-mini"), - knowledge=knowledge_base, - search_knowledge=True, - tools=[ReasoningTools(add_instructions=True)], - instructions=[ - "Include sources in your response.", - "Always search your knowledge before answering the question.", - "Only include the output in your response. No other text.", - ], - markdown=True, -) - -agent.print_response( - "What are Agents?", - show_full_reasoning=True, -) -``` - -### 3. 
Workflows with Caching - -Create workflows that maintain state and cache results: - -```python -from agno.workflow import Workflow -from agno.agent import Agent, RunResponse -from typing import Iterator - -class CacheWorkflow(Workflow): - """A workflow that demonstrates caching capabilities.""" - - description: str = "A workflow that caches previous outputs" - agent = Agent(model=OpenAIChat(id="gpt-4o-mini")) - - def run(self, message: str) -> Iterator[RunResponse]: - # Check if the output is already cached - if self.session_state.get(message): - yield RunResponse(run_id=self.run_id, content=self.session_state.get(message)) - return - - # Run the agent and yield the response - yield from self.agent.run(message, stream=True) - - # Cache the output after response is yielded - self.session_state[message] = self.agent.run_response.content - -# Use the workflow -workflow = CacheWorkflow() -response = workflow.run(message="Tell me a joke.") -``` - -### 4. Collaborative Research Teams - -Create multi-agent teams with specialized roles: - -```python -from agno.tools.googlesearch import GoogleSearchTools -from agno.tools.hackernews import HackerNewsTools -from agno.tools.arxiv import ArxivTools -from agno.tools.duckduckgo import DuckDuckGoTools -from textwrap import dedent - -# Create specialized research agents -reddit_researcher = Agent( - name="Reddit Researcher", - role="Research a topic on Reddit", - model=OpenAIChat(id="gpt-4o"), - tools=[GoogleSearchTools()], - add_name_to_instructions=True, - instructions=dedent(""" - You are a Reddit researcher. - You will be given a topic to research on Reddit. - You will need to find the most relevant posts on Reddit. - """), -) - -academic_paper_researcher = Agent( - name="Academic Paper Researcher", - model=OpenAIChat("gpt-4o"), - role="Research academic papers and scholarly content", - tools=[GoogleSearchTools(), ArxivTools()], - add_name_to_instructions=True, - instructions=dedent(""" - You are an academic paper researcher. 
- You will be given a topic to research in academic literature. - Focus on peer-reviewed content and citations from reputable sources. - """), -) - -# Create collaborative team -agent_team = Team( - name="Discussion Team", - mode="collaborate", - model=OpenAIChat("gpt-4o"), - members=[reddit_researcher, academic_paper_researcher], - instructions=[ - "You are a discussion master.", - "You have to stop the discussion when you think the team has reached a consensus.", - ], - success_criteria="The team has reached a consensus.", - enable_agentic_context=True, - add_context=True, - show_tool_calls=True, - markdown=True, - debug_mode=True, - show_members_responses=True, -) - -agent_team.print_response( - message="Start the discussion on the topic: 'What is the best way to learn to code?'", - stream=True, - stream_intermediate_steps=True, -) -``` - -### 5. Async Operations - -Run multiple agents concurrently for improved performance: - -```python -import asyncio - -async def demonstrate_async_operations(): - agent = Agent(model=OpenAIChat(id="gpt-4o-mini")) - - # Define async tasks - async def task1(): - response = await agent.arun("Explain Python in one paragraph") - return f"Task 1: {response.content}" - - async def task2(): - response = await agent.arun("Explain JavaScript in one paragraph") - return f"Task 2: {response.content}" - - async def task3(): - response = await agent.arun("Compare them briefly") - return f"Task 3: {response.content}" - - # Run tasks concurrently - results = await asyncio.gather(task1(), task2(), task3()) - - for result in results: - print(result) - -# Run async operations -asyncio.run(demonstrate_async_operations()) -``` - ## What Gets Tracked -AgentOps automatically tracks: - -### Agent Operations -- **Agent Creation**: Names, roles, and model configurations -- **Agent Execution**: Input prompts, responses, and timing -- **Agent Metrics**: Token usage, costs, and performance - -### Team Coordination -- **Team Formation**: Member agents and 
coordination modes -- **Team Execution**: Task distribution and collaboration patterns -- **Team Results**: Aggregated responses and decision flows - -### Tool Usage -- **Tool Calls**: Function executions and parameters -- **Tool Results**: Return values and success/failure status -- **Tool Performance**: Execution times and resource usage - -### Workflow Management -- **Workflow States**: Session management and caching -- **Workflow Execution**: Step-by-step progression tracking -- **Workflow Optimization**: Cache hits/misses and performance metrics - -### Knowledge Base Operations -- **RAG Queries**: Knowledge searches and retrievals -- **Vector Operations**: Embedding generations and similarity searches -- **Knowledge Usage**: Source citations and relevance scoring +AgentOps automatically captures: +- **Agent Interactions**: All agent inputs, outputs, and configurations +- **Team Coordination**: Multi-agent collaboration patterns and results +- **Tool Executions**: Function calls, parameters, and return values +- **Workflow Steps**: Session states, caching, and performance metrics +- **Token Usage**: Costs and resource consumption across all operations +- **Timing Metrics**: Response times and concurrent operation performance +- **Error Tracking**: Failures and debugging information ## Dashboard and Monitoring @@ -330,15 +129,11 @@ Once your Agno agents are running with AgentOps, you can monitor them in the [Ag - **Team Collaboration**: Visual representation of multi-agent workflows - **Error Tracking**: Comprehensive error logs and debugging information -## Best Practices - -1. **Initialize AgentOps Early**: Always call `agentops.init()` before creating Agno components -2. **Environment Management**: Use `.env` files for secure API key management -3. **Error Handling**: Wrap agent operations in try-catch blocks for robust applications -4. **Resource Management**: Monitor token usage and costs in the AgentOps dashboard -5. 
**Team Design**: Design specialized agents with clear roles for better collaboration -6. **Workflow Optimization**: Use caching workflows for repeated operations +## Examples -**Workflow State Issues**: Check that workflow sessions are properly managed and cache keys are unique. + + + Comprehensive tutorial covering agents, teams, workflows, and RAG + + -For more detailed examples and advanced usage patterns, check out our [Agno Examples](/v2/examples/agno). \ No newline at end of file diff --git a/examples/agno/agno_comprehensive_tutorial.ipynb b/examples/agno/agno_comprehensive_tutorial.ipynb index 1a92d7f68..482f1ebd7 100644 --- a/examples/agno/agno_comprehensive_tutorial.ipynb +++ b/examples/agno/agno_comprehensive_tutorial.ipynb @@ -1,17 +1,45 @@ { "cells": [ { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "# Agno Framework Comprehensive Tutorial with AgentOps\n", + "\n", + "This tutorial demonstrates the powerful capabilities of the Agno framework for building AI agents and teams. Agno provides a flexible and intuitive way to create intelligent agents that can collaborate, use tools, and perform complex tasks.\n", + "\n", + "## What You'll Learn\n", + "\n", + "1. **Basic Agents and Teams** - Creating individual agents and coordinating them in teams\n", + "2. **Tool Integration with RAG** - Integrating tools and knowledge bases for enhanced capabilities\n", + "3. **Workflows with Caching** - Building efficient workflows with state management\n", + "4. **Collaborative Research Teams** - Creating specialized agents that work together\n", + "5. 
**Async Operations** - Running agents asynchronously for better performance\n", + "\n", + "Throughout this tutorial, AgentOps will track all agent activities, providing detailed insights into your AI system's behavior.\n" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, "source": [ - "%pip install agno agentops python-dotenv openai cohere\n" + "## Getting Started\n", + "\n", + "First, let's import the necessary libraries and set up our environment. We'll need Agno for building agents and AgentOps for monitoring and tracking.\n" ] }, { "cell_type": "code", "execution_count": null, + "id": "afacf485", "metadata": {}, "outputs": [], "source": [ @@ -19,41 +47,91 @@ "import asyncio\n", "from typing import Iterator\n", "from textwrap import dedent\n", - "from dotenv import load_dotenv\n", - "\n", + "from dotenv import load_dotenv" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "376e56d8", + "metadata": {}, + "outputs": [], + "source": [ "# Load environment variables\n", - "load_dotenv()\n", - "\n", - "# Import agno components\n", - "from agno.agent import Agent, RunResponse\n", - "from agno.team import Team\n", - "from agno.models.openai import OpenAIChat\n", - "from agno.workflow import Workflow\n", - "from agno.tools.duckduckgo import DuckDuckGoTools\n", - "from agno.tools.hackernews import HackerNewsTools\n", - "from agno.tools.reasoning import ReasoningTools\n", - "from agno.tools.arxiv import ArxivTools\n", - "from agno.tools.googlesearch import GoogleSearchTools\n", - "from agno.knowledge.url import UrlKnowledge\n", - "from agno.utils.pprint import pprint_run_response\n", - "from agno.utils.log import logger\n", - "from agno.vectordb.lancedb import LanceDb\n", - "from agno.vectordb.search import SearchType\n", - "from agno.embedder.cohere import CohereEmbedder\n", - "from agno.reranker.cohere import CohereReranker\n", + "load_dotenv()" + ] + }, + { + "cell_type": "raw", + "metadata": { + 
"vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Environment Setup\n", "\n", - "import agentops\n", + "Now we'll load environment variables and import the Agno framework components. The order of imports is important - we import agno first to ensure proper instrumentation, then import specific classes we'll use.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f8fb8c25", + "metadata": {}, + "outputs": [], + "source": [ + "# Now import agno to trigger instrumentation, then import specific classes\n", + "from agno.agent import Agent, RunResponse \n", + "from agno.team import Team \n", + "from agno.models.openai import OpenAIChat \n", + "from agno.workflow import Workflow \n", + "from agno.tools.duckduckgo import DuckDuckGoTools \n", + "from agno.tools.hackernews import HackerNewsTools \n", + "from agno.tools.reasoning import ReasoningTools \n", + "from agno.tools.arxiv import ArxivTools \n", + "from agno.tools.googlesearch import GoogleSearchTools \n", + "from agno.knowledge.url import UrlKnowledge \n", + "from agno.utils.pprint import pprint_run_response \n", + "from agno.utils.log import logger \n", + "from agno.vectordb.lancedb import LanceDb \n", + "from agno.vectordb.search import SearchType \n", + "from agno.embedder.cohere import CohereEmbedder \n", + "from agno.reranker.cohere import CohereReranker\n", + "import agentops \n", + " " + ] + }, + { + "cell_type": "raw", + "id": "c8b9232b", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Initialize AgentOps\n", "\n", - "# Initialize AgentOps\n", + "Let's initialize AgentOps to start tracking our agent activities. 
Make sure you have your AGENTOPS_API_KEY set in your environment variables.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13c47f0c", + "metadata": {}, + "outputs": [], + "source": [ "agentops.init(api_key=os.getenv(\"AGENTOPS_API_KEY\"))\n", - "\n", - "# Configuration\n", - "MODEL_ID = \"gpt-4o-mini\"\n" + "# Sample configuration\n", + "MODEL_ID = \"gpt-4o-mini\"" ] }, { "cell_type": "code", "execution_count": null, + "id": "129c88ab", "metadata": {}, "outputs": [], "source": [ @@ -68,14 +146,12 @@ " return False\n", "\n", " print(\"Environment variables checked successfully\")\n", - " return True\n", - "\n", - "# Check environment\n", - "check_environment()\n" + " return True" ] }, { "cell_type": "raw", + "id": "eb9628b8", "metadata": { "vscode": { "languageId": "raw" @@ -84,12 +160,18 @@ "source": [ "## 1. Basic Agents and Teams\n", "\n", - "Let's start with creating individual agents and organizing them into teams.\n" + "Let's start with the fundamentals - creating individual agents and organizing them into teams. In Agno, agents are AI-powered entities with specific roles, and teams coordinate multiple agents to accomplish complex tasks.\n", + "\n", + "### Key Concepts:\n", + "- **Agent**: An individual AI entity with a specific role and model\n", + "- **Team**: A group of agents that can work together in different modes\n", + "- **Coordination Mode**: How agents interact (e.g., 'coordinate' mode)\n" ] }, { "cell_type": "code", "execution_count": null, + "id": "ccf4268c", "metadata": {}, "outputs": [], "source": [ @@ -115,28 +197,34 @@ " print(f\"Team Response: {response.content}\")\n", "\n", " except Exception as e:\n", - " print(f\"Basic agents error: {e}\")\n", - "\n", - "# Run the demonstration\n", - "demonstrate_basic_agents()\n" + " print(f\"Basic agents error: {e}\")" ] }, { "cell_type": "raw", + "id": "4f7feccc", "metadata": { "vscode": { "languageId": "raw" } }, "source": [ - "## 2. Tool Integration with RAG\n", + "## 2. 
Tool Integration with RAG (Retrieval-Augmented Generation)\n", "\n", - "This section demonstrates how to integrate tools with knowledge bases for Retrieval-Augmented Generation (RAG).\n" + "One of Agno's most powerful features is the ability to integrate tools and knowledge bases. This example shows how to create an agent with:\n", + "- **Knowledge Base**: Using URLs as sources of information\n", + "- **Vector Database**: LanceDB for efficient similarity search\n", + "- **Embeddings**: Cohere embeddings for semantic understanding\n", + "- **Reranking**: Improving search results with Cohere reranker\n", + "- **Reasoning Tools**: Adding logical reasoning capabilities\n", + "\n", + "This creates an agent that can search through documentation and provide informed answers with sources.\n" ] }, { "cell_type": "code", "execution_count": null, + "id": "3ff48560", "metadata": {}, "outputs": [], "source": [ @@ -155,9 +243,9 @@ " uri=\"tmp/lancedb\",\n", " table_name=\"agno_docs\",\n", " search_type=SearchType.hybrid,\n", - " embedder=CohereEmbedder(id=\"embed-v4.0\"),\n", - " reranker=CohereReranker(model=\"rerank-v3.5\"),\n", - " ),\n", + " embedder=CohereEmbedder(id=\"embed-v4.0\"), # noqa: E821\n", + " reranker=CohereReranker(model=\"rerank-v3.5\"), # noqa: E821\n", + " ), # noqa: E821\n", " )\n", "\n", " # Create agent with knowledge and reasoning tools\n", @@ -183,10 +271,7 @@ " )\n", "\n", " except Exception as e:\n", - " print(f\"Tool integration error: {e}\")\n", - "\n", - "# Run the demonstration\n", - "demonstrate_tool_integration()\n" + " print(f\"Tool integration error: {e}\")" ] }, { @@ -199,12 +284,19 @@ "source": [ "## 3. Workflows with Caching\n", "\n", - "This section demonstrates how to create workflows that cache previous outputs for improved performance.\n" + "Workflows in Agno allow you to create reusable, stateful processes. 
This example demonstrates a caching workflow that:\n", + "- Stores previous responses in session state\n", + "- Checks cache before running expensive operations\n", + "- Yields responses in a streaming fashion\n", + "- Improves performance by avoiding redundant computations\n", + "\n", + "This is particularly useful for applications where users might ask similar questions repeatedly.\n" ] }, { "cell_type": "code", "execution_count": null, + "id": "b4408138", "metadata": {}, "outputs": [], "source": [ @@ -231,9 +323,16 @@ " yield from self.agent.run(message, stream=True)\n", "\n", " # Cache the output after response is yielded\n", - " self.session_state[message] = self.agent.run_response.content\n", - "\n", - "\n", + " self.session_state[message] = self.agent.run_response.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2f8e1e6c", + "metadata": {}, + "outputs": [], + "source": [ "def demonstrate_workflows():\n", " \"\"\"Demonstrate workflow capabilities with caching.\"\"\"\n", " print(\"\\n\" + \"=\" * 60)\n", @@ -256,10 +355,7 @@ " pprint_run_response(response, markdown=True, show_time=True)\n", "\n", " except Exception as e:\n", - " print(f\"Workflow error: {e}\")\n", - "\n", - "# Run the demonstration\n", - "demonstrate_workflows()\n" + " print(f\"Workflow error: {e}\")" ] }, { @@ -272,12 +368,27 @@ "source": [ "## 4. Collaborative Research Teams\n", "\n", - "This section demonstrates how to create teams of specialized research agents that collaborate on complex research tasks.\n" + "This is where Agno truly shines - creating teams of specialized agents that collaborate to solve complex problems. 
In this example, we create:\n", + "\n", + "### Specialized Agents:\n", + "- **Reddit Researcher**: Searches Reddit for community discussions\n", + "- **HackerNews Researcher**: Finds technical discussions on HackerNews\n", + "- **Academic Paper Researcher**: Searches scholarly articles and papers\n", + "- **Twitter Researcher**: Tracks real-time discussions and trends\n", + "\n", + "### Team Configuration:\n", + "- **Collaborate Mode**: Agents work together and discuss findings\n", + "- **Discussion Master**: The team lead that manages the conversation\n", + "- **Success Criteria**: Defines when the team has reached consensus\n", + "- **Agentic Context**: Agents maintain context throughout the discussion\n", + "\n", + "This demonstrates how multiple AI agents can work together like a human research team!\n" ] }, { "cell_type": "code", "execution_count": null, + "id": "db870fdb", "metadata": {}, "outputs": [], "source": [ @@ -385,14 +496,12 @@ " )\n", "\n", " except Exception as e:\n", - " print(f\"Research team error: {e}\")\n", - "\n", - "# Run the demonstration\n", - "demonstrate_research_team()\n" + " print(f\"Research team error: {e}\")" ] }, { "cell_type": "raw", + "id": "72f38f42", "metadata": { "vscode": { "languageId": "raw" @@ -401,12 +510,19 @@ "source": [ "## 5. Async Operations\n", "\n", - "This section demonstrates how to run multiple agent operations concurrently using async/await patterns.\n" + "Modern applications often need to run multiple AI tasks concurrently. 
Agno supports async operations out of the box, allowing you to:\n", + "- Run multiple agent tasks in parallel\n", + "- Improve response times for complex queries\n", + "- Handle multiple user requests simultaneously\n", + "- Build scalable AI applications\n", + "\n", + "This example shows how to run three different agent tasks concurrently using Python's asyncio.\n" ] }, { "cell_type": "code", "execution_count": null, + "id": "17e09033", "metadata": {}, "outputs": [], "source": [ @@ -442,34 +558,48 @@ " print()\n", "\n", " except Exception as e:\n", - " print(f\"Async operations error: {e}\")\n", - "\n", - "# Run the async demonstration\n", - "await demonstrate_async_operations()\n" + " print(f\"Async operations error: {e}\")" ] }, { "cell_type": "raw", + "id": "0df119bd", "metadata": { "vscode": { "languageId": "raw" } }, "source": [ - "## Complete Tutorial\n", - "\n", - "Run all demonstrations in sequence to see the full capabilities of Agno with AgentOps tracking.\n" + "## Running the Complete Tutorial\n", + "\n", + "The main function orchestrates all the demonstrations, showing how different Agno features work together. Each section is wrapped in error handling to ensure the tutorial continues even if one section encounters issues.\n", + "\n", + "### What Happens When You Run This:\n", + "1. Environment variables are checked\n", + "2. AgentOps begins tracking all agent activities\n", + "3. Each demonstration runs in sequence\n", + "4. Results are displayed in the console\n", + "5. 
All activities are logged to your AgentOps dashboard\n", + "\n", + "### Viewing Results in AgentOps:\n", + "After running this tutorial, visit your AgentOps dashboard to see:\n", + "- Agent conversation traces\n", + "- Tool usage analytics\n", + "- Performance metrics\n", + "- Error tracking\n", + "- Team collaboration patterns\n" ] }, { "cell_type": "code", "execution_count": null, + "id": "b617da0c", "metadata": {}, "outputs": [], "source": [ "async def main():\n", " \"\"\"Main function to run all Agno demonstrations.\"\"\"\n", - " print(\"Starting Comprehensive Agno Tutorial with AgentOps\")\n", + " print(\"Starting Comprehensive Agno Example with AgentOps\")\n", " print(\"=\" * 80)\n", "\n", " # Check environment\n", @@ -507,18 +637,65 @@ " print(f\"Skipping async demo due to: {e}\")\n", "\n", " print(\"\\nAll Agno demonstrations completed!\")\n", - " print(\"Check your AgentOps dashboard for detailed traces and metrics.\")\n", - "\n", - "# Uncomment the line below to run the complete tutorial\n", - "# await main()\n" + " print(\"Check your AgentOps dashboard for detailed traces and metrics.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5da195a4", + "metadata": {}, + "outputs": [], + "source": [ + "if __name__ == \"__main__\":\n", + " asyncio.run(main())" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Summary and Next Steps\n", + "\n", + "Congratulations! You've explored the key features of the Agno framework:\n", + "\n", + "### What We Covered:\n", + "- ✅ Creating and coordinating basic agents and teams\n", + "- ✅ Integrating tools and knowledge bases with RAG\n", + "- ✅ Building efficient workflows with caching\n", + "- ✅ Creating collaborative research teams with specialized agents\n", + "- ✅ Running agents asynchronously for better performance\n", + "\n", + "### Next Steps:\n", + "1. 
**Experiment with Different Models**: Try using different LLMs (GPT-4, Claude, etc.)\n", + "2. **Add More Tools**: Explore Agno's extensive tool library\n", + "3. **Build Custom Workflows**: Create workflows for your specific use cases\n", + "4. **Scale Your Teams**: Add more agents with specialized roles\n", + "5. **Monitor with AgentOps**: Use the dashboard to optimize your agents\n", + "\n", + "### Resources:\n", + "- [Agno Documentation](https://docs.agno.com)\n", + "- [AgentOps Dashboard](https://app.agentops.ai)\n", + "- [More Examples](https://github.com/agno-ai/agno/tree/main/examples)\n", + "\n", + "Happy building with Agno! 🚀\n" ] } ], "metadata": { + "jupytext": { + "cell_metadata_filter": "-all", + "main_language": "python", + "notebook_metadata_filter": "-all" + }, "language_info": { "name": "python" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } From 2a7f5facc68869141854bd4f54ab19f771873ecf Mon Sep 17 00:00:00 2001 From: fenilfaldu Date: Thu, 12 Jun 2025 20:52:38 +0530 Subject: [PATCH 08/14] v2 docs updated --- docs/v2/examples/agno.mdx | 586 +++++-------------- docs/v2/integrations/agno.mdx | 20 +- examples/agno/agno_async_operations.ipynb | 243 ++++++++ examples/agno/agno_async_operations.py | 146 +++++ examples/agno/agno_basic_agents.ipynb | 328 +++++++++++ examples/agno/agno_basic_agents.py | 155 +++++ examples/agno/agno_comprehensive_tutorial.py | 38 +- examples/agno/agno_research_team.ipynb | 442 ++++++++++++++ examples/agno/agno_research_team.py | 249 ++++++++ examples/agno/agno_tool_integrations.ipynb | 443 ++++++++++++++ examples/agno/agno_tool_integrations.py | 203 +++++++ examples/agno/agno_workflow_setup.ipynb | 446 ++++++++++++++ examples/agno/agno_workflow_setup.py | 215 +++++++ 13 files changed, 3058 insertions(+), 456 deletions(-) create mode 100644 examples/agno/agno_async_operations.ipynb create mode 100644 examples/agno/agno_async_operations.py create mode 100644 examples/agno/agno_basic_agents.ipynb create mode 
100644 examples/agno/agno_basic_agents.py create mode 100644 examples/agno/agno_research_team.ipynb create mode 100644 examples/agno/agno_research_team.py create mode 100644 examples/agno/agno_tool_integrations.ipynb create mode 100644 examples/agno/agno_tool_integrations.py create mode 100644 examples/agno/agno_workflow_setup.ipynb create mode 100644 examples/agno/agno_workflow_setup.py diff --git a/docs/v2/examples/agno.mdx b/docs/v2/examples/agno.mdx index bc83dcaaa..42963263f 100644 --- a/docs/v2/examples/agno.mdx +++ b/docs/v2/examples/agno.mdx @@ -1,83 +1,81 @@ --- title: 'Agno' -description: 'Agno Framework Comprehensive Tutorial with AgentOps' +description: 'Basic Agents and Teams with Agno' --- -{/* SOURCE_FILE: examples/agno/agno_comprehensive_tutorial.ipynb */} +{/* SOURCE_FILE: examples/agno/agno_basic_agents.ipynb */} -_View Notebook on Github_ +_View Notebook on Github_ -# Agno Framework Comprehensive Tutorial with AgentOps +# Basic Agents and Teams with Agno -This tutorial demonstrates the powerful capabilities of the Agno framework for building AI agents and teams. Agno provides a flexible and intuitive way to create intelligent agents that can collaborate, use tools, and perform complex tasks. +This notebook demonstrates the fundamentals of creating AI agents and organizing them into collaborative teams using the Agno framework. -## What You'll Learn +## Overview -1. **Basic Agents and Teams** - Creating individual agents and coordinating them in teams -2. **Tool Integration with RAG** - Integrating tools and knowledge bases for enhanced capabilities -3. **Workflows with Caching** - Building efficient workflows with state management -4. **Collaborative Research Teams** - Creating specialized agents that work together -5. 
**Async Operations** - Running agents asynchronously for better performance +In this example, you'll learn how to: +- **Create specialized AI agents** with specific roles and expertise +- **Organize agents into teams** for collaborative problem-solving +- **Use coordination modes** for effective agent communication +- **Monitor agent interactions** with AgentOps integration -Throughout this tutorial, AgentOps will track all agent activities, providing detailed insights into your AI system's behavior. -## Getting Started +## Key Concepts -First, let's import the necessary libraries and set up our environment. We'll need Agno for building agents and AgentOps for monitoring and tracking. +### Agents +Individual AI entities with specific roles and capabilities. Each agent can be assigned a particular area of expertise, making them specialists in their domain. +### Teams +Collections of agents that work together to solve complex tasks. Teams can coordinate their responses, share information, and delegate tasks based on each agent's expertise. -```python -import os -import asyncio -from typing import Iterator -from textwrap import dedent -from dotenv import load_dotenv -``` +### Coordination Modes +Different strategies for how agents within a team interact and collaborate. The "coordinate" mode enables intelligent task routing and information sharing. +## Pre-requisites -```python -# Load environment variables -load_dotenv() +Before running this notebook, ensure you have: +- **AgentOps API key** from [AgentOps](https://agentops.ai) +- **OpenAI API key** from [OpenAI](https://openai.com) + +Create a `.env` file in your project root with: ``` -## Environment Setup +AGENTOPS_API_KEY=your_agentops_key_here +OPENAI_API_KEY=your_openai_key_here +``` +## Implementation -Now we'll load environment variables and import the Agno framework components. The order of imports is important - we import agno first to ensure proper instrumentation, then import specific classes we'll use. 
+Let's start by importing the necessary libraries and setting up our environment. ```python -# Now import agno to trigger instrumentation, then import specific classes -from agno.agent import Agent, RunResponse -from agno.team import Team -from agno.models.openai import OpenAIChat -from agno.workflow import Workflow -from agno.tools.duckduckgo import DuckDuckGoTools -from agno.tools.hackernews import HackerNewsTools -from agno.tools.reasoning import ReasoningTools -from agno.tools.arxiv import ArxivTools -from agno.tools.googlesearch import GoogleSearchTools -from agno.knowledge.url import UrlKnowledge -from agno.utils.pprint import pprint_run_response -from agno.utils.log import logger -from agno.vectordb.lancedb import LanceDb -from agno.vectordb.search import SearchType -from agno.embedder.cohere import CohereEmbedder -from agno.reranker.cohere import CohereReranker -import agentops - -``` -## Initialize AgentOps - -Let's initialize AgentOps to start tracking our agent activities. Make sure you have your AGENTOPS_API_KEY set in your environment variables. +import os +from agno.agent import Agent +from agno.team import Team +from agno.models.openai import OpenAIChat +import asyncio +import agentops +from dotenv import load_dotenv +# Load environment variables from .env file +load_dotenv() -```python +# Initialize AgentOps for monitoring and analytics agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) -# Sample configuration -MODEL_ID = "gpt-4o-mini" + +# Configuration +MODEL_ID = "gpt-4o-mini" ``` +## Environment Validation + +Before we create our agents, let's ensure all required API keys are properly configured: ```python def check_environment(): - """Check if required environment variables are set.""" + """ + Verify that all required API keys are properly configured. 
+ + Returns: + bool: True if all required environment variables are set + """ required_vars = ["AGENTOPS_API_KEY", "OPENAI_API_KEY"] missing_vars = [var for var in required_vars if not os.getenv(var)] @@ -86,430 +84,148 @@ def check_environment(): print("Please set these in your .env file or environment") return False - print("Environment variables checked successfully") + print("✓ Environment variables checked successfully") return True ``` -## 1. Basic Agents and Teams +## Creating Agents and Teams -Let's start with the fundamentals - creating individual agents and organizing them into teams. In Agno, agents are AI-powered entities with specific roles, and teams coordinate multiple agents to accomplish complex tasks. +Now let's create our specialized agents and organize them into a collaborative team: -### Key Concepts: -- **Agent**: An individual AI entity with a specific role and model -- **Team**: A group of agents that can work together in different modes -- **Coordination Mode**: How agents interact (e.g., 'coordinate' mode) +### Step 1: Create Individual Agents +We'll create two agents with different specializations: +- **News Agent**: Specializes in gathering and analyzing news +- **Weather Agent**: Specializes in weather forecasting and analysis +### Step 2: Form a Team +We'll combine these agents into a team using the "coordinate" mode, which enables: +- Intelligent task routing based on agent expertise +- Information sharing between agents +- Collaborative problem-solving -```python -def demonstrate_basic_agents(): - """Demonstrate basic agent creation and team coordination.""" - print("\n" + "=" * 60) - print("BASIC AGENTS AND TEAMS") - print("=" * 60) - - try: - # Create individual agents - news_agent = Agent(name="News Agent", role="Get the latest news", model=OpenAIChat(id=MODEL_ID)) - - weather_agent = Agent( - name="Weather Agent", role="Get the weather for the next 7 days", model=OpenAIChat(id=MODEL_ID) - ) - - # Create a team with coordination 
mode - team = Team(name="News and Weather Team", mode="coordinate", members=[news_agent, weather_agent]) - - # Run team task - response = team.run("What is the weather in Tokyo?") - print(f"Team Response: {response.content}") - - except Exception as e: - print(f"Basic agents error: {e}") -``` -## 2. Tool Integration with RAG (Retrieval-Augmented Generation) - -One of Agno's most powerful features is the ability to integrate tools and knowledge bases. This example shows how to create an agent with: -- **Knowledge Base**: Using URLs as sources of information -- **Vector Database**: LanceDB for efficient similarity search -- **Embeddings**: Cohere embeddings for semantic understanding -- **Reranking**: Improving search results with Cohere reranker -- **Reasoning Tools**: Adding logical reasoning capabilities - -This creates an agent that can search through documentation and provide informed answers with sources. +### Step 3: Execute Tasks +The team will automatically delegate tasks to the most appropriate agent(s) based on the query. ```python -def demonstrate_tool_integration(): - """Demonstrate tool integration with RAG and knowledge base.""" - print("\n" + "=" * 60) - print("TOOL INTEGRATION WITH RAG") - print("=" * 60) - - try: - # Create knowledge base with vector database - knowledge_base = UrlKnowledge( - urls=["https://docs.agno.com/introduction/agents.md"], - # Use LanceDB as the vector database, store embeddings in the `agno_docs` table - vector_db=LanceDb( - uri="tmp/lancedb", - table_name="agno_docs", - search_type=SearchType.hybrid, - embedder=CohereEmbedder(id="embed-v4.0"), # noqa: E821 - reranker=CohereReranker(model="rerank-v3.5"), # noqa: E821 - ), # noqa: E821 - ) - - # Create agent with knowledge and reasoning tools - agent = Agent( - model=OpenAIChat(id=MODEL_ID), - # Agentic RAG is enabled by default when `knowledge` is provided to the Agent. 
- knowledge=knowledge_base, - # search_knowledge=True gives the Agent the ability to search on demand - search_knowledge=True, - tools=[ReasoningTools(add_instructions=True)], - instructions=[ - "Include sources in your response.", - "Always search your knowledge before answering the question.", - "Only include the output in your response. No other text.", - ], - markdown=True, - ) - - print("Running RAG agent with knowledge base...") - agent.print_response( - "What are Agents?", - show_full_reasoning=True, - ) - - except Exception as e: - print(f"Tool integration error: {e}") -``` -## 3. Workflows with Caching - -Workflows in Agno allow you to create reusable, stateful processes. This example demonstrates a caching workflow that: -- Stores previous responses in session state -- Checks cache before running expensive operations -- Yields responses in a streaming fashion -- Improves performance by avoiding redundant computations - -This is particularly useful for applications where users might ask similar questions repeatedly. 
- - -```python -class CacheWorkflow(Workflow): - """A workflow that demonstrates caching capabilities.""" - - # Purely descriptive, not used by the workflow - description: str = "A workflow that caches previous outputs" - - # Add agents or teams as attributes on the workflow - agent = Agent(model=OpenAIChat(id=MODEL_ID)) - - # Write the logic in the `run()` method - def run(self, message: str) -> Iterator[RunResponse]: - logger.info(f"Checking cache for '{message}'") - # Check if the output is already cached - if self.session_state.get(message): - logger.info(f"Cache hit for '{message}'") - yield RunResponse(run_id=self.run_id, content=self.session_state.get(message)) - return - - logger.info(f"Cache miss for '{message}'") - # Run the agent and yield the response - yield from self.agent.run(message, stream=True) - - # Cache the output after response is yielded - self.session_state[message] = self.agent.run_response.content -``` - - -```python -def demonstrate_workflows(): - """Demonstrate workflow capabilities with caching.""" - print("\n" + "=" * 60) - print("WORKFLOWS WITH CACHING") - print("=" * 60) - - try: - workflow = CacheWorkflow() - - print("First run (cache miss):") - # Run workflow (this takes ~1s) - response: Iterator[RunResponse] = workflow.run(message="Tell me a joke.") - # Print the response - pprint_run_response(response, markdown=True, show_time=True) - - print("\nSecond run (cache hit):") - # Run workflow again (this is immediate because of caching) - response: Iterator[RunResponse] = workflow.run(message="Tell me a joke.") - # Print the response - pprint_run_response(response, markdown=True, show_time=True) - - except Exception as e: - print(f"Workflow error: {e}") -``` -## 4. Collaborative Research Teams - -This is where Agno truly shines - creating teams of specialized agents that collaborate to solve complex problems. 
In this example, we create: - -### Specialized Agents: -- **Reddit Researcher**: Searches Reddit for community discussions -- **HackerNews Researcher**: Finds technical discussions on HackerNews -- **Academic Paper Researcher**: Searches scholarly articles and papers -- **Twitter Researcher**: Tracks real-time discussions and trends - -### Team Configuration: -- **Collaborate Mode**: Agents work together and discuss findings -- **Discussion Master**: The team lead that manages the conversation -- **Success Criteria**: Defines when the team has reached consensus -- **Agentic Context**: Agents maintain context throughout the discussion - -This demonstrates how multiple AI agents can work together like a human research team! - - -```python -def demonstrate_research_team(): - """Demonstrate collaborative research team with multiple specialized agents.""" +def demonstrate_basic_agents(): + """ + Demonstrate basic agent creation and team coordination. + + This function shows how to: + 1. Create specialized agents with specific roles + 2. Organize agents into a team + 3. Use the team to solve tasks that require multiple perspectives + """ print("\n" + "=" * 60) - print("COLLABORATIVE RESEARCH TEAM") + print("BASIC AGENTS AND TEAMS DEMONSTRATION") print("=" * 60) try: - # Create specialized research agents - reddit_researcher = Agent( - name="Reddit Researcher", - role="Research a topic on Reddit", - model=OpenAIChat(id="gpt-4o"), - tools=[GoogleSearchTools()], - add_name_to_instructions=True, - instructions=dedent( - """ - You are a Reddit researcher. - You will be given a topic to research on Reddit. - You will need to find the most relevant posts on Reddit. - """ - ), + # Create individual agents with specific roles + # Each agent has a name and a role that defines its expertise + + print("\n1. 
Creating specialized agents...") + + # News Agent: Specializes in gathering and analyzing news information + news_agent = Agent( + name="News Agent", + role="Get the latest news and provide news analysis", + model=OpenAIChat(id=MODEL_ID) ) + print(" ✓ News Agent created") - hackernews_researcher = Agent( - name="HackerNews Researcher", - model=OpenAIChat("gpt-4o"), - role="Research a topic on HackerNews.", - tools=[HackerNewsTools()], - add_name_to_instructions=True, - instructions=dedent( - """ - You are a HackerNews researcher. - You will be given a topic to research on HackerNews. - You will need to find the most relevant posts on HackerNews. - """ - ), - ) - - academic_paper_researcher = Agent( - name="Academic Paper Researcher", - model=OpenAIChat("gpt-4o"), - role="Research academic papers and scholarly content", - tools=[GoogleSearchTools(), ArxivTools()], - add_name_to_instructions=True, - instructions=dedent( - """ - You are an academic paper researcher. - You will be given a topic to research in academic literature. - You will need to find relevant scholarly articles, papers, and academic discussions. - Focus on peer-reviewed content and citations from reputable sources. - Provide brief summaries of key findings and methodologies. - """ - ), - ) - - twitter_researcher = Agent( - name="Twitter Researcher", - model=OpenAIChat("gpt-4o"), - role="Research trending discussions and real-time updates", - tools=[DuckDuckGoTools()], - add_name_to_instructions=True, - instructions=dedent( - """ - You are a Twitter/X researcher. - You will be given a topic to research on Twitter/X. - You will need to find trending discussions, influential voices, and real-time updates. - Focus on verified accounts and credible sources when possible. - Track relevant hashtags and ongoing conversations. 
- """ - ), - ) - - # Create collaborative team - agent_team = Team( - name="Discussion Team", - mode="collaborate", - model=OpenAIChat("gpt-4o"), - members=[ - reddit_researcher, - hackernews_researcher, - academic_paper_researcher, - twitter_researcher, - ], - instructions=[ - "You are a discussion master.", - "You have to stop the discussion when you think the team has reached a consensus.", - ], - success_criteria="The team has reached a consensus.", - enable_agentic_context=True, - add_context=True, - show_tool_calls=True, - markdown=True, - debug_mode=True, - show_members_responses=True, + # Weather Agent: Specializes in weather forecasting and analysis + weather_agent = Agent( + name="Weather Agent", + role="Get weather forecasts and provide weather analysis", + model=OpenAIChat(id=MODEL_ID) ) + print(" ✓ Weather Agent created") - print("Running collaborative research team...") - agent_team.print_response( - message="Start the discussion on the topic: 'What is the best way to learn to code?'", - stream=True, - stream_intermediate_steps=True, + # Create a team with coordination mode + # The "coordinate" mode allows agents to work together and share information + print("\n2. Creating a team with coordination capabilities...") + team = Team( + name="News and Weather Team", + mode="coordinate", # Agents will coordinate their responses + members=[news_agent, weather_agent] ) + print(" ✓ Team created with 2 agents") - except Exception as e: - print(f"Research team error: {e}") -``` -## 5. Async Operations - -Modern applications often need to run multiple AI tasks concurrently. Agno supports async operations out of the box, allowing you to: -- Run multiple agent tasks in parallel -- Improve response times for complex queries -- Handle multiple user requests simultaneously -- Build scalable AI applications - -This example shows how to run three different agent tasks concurrently using Python's asyncio. 
- - -```python -async def demonstrate_async_operations(): - """Demonstrate async operations with Agno agents.""" - print("\n" + "=" * 60) - print("ASYNC OPERATIONS") - print("=" * 60) - - try: - # Create async tasks with different agents - agent = Agent(model=OpenAIChat(id=MODEL_ID)) - - # Define async tasks - async def task1(): - response = await agent.arun("Explain Python in one paragraph") - return f"Task 1: {response.content}" - - async def task2(): - response = await agent.arun("Explain JavaScript in one paragraph") - return f"Task 2: {response.content}" - - async def task3(): - response = await agent.arun("Compare them briefly") - return f"Task 3: {response.content}" - - # Run tasks concurrently - print("Running async tasks concurrently...") - results = await asyncio.gather(task1(), task2(), task3()) + # Run a task that requires team coordination + # The team will automatically determine which agent(s) should respond + print("\n3. Running team task...") + print(" Query: 'What is the weather in Tokyo?'") + + response = team.run("What is the weather in Tokyo?") + + print("\n4. Team Response:") + print("-" * 60) + print(f"{response.content}") + print("-" * 60) - for result in results: - print(result) - print() except Exception as e: - print(f"Async operations error: {e}") + print(f"Error during basic agents demonstration: {e}") + print("This might be due to API issues or configuration problems") ``` -## Running the Complete Tutorial - -The main function orchestrates all the demonstrations, showing how different Agno features work together. Each section is wrapped in error handling to ensure the tutorial continues even if one section encounters issues. - -### What Happens When You Run This: -1. Environment variables are checked -2. AgentOps begins tracking all agent activities -3. Each demonstration runs in sequence -4. Results are displayed in the console -5. 
All activities are logged to your AgentOps dashboard +## Running the Demo -### Viewing Results in AgentOps: -After running this tutorial, visit your AgentOps dashboard to see: -- Agent conversation traces -- Tool usage analytics -- Performance metrics -- Error tracking -- Team collaboration patterns +Let's execute our main function to see the agents and teams in action: ```python async def main(): - """Main function to run all Agno demonstrations.""" - print("Starting Comprehensive Agno Example with AgentOps") - print("=" * 80) - - # Check environment + """ + Main function that orchestrates the demonstration. + + This async function handles: + - Environment validation + - Running the basic agents demonstration + - Error handling and user feedback + """ + print("Welcome to Agno Basic Agents Demo") + print("This demo shows how to create and coordinate AI agents") + print() + + # Validate environment setup if not check_environment(): + print("Cannot proceed without proper API configuration") return - # Run all demonstrations - print("\nRunning all Agno demonstrations...") + # Run demonstrations + print("\nStarting demonstrations...") - # Research teams + # Basic agents and teams demonstration try: - demonstrate_research_team() + demonstrate_basic_agents() + print("\n✓ Demo completed successfully!") + print("\nKey Takeaways:") + print("- Agents can have specialized roles and expertise") + print("- Teams enable multiple agents to collaborate on tasks") + print("- Coordination mode allows intelligent task delegation") + print("- AgentOps tracks all agent interactions for monitoring") + except Exception as e: - print(f"Skipping research team demo due to: {e}") - - # Basic functionality - demonstrate_basic_agents() - - # Tool integration - try: - demonstrate_tool_integration() - except Exception as e: - print(f"Skipping tool integration demo due to: {e}") - - # Workflows - try: - demonstrate_workflows() - except Exception as e: - print(f"Skipping workflow demo due to: 
{e}") - - # Async operations - try: - await demonstrate_async_operations() - except Exception as e: - print(f"Skipping async demo due to: {e}") - - print("\nAll Agno demonstrations completed!") - print("Check your AgentOps dashboard for detailed traces and metrics.") + print(f"Demo failed: {e}") + print("Please check your API keys and network connection") ``` +## Execute the Demo + +Run the cell below to see how agents collaborate within a team: ```python if __name__ == "__main__": + """ + Entry point for the script. + + Uses asyncio to run the main function, preparing for future + async operations and maintaining consistency with other examples. + """ asyncio.run(main()) ``` -## Summary and Next Steps - -Congratulations! You've explored the key features of the Agno framework: - -### What We Covered: -- ✅ Creating and coordinating basic agents and teams -- ✅ Integrating tools and knowledge bases with RAG -- ✅ Building efficient workflows with caching -- ✅ Creating collaborative research teams with specialized agents -- ✅ Running agents asynchronously for better performance - -### Next Steps: -1. **Experiment with Different Models**: Try using different LLMs (GPT-4, Claude, etc.) -2. **Add More Tools**: Explore Agno's extensive tool library -3. **Build Custom Workflows**: Create workflows for your specific use cases -4. **Scale Your Teams**: Add more agents with specialized roles -5. **Monitor with AgentOps**: Use the dashboard to optimize your agents - -### Resources: -- [Agno Documentation](https://docs.agno.com) -- [AgentOps Dashboard](https://app.agentops.ai) -Happy building with Agno! 
🚀 diff --git a/docs/v2/integrations/agno.mdx b/docs/v2/integrations/agno.mdx index 039fefa7b..6adf71348 100644 --- a/docs/v2/integrations/agno.mdx +++ b/docs/v2/integrations/agno.mdx @@ -132,8 +132,24 @@ Once your Agno agents are running with AgentOps, you can monitor them in the [Ag ## Examples - - Comprehensive tutorial covering agents, teams, workflows, and RAG + + Learn the fundamentals of creating AI agents and organizing them into collaborative teams + + + + Execute multiple AI tasks concurrently for improved performance using asyncio + + + + Build sophisticated multi-agent teams with specialized tools for comprehensive research + + + + Implement Retrieval-Augmented Generation with vector databases and knowledge bases + + + + Create custom workflows with intelligent caching for optimized agent performance diff --git a/examples/agno/agno_async_operations.ipynb b/examples/agno/agno_async_operations.ipynb new file mode 100644 index 000000000..aedb041a7 --- /dev/null +++ b/examples/agno/agno_async_operations.ipynb @@ -0,0 +1,243 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "a35d851b", + "metadata": {}, + "source": [ + "\n", + "# Async Operations with Agno\n", + "\n", + "This notebook demonstrates how to leverage asynchronous programming with Agno agents to execute multiple AI tasks concurrently, significantly improving performance and efficiency.\n", + "\n", + "## Overview\n", + "\n", + "This implementation showcases:\n", + "- **Agno** for creating and managing AI agents\n", + "- **Asyncio** for concurrent task execution\n", + "- **AgentOps** for monitoring and tracking AI operations\n", + "\n", + "By using async operations, you can run multiple AI queries simultaneously instead of waiting for each one to complete sequentially. 
This is particularly beneficial when dealing with I/O-bound operations like API calls to AI models.\n", + "\n", + "## Pre-requisites\n", + "\n", + "Before running this notebook, ensure you have:\n", + "- **AgentOps API key** from [AgentOps](https://agentops.ai)\n", + "- **OpenAI API key** from [OpenAI](https://openai.com)\n", + "\n", + "## Setup\n", + "\n", + "Create a `.env` file in your project root with:\n", + "```\n", + "AGENTOPS_API_KEY=your_agentops_key_here\n", + "OPENAI_API_KEY=your_openai_key_here\n", + "```\n" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Implementation Details\n", + "\n", + "This notebook demonstrates a practical example of concurrent AI operations where we:\n", + "\n", + "1. **Initialize an Agno agent** with OpenAI's GPT-4o-mini model\n", + "2. **Create multiple async tasks** that query the AI about different programming languages\n", + "3. **Compare performance** between concurrent and sequential execution\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fe7d8b83", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from agno.agent import Agent\n", + "from agno.team import Team\n", + "from agno.models.openai import OpenAIChat\n", + "import asyncio # For concurrent task execution\n", + "import agentops # For tracking AI operations and analytics\n", + "from dotenv import load_dotenv\n", + "\n", + "# Load environment variables from .env file\n", + "load_dotenv()\n", + "\n", + "# Initialize AgentOps for monitoring AI usage, costs, and performance\n", + "agentops.init(api_key=os.getenv(\"AGENTOPS_API_KEY\"))\n", + "\n", + "# Configuration\n", + "MODEL_ID = \"gpt-4o-mini\" " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b6607818", + "metadata": {}, + "outputs": [], + "source": [ + "def check_environment():\n", + " \"\"\"\n", + " Validate that required API keys are properly configured.\n", + " \n", 
+ " Returns:\n", + " bool: True if all required environment variables are set\n", + " \"\"\"\n", + " required_vars = [\"AGENTOPS_API_KEY\", \"OPENAI_API_KEY\"]\n", + " missing_vars = [var for var in required_vars if not os.getenv(var)]\n", + "\n", + " if missing_vars:\n", + " print(f\"Missing required environment variables: {missing_vars}\")\n", + " print(\"Please configure these in your .env file:\")\n", + " for var in missing_vars:\n", + " print(f\" {var}=your_key_here\")\n", + " return False\n", + "\n", + " print(\"✓ Environment variables configured successfully\")\n", + " return True" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ca0f1a8a", + "metadata": {}, + "outputs": [], + "source": [ + "async def demonstrate_async_operations():\n", + " \"\"\"\n", + " Demonstrate concurrent execution of multiple AI agent tasks.\n", + " \n", + " This function creates multiple async tasks that execute concurrently rather than sequentially.\n", + " Each task makes an independent API call to the AI model, and asyncio.gather() \n", + " waits for all tasks to complete before returning results.\n", + " \n", + " Performance benefit: Instead of 3 sequential calls taking ~90 seconds total,\n", + " concurrent execution typically completes in ~30 seconds.\n", + " \"\"\"\n", + " print(\"\\n\" + \"=\" * 60)\n", + " print(\"CONCURRENT AI OPERATIONS DEMO\")\n", + " print(\"=\" * 60)\n", + "\n", + " try:\n", + " # Initialize AI agent with specified model\n", + " print(\"Initializing AI agent...\")\n", + " agent = Agent(model=OpenAIChat(id=MODEL_ID))\n", + " print(\"✓ Agent ready\")\n", + "\n", + " # Define async task functions\n", + " # Each function is a coroutine that can be executed concurrently\n", + " \n", + " async def task1():\n", + " \"\"\"Query AI about Python programming language.\"\"\"\n", + " print(\"→ Starting Python explanation task...\")\n", + " response = await agent.arun(\"Explain Python programming language in one paragraph\")\n", + " return 
f\"Python: {response.content}\"\n", + "\n", + " async def task2():\n", + " \"\"\"Query AI about JavaScript programming language.\"\"\"\n", + " print(\"→ Starting JavaScript explanation task...\")\n", + " response = await agent.arun(\"Explain JavaScript programming language in one paragraph\")\n", + " return f\"JavaScript: {response.content}\"\n", + "\n", + " async def task3():\n", + " \"\"\"Query AI for comparison between programming languages.\"\"\"\n", + " print(\"→ Starting comparison task...\")\n", + " response = await agent.arun(\"Compare Python and JavaScript briefly\")\n", + " return f\"Comparison: {response.content}\"\n", + "\n", + " # Execute all tasks concurrently using asyncio.gather()\n", + " # This is the key to async performance - tasks run simultaneously\n", + " print(\"\\nExecuting tasks concurrently...\")\n", + " results = await asyncio.gather(task1(), task2(), task3())\n", + "\n", + " # Process and display results\n", + " print(\"\\n\" + \"=\" * 60)\n", + " print(\"RESULTS\")\n", + " print(\"=\" * 60)\n", + " \n", + " for i, result in enumerate(results, 1):\n", + " print(f\"\\nTask {i} Result:\")\n", + " print(result)\n", + " print(\"-\" * 50)\n", + "\n", + " except Exception as e:\n", + " print(f\"Error during async operations: {e}\")\n", + " print(\"This may be due to API rate limits, network issues, or authentication problems\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c0c03fe4", + "metadata": {}, + "outputs": [], + "source": [ + "async def main():\n", + " \"\"\"\n", + " Main async function that orchestrates the demonstration.\n", + " \n", + " Handles environment validation and executes the async operations demo\n", + " with proper error handling and user feedback.\n", + " \"\"\"\n", + " print(\"Agno Async Operations Demonstration\")\n", + " print(\"Showcasing concurrent AI task execution for improved performance\")\n", + " print()\n", + " \n", + " # Validate environment setup\n", + " if not 
check_environment():\n", + " print(\"Cannot proceed without proper API configuration\")\n", + " return\n", + "\n", + " print(\"\\nStarting async operations demo...\")\n", + "\n", + " # Execute async operations demonstration\n", + " try:\n", + " await demonstrate_async_operations()\n", + " print(\"\\n✓ Demo completed successfully\")\n", + " print(\"Note: Observe the performance improvement compared to sequential execution\")\n", + " \n", + " except Exception as e:\n", + " print(f\"Demo execution failed: {e}\")\n", + " print(\"Check your API keys, rate limits, and network connectivity\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "00a4f206", + "metadata": {}, + "outputs": [], + "source": [ + "if __name__ == \"__main__\":\n", + " \"\"\"\n", + " Entry point for the script.\n", + " \n", + " Uses asyncio.run() to execute the main async function and handle\n", + " the async event loop lifecycle automatically.\n", + " \"\"\"\n", + " asyncio.run(main())" + ] + } + ], + "metadata": { + "jupytext": { + "cell_metadata_filter": "-all", + "main_language": "python", + "notebook_metadata_filter": "-all" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/agno/agno_async_operations.py b/examples/agno/agno_async_operations.py new file mode 100644 index 000000000..298ca90bc --- /dev/null +++ b/examples/agno/agno_async_operations.py @@ -0,0 +1,146 @@ +""" +Async Operations with Agno + +This script demonstrates concurrent execution of multiple AI agent tasks using Python's asyncio. +Instead of sequential execution where each task waits for the previous one to complete, +async operations allow multiple tasks to run concurrently, significantly improving performance +when dealing with I/O-bound operations like API calls to AI models. 
+""" + +import os +from agno.agent import Agent +from agno.team import Team +from agno.models.openai import OpenAIChat +import asyncio # For concurrent task execution +import agentops # For tracking AI operations and analytics +from dotenv import load_dotenv + +# Load environment variables from .env file +load_dotenv() + +# Initialize AgentOps for monitoring AI usage, costs, and performance +agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) + +# Configuration +MODEL_ID = "gpt-4o-mini" # Cost-effective OpenAI model suitable for most tasks + +def check_environment(): + """ + Validate that required API keys are properly configured. + + Returns: + bool: True if all required environment variables are set + """ + required_vars = ["AGENTOPS_API_KEY", "OPENAI_API_KEY"] + missing_vars = [var for var in required_vars if not os.getenv(var)] + + if missing_vars: + print(f"Missing required environment variables: {missing_vars}") + print("Please configure these in your .env file:") + for var in missing_vars: + print(f" {var}=your_key_here") + return False + + print("✓ Environment variables configured successfully") + return True + + +async def demonstrate_async_operations(): + """ + Demonstrate concurrent execution of multiple AI agent tasks. + + This function creates multiple async tasks that execute concurrently rather than sequentially. + Each task makes an independent API call to the AI model, and asyncio.gather() + waits for all tasks to complete before returning results. + + Performance benefit: Instead of 3 sequential calls taking ~90 seconds total, + concurrent execution typically completes in ~30 seconds. 
+ """ + print("\n" + "=" * 60) + print("CONCURRENT AI OPERATIONS DEMO") + print("=" * 60) + + try: + # Initialize AI agent with specified model + print("Initializing AI agent...") + agent = Agent(model=OpenAIChat(id=MODEL_ID)) + print("✓ Agent ready") + + # Define async task functions + # Each function is a coroutine that can be executed concurrently + + async def task1(): + """Query AI about Python programming language.""" + print("→ Starting Python explanation task...") + response = await agent.arun("Explain Python programming language in one paragraph") + return f"Python: {response.content}" + + async def task2(): + """Query AI about JavaScript programming language.""" + print("→ Starting JavaScript explanation task...") + response = await agent.arun("Explain JavaScript programming language in one paragraph") + return f"JavaScript: {response.content}" + + async def task3(): + """Query AI for comparison between programming languages.""" + print("→ Starting comparison task...") + response = await agent.arun("Compare Python and JavaScript briefly") + return f"Comparison: {response.content}" + + # Execute all tasks concurrently using asyncio.gather() + # This is the key to async performance - tasks run simultaneously + print("\nExecuting tasks concurrently...") + results = await asyncio.gather(task1(), task2(), task3()) + + # Process and display results + print("\n" + "=" * 60) + print("RESULTS") + print("=" * 60) + + for i, result in enumerate(results, 1): + print(f"\nTask {i} Result:") + print(result) + print("-" * 50) + + except Exception as e: + print(f"Error during async operations: {e}") + print("This may be due to API rate limits, network issues, or authentication problems") + + +async def main(): + """ + Main async function that orchestrates the demonstration. + + Handles environment validation and executes the async operations demo + with proper error handling and user feedback. 
+ """ + print("Agno Async Operations Demonstration") + print("Showcasing concurrent AI task execution for improved performance") + print() + + # Validate environment setup + if not check_environment(): + print("Cannot proceed without proper API configuration") + return + + print("\nStarting async operations demo...") + + # Execute async operations demonstration + try: + await demonstrate_async_operations() + print("\n✓ Demo completed successfully") + print("Note: Observe the performance improvement compared to sequential execution") + + except Exception as e: + print(f"Demo execution failed: {e}") + print("Check your API keys, rate limits, and network connectivity") + + +if __name__ == "__main__": + """ + Entry point for the script. + + Uses asyncio.run() to execute the main async function and handle + the async event loop lifecycle automatically. + """ + asyncio.run(main()) diff --git a/examples/agno/agno_basic_agents.ipynb b/examples/agno/agno_basic_agents.ipynb new file mode 100644 index 000000000..af5240f4b --- /dev/null +++ b/examples/agno/agno_basic_agents.ipynb @@ -0,0 +1,328 @@ +{ + "cells": [ + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "# Basic Agents and Teams with Agno\n", + "\n", + "This notebook demonstrates the fundamentals of creating AI agents and organizing them into collaborative teams using the Agno framework.\n", + "\n", + "## Overview\n", + "\n", + "In this example, you'll learn how to:\n", + "- **Create specialized AI agents** with specific roles and expertise\n", + "- **Organize agents into teams** for collaborative problem-solving\n", + "- **Use coordination modes** for effective agent communication\n", + "- **Monitor agent interactions** with AgentOps integration\n", + "\n", + "## Key Concepts\n", + "\n", + "### Agents\n", + "Individual AI entities with specific roles and capabilities. 
Each agent can be assigned a particular area of expertise, making them specialists in their domain.\n", + "\n", + "### Teams\n", + "Collections of agents that work together to solve complex tasks. Teams can coordinate their responses, share information, and delegate tasks based on each agent's expertise.\n", + "\n", + "### Coordination Modes\n", + "Different strategies for how agents within a team interact and collaborate. The \"coordinate\" mode enables intelligent task routing and information sharing.\n", + "\n", + "## Pre-requisites\n", + "\n", + "Before running this notebook, ensure you have:\n", + "- **AgentOps API key** from [AgentOps](https://agentops.ai)\n", + "- **OpenAI API key** from [OpenAI](https://openai.com)\n", + "\n", + "Create a `.env` file in your project root with:\n", + "```\n", + "AGENTOPS_API_KEY=your_agentops_key_here\n", + "OPENAI_API_KEY=your_openai_key_here\n", + "```\n" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Implementation\n", + "\n", + "Let's start by importing the necessary libraries and setting up our environment.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "39ad00cb", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from agno.agent import Agent\n", + "from agno.team import Team\n", + "from agno.models.openai import OpenAIChat\n", + "import asyncio\n", + "import agentops\n", + "from dotenv import load_dotenv\n", + "\n", + "# Load environment variables from .env file\n", + "load_dotenv()\n", + "\n", + "# Initialize AgentOps for monitoring and analytics\n", + "agentops.init(api_key=os.getenv(\"AGENTOPS_API_KEY\"))\n", + "\n", + "# Configuration\n", + "MODEL_ID = \"gpt-4o-mini\" " + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Environment Validation\n", + "\n", + "Before we create our agents, let's ensure all required API keys are properly 
configured:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "439ff736", + "metadata": {}, + "outputs": [], + "source": [ + "def check_environment():\n", + " \"\"\"\n", + " Verify that all required API keys are properly configured.\n", + " \n", + " Returns:\n", + " bool: True if all required environment variables are set\n", + " \"\"\"\n", + " required_vars = [\"AGENTOPS_API_KEY\", \"OPENAI_API_KEY\"]\n", + " missing_vars = [var for var in required_vars if not os.getenv(var)]\n", + "\n", + " if missing_vars:\n", + " print(f\"Missing required environment variables: {missing_vars}\")\n", + " print(\"Please set these in your .env file or environment\")\n", + " return False\n", + "\n", + " print(\"✓ Environment variables checked successfully\")\n", + " return True" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Creating Agents and Teams\n", + "\n", + "Now let's create our specialized agents and organize them into a collaborative team:\n", + "\n", + "### Step 1: Create Individual Agents\n", + "We'll create two agents with different specializations:\n", + "- **News Agent**: Specializes in gathering and analyzing news\n", + "- **Weather Agent**: Specializes in weather forecasting and analysis\n", + "\n", + "### Step 2: Form a Team\n", + "We'll combine these agents into a team using the \"coordinate\" mode, which enables:\n", + "- Intelligent task routing based on agent expertise\n", + "- Information sharing between agents\n", + "- Collaborative problem-solving\n", + "\n", + "### Step 3: Execute Tasks\n", + "The team will automatically delegate tasks to the most appropriate agent(s) based on the query.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f872be2e", + "metadata": {}, + "outputs": [], + "source": [ + "def demonstrate_basic_agents():\n", + " \"\"\"\n", + " Demonstrate basic agent creation and team coordination.\n", + " \n", + " This function 
shows how to:\n", + " 1. Create specialized agents with specific roles\n", + " 2. Organize agents into a team\n", + " 3. Use the team to solve tasks that require multiple perspectives\n", + " \"\"\"\n", + " print(\"\\n\" + \"=\" * 60)\n", + " print(\"BASIC AGENTS AND TEAMS DEMONSTRATION\")\n", + " print(\"=\" * 60)\n", + "\n", + " try:\n", + " # Create individual agents with specific roles\n", + " # Each agent has a name and a role that defines its expertise\n", + " \n", + " print(\"\\n1. Creating specialized agents...\")\n", + " \n", + " # News Agent: Specializes in gathering and analyzing news information\n", + " news_agent = Agent(\n", + " name=\"News Agent\", \n", + " role=\"Get the latest news and provide news analysis\", \n", + " model=OpenAIChat(id=MODEL_ID)\n", + " )\n", + " print(\" ✓ News Agent created\")\n", + "\n", + " # Weather Agent: Specializes in weather forecasting and analysis\n", + " weather_agent = Agent(\n", + " name=\"Weather Agent\", \n", + " role=\"Get weather forecasts and provide weather analysis\", \n", + " model=OpenAIChat(id=MODEL_ID)\n", + " )\n", + " print(\" ✓ Weather Agent created\")\n", + "\n", + " # Create a team with coordination mode\n", + " # The \"coordinate\" mode allows agents to work together and share information\n", + " print(\"\\n2. Creating a team with coordination capabilities...\")\n", + " team = Team(\n", + " name=\"News and Weather Team\", \n", + " mode=\"coordinate\", # Agents will coordinate their responses\n", + " members=[news_agent, weather_agent]\n", + " )\n", + " print(\" ✓ Team created with 2 agents\")\n", + "\n", + " # Run a task that requires team coordination\n", + " # The team will automatically determine which agent(s) should respond\n", + " print(\"\\n3. Running team task...\")\n", + " print(\" Query: 'What is the weather in Tokyo?'\")\n", + " \n", + " response = team.run(\"What is the weather in Tokyo?\")\n", + " \n", + " print(\"\\n4. 
Team Response:\")\n", + " print(\"-\" * 60)\n", + " print(f\"{response.content}\")\n", + " print(\"-\" * 60)\n", + "\n", + "\n", + " except Exception as e:\n", + " print(f\"Error during basic agents demonstration: {e}\")\n", + " print(\"This might be due to API issues or configuration problems\")" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Running the Demo\n", + "\n", + "Let's execute our main function to see the agents and teams in action:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d7c1373b", + "metadata": {}, + "outputs": [], + "source": [ + "async def main():\n", + " \"\"\"\n", + " Main function that orchestrates the demonstration.\n", + " \n", + " This async function handles:\n", + " - Environment validation\n", + " - Running the basic agents demonstration\n", + " - Error handling and user feedback\n", + " \"\"\"\n", + " print(\"Welcome to Agno Basic Agents Demo\")\n", + " print(\"This demo shows how to create and coordinate AI agents\")\n", + " print()\n", + " \n", + " # Validate environment setup\n", + " if not check_environment():\n", + " print(\"Cannot proceed without proper API configuration\")\n", + " return\n", + "\n", + " # Run demonstrations\n", + " print(\"\\nStarting demonstrations...\")\n", + "\n", + " # Basic agents and teams demonstration\n", + " try:\n", + " demonstrate_basic_agents()\n", + " print(\"\\n✓ Demo completed successfully!\")\n", + " print(\"\\nKey Takeaways:\")\n", + " print(\"- Agents can have specialized roles and expertise\")\n", + " print(\"- Teams enable multiple agents to collaborate on tasks\")\n", + " print(\"- Coordination mode allows intelligent task delegation\")\n", + " print(\"- AgentOps tracks all agent interactions for monitoring\")\n", + " \n", + " except Exception as e:\n", + " print(f\"Demo failed: {e}\")\n", + " print(\"Please check your API keys and network connection\")" + ] + }, + { + "cell_type": "raw", + 
"metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Execute the Demo\n", + "\n", + "Run the cell below to see how agents collaborate within a team:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3b51cb77", + "metadata": {}, + "outputs": [], + "source": [ + "if __name__ == \"__main__\":\n", + " \"\"\"\n", + " Entry point for the script.\n", + " \n", + " Uses asyncio to run the main function, preparing for future\n", + " async operations and maintaining consistency with other examples.\n", + " \"\"\"\n", + " asyncio.run(main())" + ] + }, + { + "cell_type": "raw", + "id": "0a05b8c0", + "metadata": {}, + "source": [ + "\n" + ] + } + ], + "metadata": { + "jupytext": { + "cell_metadata_filter": "-all", + "main_language": "python", + "notebook_metadata_filter": "-all" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/agno/agno_basic_agents.py b/examples/agno/agno_basic_agents.py new file mode 100644 index 000000000..ea768a7e3 --- /dev/null +++ b/examples/agno/agno_basic_agents.py @@ -0,0 +1,155 @@ +""" +Basic Agents and Teams with Agno + +This example demonstrates the fundamentals of creating AI agents and organizing them into teams +using the Agno framework. 
You'll learn how to: +- Create individual agents with specific roles +- Combine agents into teams for collaborative problem-solving +- Use coordination modes for effective agent communication +""" + +import os +from agno.agent import Agent +from agno.team import Team +from agno.models.openai import OpenAIChat +import asyncio +import agentops +from dotenv import load_dotenv + +# Load environment variables from .env file +load_dotenv() + +# Initialize AgentOps for monitoring and analytics +agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) + +# Configuration +MODEL_ID = "gpt-4o-mini" # Using OpenAI's cost-effective model + +def check_environment(): + """ + Verify that all required API keys are properly configured. + + Returns: + bool: True if all required environment variables are set + """ + required_vars = ["AGENTOPS_API_KEY", "OPENAI_API_KEY"] + missing_vars = [var for var in required_vars if not os.getenv(var)] + + if missing_vars: + print(f"Missing required environment variables: {missing_vars}") + print("Please set these in your .env file or environment") + return False + + print("✓ Environment variables checked successfully") + return True + + +def demonstrate_basic_agents(): + """ + Demonstrate basic agent creation and team coordination. + + This function shows how to: + 1. Create specialized agents with specific roles + 2. Organize agents into a team + 3. Use the team to solve tasks that require multiple perspectives + """ + print("\n" + "=" * 60) + print("BASIC AGENTS AND TEAMS DEMONSTRATION") + print("=" * 60) + + try: + # Create individual agents with specific roles + # Each agent has a name and a role that defines its expertise + + print("\n1. 
Creating specialized agents...") + + # News Agent: Specializes in gathering and analyzing news information + news_agent = Agent( + name="News Agent", + role="Get the latest news and provide news analysis", + model=OpenAIChat(id=MODEL_ID) + ) + print(" ✓ News Agent created") + + # Weather Agent: Specializes in weather forecasting and analysis + weather_agent = Agent( + name="Weather Agent", + role="Get weather forecasts and provide weather analysis", + model=OpenAIChat(id=MODEL_ID) + ) + print(" ✓ Weather Agent created") + + # Create a team with coordination mode + # The "coordinate" mode allows agents to work together and share information + print("\n2. Creating a team with coordination capabilities...") + team = Team( + name="News and Weather Team", + mode="coordinate", # Agents will coordinate their responses + members=[news_agent, weather_agent] + ) + print(" ✓ Team created with 2 agents") + + # Run a task that requires team coordination + # The team will automatically determine which agent(s) should respond + print("\n3. Running team task...") + print(" Query: 'What is the weather in Tokyo?'") + + response = team.run("What is the weather in Tokyo?") + + print("\n4. Team Response:") + print("-" * 60) + print(f"{response.content}") + print("-" * 60) + + # The team intelligently routes the query to the Weather Agent + # since it's weather-related, demonstrating smart task delegation + + except Exception as e: + print(f"Error during basic agents demonstration: {e}") + print("This might be due to API issues or configuration problems") + + +async def main(): + """ + Main function that orchestrates the demonstration. 
+ + This async function handles: + - Environment validation + - Running the basic agents demonstration + - Error handling and user feedback + """ + print("Welcome to Agno Basic Agents Demo") + print("This demo shows how to create and coordinate AI agents") + print() + + # Validate environment setup + if not check_environment(): + print("Cannot proceed without proper API configuration") + return + + # Run demonstrations + print("\nStarting demonstrations...") + + # Basic agents and teams demonstration + try: + demonstrate_basic_agents() + print("\n✓ Demo completed successfully!") + print("\nKey Takeaways:") + print("- Agents can have specialized roles and expertise") + print("- Teams enable multiple agents to collaborate on tasks") + print("- Coordination mode allows intelligent task delegation") + print("- AgentOps tracks all agent interactions for monitoring") + + except Exception as e: + print(f"Demo failed: {e}") + print("Please check your API keys and network connection") + + +if __name__ == "__main__": + """ + Entry point for the script. + + Uses asyncio to run the main function, preparing for future + async operations and maintaining consistency with other examples. 
+ """ + asyncio.run(main()) diff --git a/examples/agno/agno_comprehensive_tutorial.py b/examples/agno/agno_comprehensive_tutorial.py index 032e3df33..41c0ff9d1 100644 --- a/examples/agno/agno_comprehensive_tutorial.py +++ b/examples/agno/agno_comprehensive_tutorial.py @@ -337,25 +337,25 @@ async def main(): print("\nRunning all Agno demonstrations...") # Research teams - try: - demonstrate_research_team() - except Exception as e: - print(f"Skipping research team demo due to: {e}") - - # Basic functionality - demonstrate_basic_agents() - - # Tool integration - try: - demonstrate_tool_integration() - except Exception as e: - print(f"Skipping tool integration demo due to: {e}") - - # Workflows - try: - demonstrate_workflows() - except Exception as e: - print(f"Skipping workflow demo due to: {e}") + try: + demonstrate_research_team() + except Exception as e: + print(f"Skipping research team demo due to: {e}") + + # Basic functionality + demonstrate_basic_agents() + + # Tool integration + try: + demonstrate_tool_integration() + except Exception as e: + print(f"Skipping tool integration demo due to: {e}") + + # Workflows + try: + demonstrate_workflows() + except Exception as e: + print(f"Skipping workflow demo due to: {e}") # Async operations try: diff --git a/examples/agno/agno_research_team.ipynb b/examples/agno/agno_research_team.ipynb new file mode 100644 index 000000000..219b8ef32 --- /dev/null +++ b/examples/agno/agno_research_team.ipynb @@ -0,0 +1,442 @@ +{ + "cells": [ + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "# Collaborative Research Team with Agno\n", + "\n", + "This notebook demonstrates how to build a sophisticated multi-agent research team where specialized agents collaborate to provide comprehensive insights from different perspectives.\n", + "\n", + "## Overview\n", + "\n", + "In this advanced example, you'll learn how to:\n", + "- **Create specialized research
agents** with domain-specific tools\n", + "- **Build collaborative teams** that discuss and reach consensus\n", + "- **Integrate multiple research tools** (Google Search, HackerNews, Arxiv, DuckDuckGo)\n", + "- **Enable real-time streaming** of agent discussions\n", + "- **Monitor complex interactions** with AgentOps\n", + "\n", + "## Architecture\n", + "\n", + "Our research team consists of four specialized agents:\n", + "\n", + "1. **Reddit Researcher** - Community insights and user experiences\n", + "2. **HackerNews Researcher** - Technical discussions and industry trends\n", + "3. **Academic Researcher** - Scholarly papers and evidence-based research\n", + "4. **Twitter Researcher** - Real-time trends and public sentiment\n", + "\n", + "Each agent brings unique tools and perspectives to create a comprehensive research output.\n", + "\n" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Research Tools Overview\n", + "\n", + "Our research team uses specialized tools for different platforms:\n", + "\n", + "- **GoogleSearchTools**: General web search and Reddit discovery\n", + "- **HackerNewsTools**: Direct access to HackerNews API for tech discussions\n", + "- **ArxivTools**: Academic paper search and retrieval\n", + "- **DuckDuckGoTools**: Privacy-focused search for Twitter/X content\n", + "\n", + "Each tool is assigned to agents based on their research domain.\n", + "\n", + "\n", + "## Pre-requisites\n", + "\n", + "Before running this notebook, ensure you have:\n", + "- **AgentOps API key** from [AgentOps](https://agentops.ai)\n", + "- **OpenAI API key** from [OpenAI](https://openai.com)\n", + "- **Optional**: API keys for specific research tools (if required)\n", + "\n", + "Create a `.env` file in your project root:\n", + "```\n", + "AGENTOPS_API_KEY=your_agentops_key_here\n", + "OPENAI_API_KEY=your_openai_key_here\n", + "```\n" + ] + }, + { + "cell_type": "markdown", + "id": "ebf75e5f", + 
"metadata": {}, + "source": [ + "## Implementation\n", + "\n", + "Let's start by importing the necessary libraries and tools for our research team." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "49a2467d", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from textwrap import dedent\n", + "from agno.agent import Agent\n", + "from agno.team import Team\n", + "from agno.tools.googlesearch import GoogleSearchTools\n", + "from agno.tools.hackernews import HackerNewsTools\n", + "from agno.tools.arxiv import ArxivTools\n", + "from agno.tools.duckduckgo import DuckDuckGoTools\n", + "from agno.models.openai import OpenAIChat\n", + "import asyncio\n", + "import agentops\n", + "from dotenv import load_dotenv\n", + "\n", + "# Load environment variables\n", + "load_dotenv()\n", + "\n", + "# Initialize AgentOps for monitoring and analytics\n", + "agentops.init(api_key=os.getenv(\"AGENTOPS_API_KEY\"))\n", + "\n", + "# Configuration\n", + "MODEL_ID = \"gpt-4o-mini\" # Default model for agents" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Environment Setup\n", + "\n", + "Initialize AgentOps for monitoring and configure the default model:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0c640e31", + "metadata": { + "lines_to_next_cell": 1 + }, + "outputs": [], + "source": [ + "def check_environment():\n", + " \"\"\"\n", + " Verify that all required API keys are properly configured.\n", + " \n", + " Returns:\n", + " bool: True if all required environment variables are set\n", + " \"\"\"\n", + " required_vars = [\"AGENTOPS_API_KEY\", \"OPENAI_API_KEY\"]\n", + " missing_vars = [var for var in required_vars if not os.getenv(var)]\n", + "\n", + " if missing_vars:\n", + " print(f\"Missing required environment variables: {missing_vars}\")\n", + " print(\"Please set these in your .env file or environment\")\n", + " return False\n", + "\n", + " 
print(\"✓ Environment variables checked successfully\")\n", + " return True" + ] + }, + { + "cell_type": "markdown", + "id": "d9283778", + "metadata": {}, + "source": [ + "## Agent Setup Details\n", + "\n", + "In the following function, we create four specialized research agents, each with unique characteristics:\n", + "\n", + "### 1. Reddit Researcher\n", + "- **Purpose**: Captures community perspectives and real-world experiences\n", + "- **Tool**: GoogleSearchTools (to find Reddit discussions)\n", + "- **Model**: GPT-4 for nuanced understanding of community sentiment\n", + "\n", + "\n", + "### 2. HackerNews Researcher\n", + "- **Purpose**: Provides technical depth and industry insights\n", + "- **Tool**: HackerNewsTools (direct API access)\n", + "- **Model**: GPT-4 for technical comprehension\n", + "\n", + "\n", + "### 3. Academic Paper Researcher\n", + "- **Purpose**: Brings evidence-based research and scholarly perspectives\n", + "- **Tools**: GoogleSearchTools + ArxivTools (comprehensive academic search)\n", + "- **Model**: GPT-4 for understanding complex research\n", + "\n", + "\n", + "### 4. 
Twitter Researcher\n", + "- **Purpose**: Captures real-time trends and public sentiment\n", + "- **Tool**: DuckDuckGoTools (privacy-focused search)\n", + "- **Model**: GPT-4 for trend analysis\n", + "\n", + "\n", + "### Team Configuration\n", + "The agents are organized into a collaborative team with:\n", + "- **Collaboration Mode**: Enables discussion and consensus building\n", + "- **Discussion Master**: Coordinates the conversation\n", + "- **Context Preservation**: Maintains conversation history\n", + "- **Transparent Process**: Shows tool usage and reasoning" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "561993af", + "metadata": {}, + "outputs": [], + "source": [ + "def demonstrate_research_team():\n", + " \"\"\"\n", + " Demonstrate a collaborative research team with multiple specialized agents.\n", + " \n", + " This function creates a team of researchers, each with:\n", + " - Specific expertise and research focus\n", + " - Specialized tools for their domain\n", + " - Custom instructions for their research approach\n", + " \n", + " The team collaborates to provide comprehensive research insights.\n", + " \"\"\"\n", + " print(\"\\n\" + \"=\" * 60)\n", + " print(\"COLLABORATIVE RESEARCH TEAM DEMONSTRATION\")\n", + " print(\"=\" * 60)\n", + "\n", + " try:\n", + " print(\"\\n1. 
Creating specialized research agents...\")\n", + " \n", + " # Reddit Researcher: Focuses on community discussions and user experiences\n", + " reddit_researcher = Agent(\n", + " name=\"Reddit Researcher\",\n", + " role=\"Research a topic on Reddit\",\n", + " model=OpenAIChat(id=\"gpt-4o\"), # Using more capable model for research\n", + " tools=[GoogleSearchTools()], # Google Search to find Reddit discussions\n", + " add_name_to_instructions=True, # Adds agent name to its instructions\n", + " instructions=dedent(\n", + " \"\"\"\n", + " You are a Reddit researcher specializing in community insights.\n", + " You will be given a topic to research on Reddit.\n", + " Your tasks:\n", + " - Find the most relevant and popular Reddit posts\n", + " - Identify common opinions and experiences from users\n", + " - Highlight both positive and negative perspectives\n", + " - Focus on practical advice and real-world experiences\n", + " \"\"\"\n", + " ),\n", + " )\n", + " print(\" ✓ Reddit Researcher created\")\n", + "\n", + " # HackerNews Researcher: Focuses on technical discussions and industry trends\n", + " hackernews_researcher = Agent(\n", + " name=\"HackerNews Researcher\",\n", + " model=OpenAIChat(\"gpt-4o\"),\n", + " role=\"Research a topic on HackerNews.\",\n", + " tools=[HackerNewsTools()], # Direct access to HackerNews API\n", + " add_name_to_instructions=True,\n", + " instructions=dedent(\n", + " \"\"\"\n", + " You are a HackerNews researcher specializing in technical insights.\n", + " You will be given a topic to research on HackerNews.\n", + " Your tasks:\n", + " - Find the most relevant technical discussions\n", + " - Identify industry trends and expert opinions\n", + " - Focus on technical depth and innovation\n", + " - Highlight startup and technology perspectives\n", + " \"\"\"\n", + " ),\n", + " )\n", + " print(\" ✓ HackerNews Researcher created\")\n", + "\n", + " # Academic Paper Researcher: Focuses on scholarly research and evidence\n", + " 
academic_paper_researcher = Agent(\n", + " name=\"Academic Paper Researcher\",\n", + " model=OpenAIChat(\"gpt-4o\"),\n", + " role=\"Research academic papers and scholarly content\",\n", + " tools=[GoogleSearchTools(), ArxivTools()], # Multiple tools for comprehensive research\n", + " add_name_to_instructions=True,\n", + " instructions=dedent(\n", + " \"\"\"\n", + " You are an academic paper researcher specializing in scholarly content.\n", + " You will be given a topic to research in academic literature.\n", + " Your tasks:\n", + " - Find relevant scholarly articles, papers, and academic discussions\n", + " - Focus on peer-reviewed content and citations from reputable sources\n", + " - Provide brief summaries of key findings and methodologies\n", + " - Highlight evidence-based conclusions and research gaps\n", + " \"\"\"\n", + " ),\n", + " )\n", + " print(\" ✓ Academic Paper Researcher created\")\n", + "\n", + " # Twitter Researcher: Focuses on real-time trends and public sentiment\n", + " twitter_researcher = Agent(\n", + " name=\"Twitter Researcher\",\n", + " model=OpenAIChat(\"gpt-4o\"),\n", + " role=\"Research trending discussions and real-time updates\",\n", + " tools=[DuckDuckGoTools()], # DuckDuckGo for privacy-focused searching\n", + " add_name_to_instructions=True,\n", + " instructions=dedent(\n", + " \"\"\"\n", + " You are a Twitter/X researcher specializing in real-time insights.\n", + " You will be given a topic to research on Twitter/X.\n", + " Your tasks:\n", + " - Find trending discussions and influential voices\n", + " - Track real-time updates and breaking news\n", + " - Focus on verified accounts and credible sources\n", + " - Identify relevant hashtags and ongoing conversations\n", + " - Capture public sentiment and viral content\n", + " \"\"\"\n", + " ),\n", + " )\n", + " print(\" ✓ Twitter Researcher created\")\n", + "\n", + " # Create collaborative team with advanced features\n", + " print(\"\\n2. 
Creating collaborative research team...\")\n", + " agent_team = Team(\n", + " name=\"Discussion Team\",\n", + " mode=\"collaborate\", # Agents work together and discuss findings\n", + " model=OpenAIChat(\"gpt-4o\"), # Model for team coordination\n", + " members=[\n", + " reddit_researcher,\n", + " hackernews_researcher,\n", + " academic_paper_researcher,\n", + " twitter_researcher,\n", + " ],\n", + " instructions=[\n", + " \"You are a discussion master coordinating a research team.\",\n", + " \"Facilitate productive discussion between all researchers.\",\n", + " \"Ensure each researcher contributes their unique perspective.\",\n", + " \"Guide the team towards a comprehensive understanding of the topic.\",\n", + " \"You have to stop the discussion when you think the team has reached a consensus.\",\n", + " ],\n", + " success_criteria=\"The team has reached a consensus with insights from all perspectives.\",\n", + " enable_agentic_context=True, # Agents maintain context throughout discussion\n", + " add_context=True, # Include context in agent responses\n", + " show_tool_calls=True, # Display when agents use their tools\n", + " markdown=True, # Format output in markdown\n", + " debug_mode=True, # Show detailed execution information\n", + " show_members_responses=True, # Display individual agent responses\n", + " )\n", + " print(\" ✓ Research team assembled with 4 specialized agents\")\n", + "\n", + " # Execute collaborative research\n", + " print(\"\\n3. 
Starting collaborative research discussion...\")\n", + " print(\" Topic: 'What is the best way to learn to code?'\")\n", + " print(\"\\n\" + \"-\" * 60)\n", + " \n", + " # Stream the team discussion in real-time\n", + " agent_team.print_response(\n", + " message=\"Start the discussion on the topic: 'What is the best way to learn to code?'\",\n", + " stream=True, # Stream responses as they're generated\n", + " stream_intermediate_steps=True, # Show intermediate thinking steps\n", + " )\n", + "\n", + " except Exception as e:\n", + " print(f\"\\nError during research team demonstration: {e}\")\n", + " print(\"This might be due to API rate limits or configuration issues\")" + ] + }, + { + "cell_type": "markdown", + "id": "aa0c0459", + "metadata": {}, + "source": [ + "## Running the Demo\n", + "\n", + "Let's execute our main function to see the agents and teams in action:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9c11fe36", + "metadata": {}, + "outputs": [], + "source": [ + "async def main():\n", + " \"\"\"\n", + " Main function that orchestrates the research team demonstration.\n", + " \n", + " This async function handles:\n", + " - Environment validation\n", + " - Running the collaborative research team demo\n", + " - Error handling and user feedback\n", + " \"\"\"\n", + " print(\"Welcome to Agno Collaborative Research Team Demo\")\n", + " print(\"This demo shows how multiple specialized agents can work together\")\n", + " print(\"to provide comprehensive research insights from different perspectives.\")\n", + " \n", + " # Validate environment setup\n", + " if not check_environment():\n", + " print(\"Cannot proceed without proper API configuration\")\n", + " return\n", + "\n", + " # Run demonstration\n", + " print(\"\\nStarting research team demonstration...\")\n", + "\n", + " try:\n", + " demonstrate_research_team()\n", + " print(\"\\n\\n✓ Research team demo completed successfully!\")\n", + " print(\"\\nKey Takeaways:\")\n", + " 
print(\"- Specialized agents bring unique perspectives and tools\")\n", + " print(\"- Collaborative mode enables rich discussions between agents\")\n", + " print(\"- Each agent uses appropriate tools for their research domain\")\n", + " print(\"- Teams can reach consensus through structured discussion\")\n", + " print(\"- AgentOps tracks all interactions for analysis\")\n", + " \n", + " except Exception as e:\n", + " print(f\"Demo failed: {e}\")\n", + " print(\"Please check your API keys and network connection\")" + ] + }, + { + "cell_type": "markdown", + "id": "db07a0e4", + "metadata": {}, + "source": [ + "## Execute the Demo\n", + "\n", + "Run the cell below to see how agents collaborate within a team:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4afc6da0", + "metadata": {}, + "outputs": [], + "source": [ + "if __name__ == \"__main__\":\n", + " \"\"\"\n", + " Entry point for the script.\n", + " \n", + " Uses asyncio to run the main function, maintaining consistency\n", + " with other examples and preparing for async operations.\n", + " \"\"\"\n", + " asyncio.run(main())" + ] + } + ], + "metadata": { + "jupytext": { + "cell_metadata_filter": "-all", + "main_language": "python", + "notebook_metadata_filter": "-all" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/agno/agno_research_team.py b/examples/agno/agno_research_team.py new file mode 100644 index 000000000..2eaa93408 --- /dev/null +++ b/examples/agno/agno_research_team.py @@ -0,0 +1,249 @@ +""" +Collaborative Research Team with Agno + +This example demonstrates how to create a sophisticated research team with multiple specialized agents, +each equipped with different tools and expertise. The team collaborates to research topics from +multiple perspectives, providing comprehensive insights. 
+ +Key features demonstrated: +- Creating specialized agents with specific research tools +- Building collaborative teams that discuss and reach consensus +- Using various research tools (Google Search, HackerNews, Arxiv, DuckDuckGo) +- Enabling real-time streaming of agent discussions +- Tracking agent interactions with AgentOps +""" + +import os +from textwrap import dedent +from agno.agent import Agent +from agno.team import Team +from agno.tools.googlesearch import GoogleSearchTools +from agno.tools.hackernews import HackerNewsTools +from agno.tools.arxiv import ArxivTools +from agno.tools.duckduckgo import DuckDuckGoTools +from agno.models.openai import OpenAIChat +import asyncio +import agentops +from dotenv import load_dotenv + +# Load environment variables +load_dotenv() + +# Initialize AgentOps for monitoring and analytics +agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) + +# Configuration +MODEL_ID = "gpt-4o-mini" # Default model for agents + +def check_environment(): + """ + Verify that all required API keys are properly configured. + + Returns: + bool: True if all required environment variables are set + """ + required_vars = ["AGENTOPS_API_KEY", "OPENAI_API_KEY"] + missing_vars = [var for var in required_vars if not os.getenv(var)] + + if missing_vars: + print(f"Missing required environment variables: {missing_vars}") + print("Please set these in your .env file or environment") + return False + + print("✓ Environment variables checked successfully") + return True + +def demonstrate_research_team(): + """ + Demonstrate a collaborative research team with multiple specialized agents. + + This function creates a team of researchers, each with: + - Specific expertise and research focus + - Specialized tools for their domain + - Custom instructions for their research approach + + The team collaborates to provide comprehensive research insights. 
+ """ + print("\n" + "=" * 60) + print("COLLABORATIVE RESEARCH TEAM DEMONSTRATION") + print("=" * 60) + + try: + print("\n1. Creating specialized research agents...") + + # Reddit Researcher: Focuses on community discussions and user experiences + reddit_researcher = Agent( + name="Reddit Researcher", + role="Research a topic on Reddit", + model=OpenAIChat(id="gpt-4o"), # Using more capable model for research + tools=[GoogleSearchTools()], # Google Search to find Reddit discussions + add_name_to_instructions=True, # Adds agent name to its instructions + instructions=dedent( + """ + You are a Reddit researcher specializing in community insights. + You will be given a topic to research on Reddit. + Your tasks: + - Find the most relevant and popular Reddit posts + - Identify common opinions and experiences from users + - Highlight both positive and negative perspectives + - Focus on practical advice and real-world experiences + """ + ), + ) + print(" ✓ Reddit Researcher created") + + # HackerNews Researcher: Focuses on technical discussions and industry trends + hackernews_researcher = Agent( + name="HackerNews Researcher", + model=OpenAIChat("gpt-4o"), + role="Research a topic on HackerNews.", + tools=[HackerNewsTools()], # Direct access to HackerNews API + add_name_to_instructions=True, + instructions=dedent( + """ + You are a HackerNews researcher specializing in technical insights. + You will be given a topic to research on HackerNews. 
+ Your tasks: + - Find the most relevant technical discussions + - Identify industry trends and expert opinions + - Focus on technical depth and innovation + - Highlight startup and technology perspectives + """ + ), + ) + print(" ✓ HackerNews Researcher created") + + # Academic Paper Researcher: Focuses on scholarly research and evidence + academic_paper_researcher = Agent( + name="Academic Paper Researcher", + model=OpenAIChat("gpt-4o"), + role="Research academic papers and scholarly content", + tools=[GoogleSearchTools(), ArxivTools()], # Multiple tools for comprehensive research + add_name_to_instructions=True, + instructions=dedent( + """ + You are an academic paper researcher specializing in scholarly content. + You will be given a topic to research in academic literature. + Your tasks: + - Find relevant scholarly articles, papers, and academic discussions + - Focus on peer-reviewed content and citations from reputable sources + - Provide brief summaries of key findings and methodologies + - Highlight evidence-based conclusions and research gaps + """ + ), + ) + print(" ✓ Academic Paper Researcher created") + + # Twitter Researcher: Focuses on real-time trends and public sentiment + twitter_researcher = Agent( + name="Twitter Researcher", + model=OpenAIChat("gpt-4o"), + role="Research trending discussions and real-time updates", + tools=[DuckDuckGoTools()], # DuckDuckGo for privacy-focused searching + add_name_to_instructions=True, + instructions=dedent( + """ + You are a Twitter/X researcher specializing in real-time insights. + You will be given a topic to research on Twitter/X. 
+ Your tasks: + - Find trending discussions and influential voices + - Track real-time updates and breaking news + - Focus on verified accounts and credible sources + - Identify relevant hashtags and ongoing conversations + - Capture public sentiment and viral content + """ + ), + ) + print(" ✓ Twitter Researcher created") + + # Create collaborative team with advanced features + print("\n2. Creating collaborative research team...") + agent_team = Team( + name="Discussion Team", + mode="collaborate", # Agents work together and discuss findings + model=OpenAIChat("gpt-4o"), # Model for team coordination + members=[ + reddit_researcher, + hackernews_researcher, + academic_paper_researcher, + twitter_researcher, + ], + instructions=[ + "You are a discussion master coordinating a research team.", + "Facilitate productive discussion between all researchers.", + "Ensure each researcher contributes their unique perspective.", + "Guide the team towards a comprehensive understanding of the topic.", + "You have to stop the discussion when you think the team has reached a consensus.", + ], + success_criteria="The team has reached a consensus with insights from all perspectives.", + enable_agentic_context=True, # Agents maintain context throughout discussion + add_context=True, # Include context in agent responses + show_tool_calls=True, # Display when agents use their tools + markdown=True, # Format output in markdown + debug_mode=True, # Show detailed execution information + show_members_responses=True, # Display individual agent responses + ) + print(" ✓ Research team assembled with 4 specialized agents") + + # Execute collaborative research + print("\n3. 
Starting collaborative research discussion...") + print(" Topic: 'What is the best way to learn to code?'") + print("\n" + "-" * 60) + + # Stream the team discussion in real-time + agent_team.print_response( + message="Start the discussion on the topic: 'What is the best way to learn to code?'", + stream=True, # Stream responses as they're generated + stream_intermediate_steps=True, # Show intermediate thinking steps + ) + + except Exception as e: + print(f"\nError during research team demonstration: {e}") + print("This might be due to API rate limits or configuration issues") + + +async def main(): + """ + Main function that orchestrates the research team demonstration. + + This async function handles: + - Environment validation + - Running the collaborative research team demo + - Error handling and user feedback + """ + print("Welcome to Agno Collaborative Research Team Demo") + print("This demo shows how multiple specialized agents can work together") + print("to provide comprehensive research insights from different perspectives.") + print() + + # Validate environment setup + if not check_environment(): + print("Cannot proceed without proper API configuration") + return + + # Run demonstration + print("\nStarting research team demonstration...") + + try: + demonstrate_research_team() + print("\n\n✓ Research team demo completed successfully!") + print("\nKey Takeaways:") + print("- Specialized agents bring unique perspectives and tools") + print("- Collaborative mode enables rich discussions between agents") + print("- Each agent uses appropriate tools for their research domain") + print("- Teams can reach consensus through structured discussion") + print("- AgentOps tracks all interactions for analysis") + + except Exception as e: + print(f"Demo failed: {e}") + print("Please check your API keys and network connection") + + +if __name__ == "__main__": + """ + Entry point for the script. 
+ + Uses asyncio to run the main function, maintaining consistency + with other examples and preparing for async operations. + """ + asyncio.run(main()) diff --git a/examples/agno/agno_tool_integrations.ipynb b/examples/agno/agno_tool_integrations.ipynb new file mode 100644 index 000000000..8e7e38be5 --- /dev/null +++ b/examples/agno/agno_tool_integrations.ipynb @@ -0,0 +1,443 @@ +{ + "cells": [ + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "# Tool Integration with RAG (Retrieval-Augmented Generation)\n", + "\n", + "This notebook demonstrates how to build intelligent agents that can access and reason over external knowledge bases using RAG technology.\n", + "\n", + "## What is RAG?\n", + "\n", + "**Retrieval-Augmented Generation (RAG)** combines the power of large language models with the ability to retrieve information from external sources. Instead of relying solely on training data, RAG-enabled agents can:\n", + "\n", + "- Access up-to-date information from documents, websites, and databases\n", + "- Provide accurate, source-backed responses\n", + "- Reduce hallucinations by grounding answers in retrieved content\n", + "- Scale to large knowledge bases without retraining\n", + "\n", + "## Key Components\n", + "\n", + "### 1. Knowledge Base\n", + "- Loads content from URLs, documents, or databases\n", + "- Processes and indexes information for efficient retrieval\n", + "\n", + "### 2. Vector Database\n", + "- Stores document embeddings (numerical representations)\n", + "- Enables semantic search based on meaning, not just keywords\n", + "- Supports hybrid search combining semantic and keyword matching\n", + "\n", + "### 3. Embeddings & Reranking\n", + "- **Embeddings**: Convert text to vectors for similarity comparison\n", + "- **Reranking**: Improves search results by re-scoring them based on relevance\n", + "\n", + "### 4. 
Reasoning Tools\n", + "- Enable step-by-step problem solving\n", + "- Show transparent reasoning process\n", + "- Combine retrieved information with logical thinking\n", + "\n", + "## Pre-requisites\n", + "\n", + "This demo requires three API keys:\n", + "- **AgentOps API key** from [AgentOps](https://agentops.ai)\n", + "- **OpenAI API key** from [OpenAI](https://openai.com)\n", + "- **Cohere API key** from [Cohere](https://cohere.com) (for embeddings)\n", + "\n", + "Create a `.env` file:\n", + "```\n", + "AGENTOPS_API_KEY=your_agentops_key\n", + "OPENAI_API_KEY=your_openai_key\n", + "COHERE_API_KEY=your_cohere_key\n", + "```\n" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Implementation Overview\n", + "\n", + "Let's build a RAG-enabled agent that can access documentation and provide informed responses with source citations.\n" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "### Import Core Libraries\n", + "\n", + "First, we'll import the basic libraries needed for our agent:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c49901be", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from agno.agent import Agent\n", + "from agno.team import Team\n", + "from agno.models.openai import OpenAIChat\n", + "import asyncio\n", + "import agentops\n", + "from dotenv import load_dotenv" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "### Import RAG Components\n", + "\n", + "Now let's import the specialized components for RAG functionality:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a75c84fc", + "metadata": {}, + "outputs": [], + "source": [ + "# Knowledge and RAG components\n", + "from agno.knowledge.url import UrlKnowledge # For loading knowledge from URLs\n", + "from agno.vectordb.lancedb import 
LanceDb # Vector database for storing embeddings\n", + "from agno.vectordb.search import SearchType # Search strategies (hybrid, semantic, etc.)\n", + "from agno.embedder.cohere import CohereEmbedder # For creating text embeddings\n", + "from agno.reranker.cohere import CohereReranker # For improving search results\n", + "from agno.tools.reasoning import ReasoningTools # Advanced reasoning capabilities" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "### Environment Setup\n", + "\n", + "Load environment variables and initialize monitoring:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ee6b59d8", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables\n", + "load_dotenv()\n", + "\n", + "# Initialize AgentOps for monitoring\n", + "agentops.init(api_key=os.getenv(\"AGENTOPS_API_KEY\"))\n", + "\n", + "# API keys and configuration\n", + "cohere_api_key = os.getenv(\"COHERE_API_KEY\") # Required for embeddings and reranking\n", + "MODEL_ID = \"gpt-4o-mini\" # Default model for agents" + ] + }, + { + "cell_type": "raw", + "id": "5bfac31e", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Environment Validation\n", + "\n", + "Before proceeding, let's ensure all required API keys are configured:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f6923641", + "metadata": {}, + "outputs": [], + "source": [ + "def check_environment():\n", + " \"\"\"\n", + " Verify that all required API keys are properly configured.\n", + " \n", + " This demo requires:\n", + " - AGENTOPS_API_KEY: For monitoring agent behavior\n", + " - OPENAI_API_KEY: For the AI model\n", + " - COHERE_API_KEY: For embeddings and reranking\n", + " \n", + " Returns:\n", + " bool: True if all required environment variables are set\n", + " \"\"\"\n", + " required_vars = [\"AGENTOPS_API_KEY\", \"OPENAI_API_KEY\", \"COHERE_API_KEY\"]\n", + " 
missing_vars = [var for var in required_vars if not os.getenv(var)]\n", + "\n", + " if missing_vars:\n", + " print(f\"Missing required environment variables: {missing_vars}\")\n", + " print(\"Please set these in your .env file or environment\")\n", + " print(\"\\nExample .env file:\")\n", + " print(\"AGENTOPS_API_KEY=your_agentops_key\")\n", + " print(\"OPENAI_API_KEY=your_openai_key\")\n", + " print(\"COHERE_API_KEY=your_cohere_key\")\n", + " return False\n", + "\n", + " print(\"✓ Environment variables checked successfully\")\n", + " return True" + ] + }, + { + "cell_type": "raw", + "id": "2740686f", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Building a RAG-Enabled Agent\n", + "\n", + "The following function demonstrates the complete process of creating a RAG-enabled agent:\n", + "\n", + "### Step 1: Knowledge Base Creation\n", + "We'll load documentation from a URL and prepare it for semantic search.\n", + "\n", + "### Step 2: Vector Database Setup\n", + "Configure LanceDB with:\n", + "- **Hybrid search**: Combines keyword and semantic search\n", + "- **Cohere embeddings**: High-quality text representations\n", + "- **Reranking**: Improves result relevance\n", + "\n", + "### Step 3: Agent Configuration\n", + "Create an agent with:\n", + "- Access to the knowledge base\n", + "- On-demand search capabilities\n", + "- Reasoning tools for complex queries\n", + "- Instructions for proper citation\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "01bc16c0", + "metadata": { + "lines_to_next_cell": 1 + }, + "outputs": [], + "source": [ + "def demonstrate_tool_integration():\n", + " \"\"\"\n", + " Demonstrate advanced tool integration with RAG and knowledge bases.\n", + " \n", + " This function shows how to:\n", + " 1. Create a knowledge base from external sources\n", + " 2. Set up a vector database with embeddings\n", + " 3. Configure an agent with RAG capabilities\n", + " 4. 
Enable reasoning tools for complex problem-solving\n", + " \"\"\"\n", + " print(\"\\n\" + \"=\" * 60)\n", + " print(\"TOOL INTEGRATION WITH RAG (Retrieval-Augmented Generation)\")\n", + " print(\"=\" * 60)\n", + "\n", + " try:\n", + " print(\"\\n1. Setting up knowledge base and vector database...\")\n", + " \n", + " # Create knowledge base from documentation URLs\n", + " # This loads content from the specified URLs and prepares it for RAG\n", + " knowledge_base = UrlKnowledge(\n", + " urls=[\"https://docs.agno.com/introduction/agents.md\"], # Documentation to learn from\n", + " # Configure vector database for efficient semantic search\n", + " vector_db=LanceDb(\n", + " uri=\"tmp/lancedb\", # Local storage path for the database\n", + " table_name=\"agno_docs\", # Table to store document embeddings\n", + " search_type=SearchType.hybrid, # Combines keyword and semantic search\n", + " # Embedder converts text to numerical vectors for similarity search\n", + " embedder=CohereEmbedder(\n", + " id=\"embed-v4.0\", # Cohere's embedding model\n", + " api_key=cohere_api_key\n", + " ),\n", + " # Reranker improves search results by re-scoring them\n", + " reranker=CohereReranker(\n", + " model=\"rerank-v3.5\", # Cohere's reranking model\n", + " api_key=cohere_api_key\n", + " ),\n", + " ),\n", + " )\n", + " print(\" ✓ Knowledge base created from documentation\")\n", + " print(\" ✓ Vector database configured with hybrid search\")\n", + "\n", + " # Create an intelligent agent with RAG capabilities\n", + " print(\"\\n2. 
Creating RAG-enabled agent...\")\n", + " agent = Agent(\n", + " model=OpenAIChat(id=MODEL_ID),\n", + " # Agentic RAG is automatically enabled when knowledge is provided\n", + " knowledge=knowledge_base,\n", + " # Allow the agent to search its knowledge base on demand\n", + " search_knowledge=True,\n", + " # Add reasoning tools for step-by-step problem solving\n", + " tools=[ReasoningTools(add_instructions=True)],\n", + " # Custom instructions for how the agent should behave\n", + " instructions=[\n", + " \"Include sources in your response.\", # Cite where information comes from\n", + " \"Always search your knowledge before answering the question.\", # Use RAG first\n", + " \"Only include the output in your response. No other text.\", # Clean responses\n", + " ],\n", + " markdown=True, # Format responses in markdown\n", + " )\n", + " print(\" ✓ Agent created with:\")\n", + " print(\" - Knowledge base access\")\n", + " print(\" - On-demand search capability\")\n", + " print(\" - Reasoning tools\")\n", + " print(\" - Source citation requirements\")\n", + "\n", + " # Test the RAG agent with a question about its knowledge base\n", + " print(\"\\n3. 
Testing RAG agent with knowledge query...\")\n", + " print(\" Question: 'What are Agents?'\")\n", + " print(\"\\n\" + \"-\" * 60)\n", + " \n", + " # Print response with full reasoning process visible\n", + " agent.print_response(\n", + " \"What are Agents?\",\n", + " show_full_reasoning=True, # Shows how the agent searches and reasons\n", + " )\n", + " \n", + " print(\"\\n\" + \"-\" * 60)\n", + " print(\"✓ RAG demonstration completed\")\n", + " print(\"\\nNotice how the agent:\")\n", + " print(\"- Searched the knowledge base for relevant information\")\n", + " print(\"- Used reasoning tools to formulate the answer\")\n", + " print(\"- Included sources from the documentation\")\n", + "\n", + " except Exception as e:\n", + " print(f\"\\nError during tool integration: {e}\")\n", + " print(\"This might be due to:\")\n", + " print(\"- Missing API keys (especially COHERE_API_KEY)\")\n", + " print(\"- Network issues accessing documentation URLs\")\n", + " print(\"- Vector database initialization problems\")" + ] + }, + { + "cell_type": "raw", + "id": "8495b685", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Main Orchestration Function\n", + "\n", + "The main function coordinates the entire demonstration:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "37692f77", + "metadata": {}, + "outputs": [], + "source": [ + "async def main():\n", + " \"\"\"\n", + " Main function that orchestrates the tool integration demonstration.\n", + " \n", + " This async function handles:\n", + " - Environment validation\n", + " - Running the RAG and tool integration demo\n", + " - Error handling and user feedback\n", + " \"\"\"\n", + " print(\"Welcome to Agno Tool Integration Demo\")\n", + " print(\"This demo showcases RAG (Retrieval-Augmented Generation)\")\n", + " print(\"and advanced tool integration capabilities.\")\n", + " print()\n", + " \n", + " # Validate environment setup\n", + " if not check_environment():\n", + " 
print(\"\\nCannot proceed without proper API configuration\")\n", + " print(\"Please obtain a Cohere API key from: https://cohere.com\")\n", + " return\n", + "\n", + " # Run demonstration\n", + " print(\"\\nStarting tool integration demonstration...\")\n", + "\n", + " try:\n", + " demonstrate_tool_integration()\n", + " print(\"\\n\\n✓ Tool integration demo completed successfully!\")\n", + " print(\"\\nKey Takeaways:\")\n", + " print(\"- RAG enables agents to access external knowledge bases\")\n", + " print(\"- Vector databases provide efficient semantic search\")\n", + " print(\"- Embeddings and reranking improve information retrieval\")\n", + " print(\"- Reasoning tools enhance problem-solving capabilities\")\n", + " print(\"- AgentOps tracks all tool usage and knowledge searches\")\n", + " \n", + " except Exception as e:\n", + " print(f\"Demo failed: {e}\")\n", + " print(\"Please check your API keys and network connection\")" + ] + }, + { + "cell_type": "raw", + "id": "87b383cf", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Execute the Demo\n", + "\n", + "Run the following cell to see RAG in action:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "095a7df6", + "metadata": {}, + "outputs": [], + "source": [ + "if __name__ == \"__main__\":\n", + " \"\"\"\n", + " Entry point for the script.\n", + " \n", + " Uses asyncio to run the main function, maintaining consistency\n", + " with other examples and preparing for async operations.\n", + " \"\"\"\n", + " asyncio.run(main())" + ] + } + ], + "metadata": { + "jupytext": { + "cell_metadata_filter": "-all", + "main_language": "python", + "notebook_metadata_filter": "-all" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/agno/agno_tool_integrations.py b/examples/agno/agno_tool_integrations.py new file mode 100644 index 000000000..5392f6eb0 --- /dev/null +++ 
b/examples/agno/agno_tool_integrations.py @@ -0,0 +1,203 @@ +""" +Tool Integration with RAG (Retrieval-Augmented Generation) in Agno + +This example demonstrates how to create intelligent agents with: +- Knowledge bases from external sources (URLs, documents) +- Vector databases for efficient semantic search +- Embeddings and reranking for accurate information retrieval +- Reasoning tools for enhanced problem-solving capabilities + +RAG enables agents to access and reason over large knowledge bases, +providing accurate, source-backed responses instead of relying solely on training data. +""" + +import os +from agno.agent import Agent +from agno.team import Team +from agno.models.openai import OpenAIChat +import asyncio +import agentops +from dotenv import load_dotenv + +# Knowledge and RAG components +from agno.knowledge.url import UrlKnowledge # For loading knowledge from URLs +from agno.vectordb.lancedb import LanceDb # Vector database for storing embeddings +from agno.vectordb.search import SearchType # Search strategies (hybrid, semantic, etc.) +from agno.embedder.cohere import CohereEmbedder # For creating text embeddings +from agno.reranker.cohere import CohereReranker # For improving search results +from agno.tools.reasoning import ReasoningTools # Advanced reasoning capabilities + +# Load environment variables +load_dotenv() + +# Initialize AgentOps for monitoring +agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) + +# API keys and configuration +cohere_api_key = os.getenv("COHERE_API_KEY") # Required for embeddings and reranking +MODEL_ID = "gpt-4o-mini" # Default model for agents + +def check_environment(): + """ + Verify that all required API keys are properly configured. 
+ + This demo requires: + - AGENTOPS_API_KEY: For monitoring agent behavior + - OPENAI_API_KEY: For the AI model + - COHERE_API_KEY: For embeddings and reranking + + Returns: + bool: True if all required environment variables are set + """ + required_vars = ["AGENTOPS_API_KEY", "OPENAI_API_KEY", "COHERE_API_KEY"] + missing_vars = [var for var in required_vars if not os.getenv(var)] + + if missing_vars: + print(f"Missing required environment variables: {missing_vars}") + print("Please set these in your .env file or environment") + print("\nExample .env file:") + print("AGENTOPS_API_KEY=your_agentops_key") + print("OPENAI_API_KEY=your_openai_key") + print("COHERE_API_KEY=your_cohere_key") + return False + + print("✓ Environment variables checked successfully") + return True + + +def demonstrate_tool_integration(): + """ + Demonstrate advanced tool integration with RAG and knowledge bases. + + This function shows how to: + 1. Create a knowledge base from external sources + 2. Set up a vector database with embeddings + 3. Configure an agent with RAG capabilities + 4. Enable reasoning tools for complex problem-solving + """ + print("\n" + "=" * 60) + print("TOOL INTEGRATION WITH RAG (Retrieval-Augmented Generation)") + print("=" * 60) + + try: + print("\n1. 
Setting up knowledge base and vector database...") + + # Create knowledge base from documentation URLs + # This loads content from the specified URLs and prepares it for RAG + knowledge_base = UrlKnowledge( + urls=["https://docs.agno.com/introduction/agents.md"], # Documentation to learn from + # Configure vector database for efficient semantic search + vector_db=LanceDb( + uri="tmp/lancedb", # Local storage path for the database + table_name="agno_docs", # Table to store document embeddings + search_type=SearchType.hybrid, # Combines keyword and semantic search + # Embedder converts text to numerical vectors for similarity search + embedder=CohereEmbedder( + id="embed-v4.0", # Cohere's embedding model + api_key=cohere_api_key + ), + # Reranker improves search results by re-scoring them + reranker=CohereReranker( + model="rerank-v3.5", # Cohere's reranking model + api_key=cohere_api_key + ), + ), + ) + print(" ✓ Knowledge base created from documentation") + print(" ✓ Vector database configured with hybrid search") + + # Create an intelligent agent with RAG capabilities + print("\n2. Creating RAG-enabled agent...") + agent = Agent( + model=OpenAIChat(id=MODEL_ID), + # Agentic RAG is automatically enabled when knowledge is provided + knowledge=knowledge_base, + # Allow the agent to search its knowledge base on demand + search_knowledge=True, + # Add reasoning tools for step-by-step problem solving + tools=[ReasoningTools(add_instructions=True)], + # Custom instructions for how the agent should behave + instructions=[ + "Include sources in your response.", # Cite where information comes from + "Always search your knowledge before answering the question.", # Use RAG first + "Only include the output in your response. 
No other text.", # Clean responses + ], + markdown=True, # Format responses in markdown + ) + print(" ✓ Agent created with:") + print(" - Knowledge base access") + print(" - On-demand search capability") + print(" - Reasoning tools") + print(" - Source citation requirements") + + # Test the RAG agent with a question about its knowledge base + print("\n3. Testing RAG agent with knowledge query...") + print(" Question: 'What are Agents?'") + print("\n" + "-" * 60) + + # Print response with full reasoning process visible + agent.print_response( + "What are Agents?", + show_full_reasoning=True, # Shows how the agent searches and reasons + ) + + print("\n" + "-" * 60) + print("✓ RAG demonstration completed") + print("\nNotice how the agent:") + print("- Searched the knowledge base for relevant information") + print("- Used reasoning tools to formulate the answer") + print("- Included sources from the documentation") + + except Exception as e: + print(f"\nError during tool integration: {e}") + print("This might be due to:") + print("- Missing API keys (especially COHERE_API_KEY)") + print("- Network issues accessing documentation URLs") + print("- Vector database initialization problems") + +async def main(): + """ + Main function that orchestrates the tool integration demonstration. 
+ + This async function handles: + - Environment validation + - Running the RAG and tool integration demo + - Error handling and user feedback + """ + print("Welcome to Agno Tool Integration Demo") + print("This demo showcases RAG (Retrieval-Augmented Generation)") + print("and advanced tool integration capabilities.") + print() + + # Validate environment setup + if not check_environment(): + print("\nCannot proceed without proper API configuration") + print("Please obtain a Cohere API key from: https://cohere.com") + return + + # Run demonstration + print("\nStarting tool integration demonstration...") + + try: + demonstrate_tool_integration() + print("\n\n✓ Tool integration demo completed successfully!") + print("\nKey Takeaways:") + print("- RAG enables agents to access external knowledge bases") + print("- Vector databases provide efficient semantic search") + print("- Embeddings and reranking improve information retrieval") + print("- Reasoning tools enhance problem-solving capabilities") + print("- AgentOps tracks all tool usage and knowledge searches") + + except Exception as e: + print(f"Demo failed: {e}") + print("Please check your API keys and network connection") + + +if __name__ == "__main__": + """ + Entry point for the script. + + Uses asyncio to run the main function, maintaining consistency + with other examples and preparing for async operations. 
+ """ + asyncio.run(main()) diff --git a/examples/agno/agno_workflow_setup.ipynb b/examples/agno/agno_workflow_setup.ipynb new file mode 100644 index 000000000..8e8035b5f --- /dev/null +++ b/examples/agno/agno_workflow_setup.ipynb @@ -0,0 +1,446 @@ +{ + "cells": [ + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "# Workflow Setup with Intelligent Caching\n", + "\n", + "This notebook demonstrates how to build custom workflows in Agno that can orchestrate complex agent interactions with performance optimizations.\n", + "\n", + "## What are Workflows?\n", + "\n", + "Workflows are powerful abstractions that allow you to:\n", + "- **Orchestrate** multiple agents and teams in complex patterns\n", + "- **Maintain state** across multiple invocations\n", + "- **Implement custom logic** like caching, routing, or validation\n", + "- **Optimize performance** through intelligent design patterns\n", + "\n", + "## Why Use Caching?\n", + "\n", + "Caching is particularly valuable for AI agents because:\n", + "- **Cost Reduction**: Avoid redundant API calls for identical queries\n", + "- **Performance**: Instant responses for repeated questions\n", + "- **Development**: Faster iteration during testing\n", + "- **User Experience**: Immediate responses for common queries\n", + "\n", + "## Use Cases\n", + "\n", + "This caching workflow pattern is ideal for:\n", + "- **FAQ Systems**: Where users ask similar questions repeatedly\n", + "- **Development/Testing**: To avoid API costs during iteration\n", + "- **Customer Support**: For common inquiries with standard responses\n", + "- **Documentation Assistants**: Where queries about specific topics repeat\n", + "\n", + "## Pre-requisites\n", + "\n", + "Ensure you have:\n", + "- **AgentOps API key** from [AgentOps](https://agentops.ai)\n", + "- **OpenAI API key** from [OpenAI](https://openai.com)\n", + "\n", + "Create a `.env` file:\n", + "```\n", + "AGENTOPS_API_KEY=your_agentops_key\n", 
+ "OPENAI_API_KEY=your_openai_key\n", + "```\n" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Implementation Overview\n", + "\n", + "Let's build a caching workflow that demonstrates how to optimize agent performance through intelligent response caching.\n" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "### Import Required Libraries\n", + "\n", + "First, we'll import the necessary components for building workflows:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "70c1fba6", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from agno.agent import Agent, RunResponse \n", + "from agno.team import Team\n", + "import asyncio\n", + "import agentops\n", + "from dotenv import load_dotenv\n", + "from agno.workflow import Workflow\n", + "from agno.utils.pprint import pprint_run_response\n", + "from agno.models.openai import OpenAIChat\n", + "from agno.utils.log import logger\n", + "from typing import Iterator" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "### Environment Setup\n", + "\n", + "Load environment variables and initialize AgentOps monitoring:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a50a15ac", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables\n", + "load_dotenv()\n", + "\n", + "# Initialize AgentOps for monitoring workflow execution\n", + "agentops.init(api_key=os.getenv(\"AGENTOPS_API_KEY\"))\n", + "\n", + "# Configuration\n", + "MODEL_ID = \"gpt-4o-mini\" # Default model for agents" + ] + }, + { + "cell_type": "raw", + "id": "bcd36544", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Environment Validation\n", + "\n", + "Before proceeding, let's ensure all required API keys are configured:\n" + ] + }, + { + "cell_type": 
"code", + "execution_count": null, + "id": "3e0f2612", + "metadata": { + "lines_to_next_cell": 1 + }, + "outputs": [], + "source": [ + "def check_environment():\n", + " \"\"\"\n", + " Verify that all required API keys are properly configured.\n", + " \n", + " Returns:\n", + " bool: True if all required environment variables are set\n", + " \"\"\"\n", + " required_vars = [\"AGENTOPS_API_KEY\", \"OPENAI_API_KEY\"]\n", + " missing_vars = [var for var in required_vars if not os.getenv(var)]\n", + "\n", + " if missing_vars:\n", + " print(f\"Missing required environment variables: {missing_vars}\")\n", + " print(\"Please set these in your .env file or environment\")\n", + " return False\n", + "\n", + " print(\"✓ Environment variables checked successfully\")\n", + " return True" + ] + }, + { + "cell_type": "raw", + "id": "68b12e89", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Building the CacheWorkflow Class\n", + "\n", + "Now let's create our custom workflow class that implements intelligent caching:\n", + "\n", + "### Key Components:\n", + "\n", + "1. **Workflow Base Class**: Inherit from `Workflow` to get state management capabilities\n", + "2. **Session State**: Persistent storage that survives across workflow runs\n", + "3. **Agent Integration**: Embed agents as workflow attributes\n", + "4. **Custom Logic**: Implement caching in the `run()` method\n", + "\n", + "### How It Works:\n", + "\n", + "1. **Check Cache**: Look for existing responses in session state\n", + "2. **Cache Hit**: Return immediately without API call\n", + "3. **Cache Miss**: Generate new response and cache it\n", + "4. 
**Stream Support**: Maintain real-time response streaming\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4f388879", + "metadata": {}, + "outputs": [], + "source": [ + "class CacheWorkflow(Workflow):\n", + " \"\"\"\n", + " A workflow that demonstrates intelligent caching capabilities.\n", + " \n", + " This workflow:\n", + " - Caches agent responses to avoid redundant API calls\n", + " - Maintains session state across multiple invocations\n", + " - Provides instant responses for repeated queries\n", + " - Reduces costs and improves performance\n", + " \n", + " Use cases:\n", + " - FAQ systems where questions repeat frequently\n", + " - Development/testing to avoid repeated API calls\n", + " - Systems with predictable query patterns\n", + " \"\"\"\n", + "\n", + " # Workflow metadata (descriptive, not functional)\n", + " description: str = \"A workflow that caches previous outputs for efficiency\"\n", + "\n", + " # Initialize agents as workflow attributes\n", + " # This agent will be used to generate responses when cache misses occur\n", + " agent = Agent(\n", + " model=OpenAIChat(id=MODEL_ID),\n", + " description=\"General purpose agent for generating responses\"\n", + " )\n", + "\n", + " def run(self, message: str) -> Iterator[RunResponse]:\n", + " \"\"\"\n", + " Execute the workflow with caching logic.\n", + " \n", + " This method:\n", + " 1. Checks if the response is already cached\n", + " 2. Returns cached response immediately if found\n", + " 3. Generates new response if not cached\n", + " 4. 
Caches the new response for future use\n", + " \n", + " Args:\n", + " message: The input query to process\n", + " \n", + " Yields:\n", + " RunResponse: Streamed response chunks\n", + " \"\"\"\n", + " logger.info(f\"Checking cache for '{message}'\")\n", + " \n", + " # Check if we've already processed this exact message\n", + " # session_state persists across workflow runs\n", + " if self.session_state.get(message):\n", + " logger.info(f\"Cache hit for '{message}'\")\n", + " # Return cached response immediately (no API call needed)\n", + " yield RunResponse(\n", + " run_id=self.run_id, \n", + " content=self.session_state.get(message)\n", + " )\n", + " return\n", + "\n", + " # Cache miss - need to generate new response\n", + " logger.info(f\"Cache miss for '{message}'\")\n", + " \n", + " # Run the agent and stream the response\n", + " # Using stream=True for real-time output\n", + " yield from self.agent.run(message, stream=True)\n", + "\n", + " # After streaming completes, cache the full response\n", + " # This makes future requests for the same message instant\n", + " self.session_state[message] = self.agent.run_response.content\n", + " logger.info(f\"Cached response for future use\")" + ] + }, + { + "cell_type": "raw", + "id": "fa918b08", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Demonstration Function\n", + "\n", + "The following function showcases the dramatic performance improvement from caching:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d1a925ad", + "metadata": {}, + "outputs": [], + "source": [ + "def demonstrate_workflows():\n", + " \"\"\"\n", + " Demonstrate workflow capabilities with caching.\n", + " \n", + " This function shows:\n", + " - How to create and use custom workflows\n", + " - The performance benefits of caching\n", + " - Session state persistence\n", + " - Response streaming\n", + " \"\"\"\n", + " print(\"\\n\" + \"=\" * 60)\n", + " print(\"WORKFLOWS WITH INTELLIGENT 
CACHING\")\n", + " print(\"=\" * 60)\n", + "\n", + " try:\n", + " # Create an instance of our caching workflow\n", + " print(\"\\n1. Creating CacheWorkflow instance...\")\n", + " workflow = CacheWorkflow()\n", + " print(\" ✓ Workflow initialized with caching capabilities\")\n", + "\n", + " # First run - this will be a cache miss\n", + " print(\"\\n2. First run (expecting cache miss):\")\n", + " print(\" This will make an API call and take ~1-2 seconds\")\n", + " \n", + " # Run workflow with a test message\n", + " response: Iterator[RunResponse] = workflow.run(message=\"Tell me a joke.\")\n", + " \n", + " # Pretty print the response with timing information\n", + " pprint_run_response(response, markdown=True, show_time=True)\n", + "\n", + " # Second run - this should be a cache hit\n", + " print(\"\\n3. Second run (expecting cache hit):\")\n", + " print(\" This should return instantly from cache\")\n", + " \n", + " # Run workflow with the same message\n", + " response: Iterator[RunResponse] = workflow.run(message=\"Tell me a joke.\")\n", + " \n", + " # Pretty print the response - notice the instant response time\n", + " pprint_run_response(response, markdown=True, show_time=True)\n", + " \n", + " print(\"\\n✓ Workflow demonstration completed\")\n", + " print(\"\\nNotice the performance difference:\")\n", + " print(\"- First run: Makes API call, takes time\")\n", + " print(\"- Second run: Returns from cache instantly\")\n", + " print(\"- Same content, but much faster delivery\")\n", + "\n", + " except Exception as e:\n", + " print(f\"\\nError during workflow demonstration: {e}\")\n", + " print(\"This might be due to API issues or configuration problems\")" + ] + }, + { + "cell_type": "raw", + "id": "35d5cfd4", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Main Orchestration\n", + "\n", + "The main function coordinates the entire demonstration:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ba15c8be", + 
"metadata": {}, + "outputs": [], + "source": [ + "async def main():\n", + " \"\"\"\n", + " Main function that orchestrates the workflow demonstration.\n", + " \n", + " This async function handles:\n", + " - Environment validation\n", + " - Running the workflow demonstration\n", + " - Error handling and user feedback\n", + " \"\"\"\n", + " print(\"Welcome to Agno Workflow Demo\")\n", + " print(\"This demo showcases custom workflows with caching capabilities\")\n", + " print()\n", + " \n", + " # Validate environment setup\n", + " if not check_environment():\n", + " print(\"Cannot proceed without proper API configuration\")\n", + " return\n", + "\n", + " # Run demonstration\n", + " print(\"\\nStarting workflow demonstration...\")\n", + "\n", + " try:\n", + " demonstrate_workflows()\n", + " print(\"\\n\\n✓ Workflow demo completed successfully!\")\n", + " print(\"\\nKey Takeaways:\")\n", + " print(\"- Workflows enable custom agent orchestration\")\n", + " print(\"- Caching dramatically improves performance\")\n", + " print(\"- Session state persists across runs\")\n", + " print(\"- Streaming responses provide real-time feedback\")\n", + " print(\"- AgentOps tracks all workflow executions\")\n", + " \n", + " except Exception as e:\n", + " print(f\"Demo failed: {e}\")\n", + " print(\"Please check your API keys and network connection\")" + ] + }, + { + "cell_type": "raw", + "id": "e242b650", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "## Execute the Demo\n", + "\n", + "Run the following cell to see the caching workflow in action:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f2dc108a", + "metadata": {}, + "outputs": [], + "source": [ + "if __name__ == \"__main__\":\n", + " \"\"\"\n", + " Entry point for the script.\n", + " \n", + " Uses asyncio to run the main function, maintaining consistency\n", + " with other examples and preparing for async operations.\n", + " \"\"\"\n", + " asyncio.run(main())" + ] + } + 
], + "metadata": { + "jupytext": { + "cell_metadata_filter": "-all", + "main_language": "python", + "notebook_metadata_filter": "-all" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/agno/agno_workflow_setup.py b/examples/agno/agno_workflow_setup.py new file mode 100644 index 000000000..ca40e84d2 --- /dev/null +++ b/examples/agno/agno_workflow_setup.py @@ -0,0 +1,215 @@ +""" +Workflow Setup with Caching in Agno + +This example demonstrates how to create custom workflows that can: +- Orchestrate complex agent interactions +- Implement caching for improved performance +- Maintain session state across multiple runs +- Stream responses efficiently + +Workflows are powerful abstractions that allow you to build reusable, +stateful agent pipelines with custom logic and optimizations. +""" + +import os +from agno.agent import Agent, RunResponse +from agno.team import Team +import asyncio +import agentops +from dotenv import load_dotenv +from agno.workflow import Workflow +from agno.utils.pprint import pprint_run_response +from agno.models.openai import OpenAIChat +from agno.utils.log import logger +from typing import Iterator + +# Load environment variables +load_dotenv() + +# Initialize AgentOps for monitoring workflow execution +agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) + +# Configuration +MODEL_ID = "gpt-4o-mini" # Default model for agents + +def check_environment(): + """ + Verify that all required API keys are properly configured. 
+ + Returns: + bool: True if all required environment variables are set + """ + required_vars = ["AGENTOPS_API_KEY", "OPENAI_API_KEY"] + missing_vars = [var for var in required_vars if not os.getenv(var)] + + if missing_vars: + print(f"Missing required environment variables: {missing_vars}") + print("Please set these in your .env file or environment") + return False + + print("✓ Environment variables checked successfully") + return True + +class CacheWorkflow(Workflow): + """ + A workflow that demonstrates intelligent caching capabilities. + + This workflow: + - Caches agent responses to avoid redundant API calls + - Maintains session state across multiple invocations + - Provides instant responses for repeated queries + - Reduces costs and improves performance + + Use cases: + - FAQ systems where questions repeat frequently + - Development/testing to avoid repeated API calls + - Systems with predictable query patterns + """ + + # Workflow metadata (descriptive, not functional) + description: str = "A workflow that caches previous outputs for efficiency" + + # Initialize agents as workflow attributes + # This agent will be used to generate responses when cache misses occur + agent = Agent( + model=OpenAIChat(id=MODEL_ID), + description="General purpose agent for generating responses" + ) + + def run(self, message: str) -> Iterator[RunResponse]: + """ + Execute the workflow with caching logic. + + This method: + 1. Checks if the response is already cached + 2. Returns cached response immediately if found + 3. Generates new response if not cached + 4. 
Caches the new response for future use + + Args: + message: The input query to process + + Yields: + RunResponse: Streamed response chunks + """ + logger.info(f"Checking cache for '{message}'") + + # Check if we've already processed this exact message + # session_state persists across workflow runs + if self.session_state.get(message): + logger.info(f"Cache hit for '{message}'") + # Return cached response immediately (no API call needed) + yield RunResponse( + run_id=self.run_id, + content=self.session_state.get(message) + ) + return + + # Cache miss - need to generate new response + logger.info(f"Cache miss for '{message}'") + + # Run the agent and stream the response + # Using stream=True for real-time output + yield from self.agent.run(message, stream=True) + + # After streaming completes, cache the full response + # This makes future requests for the same message instant + self.session_state[message] = self.agent.run_response.content + logger.info(f"Cached response for future use") + + +def demonstrate_workflows(): + """ + Demonstrate workflow capabilities with caching. + + This function shows: + - How to create and use custom workflows + - The performance benefits of caching + - Session state persistence + - Response streaming + """ + print("\n" + "=" * 60) + print("WORKFLOWS WITH INTELLIGENT CACHING") + print("=" * 60) + + try: + # Create an instance of our caching workflow + print("\n1. Creating CacheWorkflow instance...") + workflow = CacheWorkflow() + print(" ✓ Workflow initialized with caching capabilities") + + # First run - this will be a cache miss + print("\n2. First run (expecting cache miss):") + print(" This will make an API call and take ~1-2 seconds") + + # Run workflow with a test message + response: Iterator[RunResponse] = workflow.run(message="Tell me a joke.") + + # Pretty print the response with timing information + pprint_run_response(response, markdown=True, show_time=True) + + # Second run - this should be a cache hit + print("\n3. 
Second run (expecting cache hit):") + print(" This should return instantly from cache") + + # Run workflow with the same message + response: Iterator[RunResponse] = workflow.run(message="Tell me a joke.") + + # Pretty print the response - notice the instant response time + pprint_run_response(response, markdown=True, show_time=True) + + print("\n✓ Workflow demonstration completed") + print("\nNotice the performance difference:") + print("- First run: Makes API call, takes time") + print("- Second run: Returns from cache instantly") + print("- Same content, but much faster delivery") + + except Exception as e: + print(f"\nError during workflow demonstration: {e}") + print("This might be due to API issues or configuration problems") + + +async def main(): + """ + Main function that orchestrates the workflow demonstration. + + This async function handles: + - Environment validation + - Running the workflow demonstration + - Error handling and user feedback + """ + print("Welcome to Agno Workflow Demo") + print("This demo showcases custom workflows with caching capabilities") + print() + + # Validate environment setup + if not check_environment(): + print("Cannot proceed without proper API configuration") + return + + # Run demonstration + print("\nStarting workflow demonstration...") + + try: + demonstrate_workflows() + print("\n\n✓ Workflow demo completed successfully!") + print("\nKey Takeaways:") + print("- Workflows enable custom agent orchestration") + print("- Caching dramatically improves performance") + print("- Session state persists across runs") + print("- Streaming responses provide real-time feedback") + print("- AgentOps tracks all workflow executions") + + except Exception as e: + print(f"Demo failed: {e}") + print("Please check your API keys and network connection") + + +if __name__ == "__main__": + """ + Entry point for the script. 
+ + Uses asyncio to run the main function, maintaining consistency + with other examples and preparing for async operations. + """ + asyncio.run(main()) From c51303df091c0f90633e00f542b3aec4103d366a Mon Sep 17 00:00:00 2001 From: fenilfaldu Date: Thu, 12 Jun 2025 20:54:25 +0530 Subject: [PATCH 09/14] ruff checks --- examples/agno/agno_async_operations.py | 20 +++++----- examples/agno/agno_basic_agents.py | 35 +++++++++-------- examples/agno/agno_research_team.py | 18 +++++---- examples/agno/agno_tool_integrations.py | 25 ++++++------ examples/agno/agno_workflow_setup.py | 51 +++++++++++-------------- 5 files changed, 73 insertions(+), 76 deletions(-) diff --git a/examples/agno/agno_async_operations.py b/examples/agno/agno_async_operations.py index 298ca90bc..4256d9ae6 100644 --- a/examples/agno/agno_async_operations.py +++ b/examples/agno/agno_async_operations.py @@ -9,7 +9,6 @@ import os from agno.agent import Agent -from agno.team import Team from agno.models.openai import OpenAIChat import asyncio # For concurrent task execution import agentops # For tracking AI operations and analytics @@ -24,10 +23,11 @@ # Configuration MODEL_ID = "gpt-4o-mini" # Cost-effective OpenAI model suitable for most tasks + def check_environment(): """ Validate that required API keys are properly configured. - + Returns: bool: True if all required environment variables are set """ @@ -48,11 +48,11 @@ def check_environment(): async def demonstrate_async_operations(): """ Demonstrate concurrent execution of multiple AI agent tasks. - + This function creates multiple async tasks that execute concurrently rather than sequentially. - Each task makes an independent API call to the AI model, and asyncio.gather() + Each task makes an independent API call to the AI model, and asyncio.gather() waits for all tasks to complete before returning results. - + Performance benefit: Instead of 3 sequential calls taking ~90 seconds total, concurrent execution typically completes in ~30 seconds. 
""" @@ -68,7 +68,7 @@ async def demonstrate_async_operations(): # Define async task functions # Each function is a coroutine that can be executed concurrently - + async def task1(): """Query AI about Python programming language.""" print("→ Starting Python explanation task...") @@ -96,7 +96,7 @@ async def task3(): print("\n" + "=" * 60) print("RESULTS") print("=" * 60) - + for i, result in enumerate(results, 1): print(f"\nTask {i} Result:") print(result) @@ -110,14 +110,14 @@ async def task3(): async def main(): """ Main async function that orchestrates the demonstration. - + Handles environment validation and executes the async operations demo with proper error handling and user feedback. """ print("Agno Async Operations Demonstration") print("Showcasing concurrent AI task execution for improved performance") print() - + # Validate environment setup if not check_environment(): print("Cannot proceed without proper API configuration") @@ -130,7 +130,7 @@ async def main(): await demonstrate_async_operations() print("\n✓ Demo completed successfully") print("Note: Observe the performance improvement compared to sequential execution") - + except Exception as e: print(f"Demo execution failed: {e}") print("Check your API keys, rate limits, and network connectivity") diff --git a/examples/agno/agno_basic_agents.py b/examples/agno/agno_basic_agents.py index ea768a7e3..e621867dd 100644 --- a/examples/agno/agno_basic_agents.py +++ b/examples/agno/agno_basic_agents.py @@ -25,10 +25,11 @@ # Configuration MODEL_ID = "gpt-4o-mini" # Using OpenAI's cost-effective model + def check_environment(): """ Verify that all required API keys are properly configured. - + Returns: bool: True if all required environment variables are set """ @@ -47,7 +48,7 @@ def check_environment(): def demonstrate_basic_agents(): """ Demonstrate basic agent creation and team coordination. - + This function shows how to: 1. Create specialized agents with specific roles 2. 
Organize agents into a team @@ -60,22 +61,20 @@ def demonstrate_basic_agents(): try: # Create individual agents with specific roles # Each agent has a name and a role that defines its expertise - + print("\n1. Creating specialized agents...") - + # News Agent: Specializes in gathering and analyzing news information news_agent = Agent( - name="News Agent", - role="Get the latest news and provide news analysis", - model=OpenAIChat(id=MODEL_ID) + name="News Agent", role="Get the latest news and provide news analysis", model=OpenAIChat(id=MODEL_ID) ) print(" ✓ News Agent created") # Weather Agent: Specializes in weather forecasting and analysis weather_agent = Agent( - name="Weather Agent", - role="Get weather forecasts and provide weather analysis", - model=OpenAIChat(id=MODEL_ID) + name="Weather Agent", + role="Get weather forecasts and provide weather analysis", + model=OpenAIChat(id=MODEL_ID), ) print(" ✓ Weather Agent created") @@ -83,9 +82,9 @@ def demonstrate_basic_agents(): # The "coordinate" mode allows agents to work together and share information print("\n2. Creating a team with coordination capabilities...") team = Team( - name="News and Weather Team", + name="News and Weather Team", mode="coordinate", # Agents will coordinate their responses - members=[news_agent, weather_agent] + members=[news_agent, weather_agent], ) print(" ✓ Team created with 2 agents") @@ -93,14 +92,14 @@ def demonstrate_basic_agents(): # The team will automatically determine which agent(s) should respond print("\n3. Running team task...") print(" Query: 'What is the weather in Tokyo?'") - + response = team.run("What is the weather in Tokyo?") - + print("\n4. 
Team Response:") print("-" * 60) print(f"{response.content}") print("-" * 60) - + # The team intelligently routes the query to the Weather Agent # since it's weather-related, demonstrating smart task delegation @@ -112,7 +111,7 @@ def demonstrate_basic_agents(): async def main(): """ Main function that orchestrates the demonstration. - + This async function handles: - Environment validation - Running the basic agents demonstration @@ -121,7 +120,7 @@ async def main(): print("Welcome to Agno Basic Agents Demo") print("This demo shows how to create and coordinate AI agents") print() - + # Validate environment setup if not check_environment(): print("Cannot proceed without proper API configuration") @@ -139,7 +138,7 @@ async def main(): print("- Teams enable multiple agents to collaborate on tasks") print("- Coordination mode allows intelligent task delegation") print("- AgentOps tracks all agent interactions for monitoring") - + except Exception as e: print(f"Demo failed: {e}") print("Please check your API keys and network connection") diff --git a/examples/agno/agno_research_team.py b/examples/agno/agno_research_team.py index 2eaa93408..18291f882 100644 --- a/examples/agno/agno_research_team.py +++ b/examples/agno/agno_research_team.py @@ -35,10 +35,11 @@ # Configuration MODEL_ID = "gpt-4o-mini" # Default model for agents + def check_environment(): """ Verify that all required API keys are properly configured. - + Returns: bool: True if all required environment variables are set """ @@ -53,15 +54,16 @@ def check_environment(): print("✓ Environment variables checked successfully") return True + def demonstrate_research_team(): """ Demonstrate a collaborative research team with multiple specialized agents. - + This function creates a team of researchers, each with: - Specific expertise and research focus - Specialized tools for their domain - Custom instructions for their research approach - + The team collaborates to provide comprehensive research insights. 
""" print("\n" + "=" * 60) @@ -70,7 +72,7 @@ def demonstrate_research_team(): try: print("\n1. Creating specialized research agents...") - + # Reddit Researcher: Focuses on community discussions and user experiences reddit_researcher = Agent( name="Reddit Researcher", @@ -189,7 +191,7 @@ def demonstrate_research_team(): print("\n3. Starting collaborative research discussion...") print(" Topic: 'What is the best way to learn to code?'") print("\n" + "-" * 60) - + # Stream the team discussion in real-time agent_team.print_response( message="Start the discussion on the topic: 'What is the best way to learn to code?'", @@ -205,7 +207,7 @@ def demonstrate_research_team(): async def main(): """ Main function that orchestrates the research team demonstration. - + This async function handles: - Environment validation - Running the collaborative research team demo @@ -215,7 +217,7 @@ async def main(): print("This demo shows how multiple specialized agents can work together") print("to provide comprehensive research insights from different perspectives.") print() - + # Validate environment setup if not check_environment(): print("Cannot proceed without proper API configuration") @@ -233,7 +235,7 @@ async def main(): print("- Each agent uses appropriate tools for their research domain") print("- Teams can reach consensus through structured discussion") print("- AgentOps tracks all interactions for analysis") - + except Exception as e: print(f"Demo failed: {e}") print("Please check your API keys and network connection") diff --git a/examples/agno/agno_tool_integrations.py b/examples/agno/agno_tool_integrations.py index 5392f6eb0..a185fd700 100644 --- a/examples/agno/agno_tool_integrations.py +++ b/examples/agno/agno_tool_integrations.py @@ -13,7 +13,6 @@ import os from agno.agent import Agent -from agno.team import Team from agno.models.openai import OpenAIChat import asyncio import agentops @@ -37,15 +36,16 @@ cohere_api_key = os.getenv("COHERE_API_KEY") # Required for 
embeddings and reranking MODEL_ID = "gpt-4o-mini" # Default model for agents + def check_environment(): """ Verify that all required API keys are properly configured. - + This demo requires: - AGENTOPS_API_KEY: For monitoring agent behavior - OPENAI_API_KEY: For the AI model - COHERE_API_KEY: For embeddings and reranking - + Returns: bool: True if all required environment variables are set """ @@ -68,7 +68,7 @@ def check_environment(): def demonstrate_tool_integration(): """ Demonstrate advanced tool integration with RAG and knowledge bases. - + This function shows how to: 1. Create a knowledge base from external sources 2. Set up a vector database with embeddings @@ -81,7 +81,7 @@ def demonstrate_tool_integration(): try: print("\n1. Setting up knowledge base and vector database...") - + # Create knowledge base from documentation URLs # This loads content from the specified URLs and prepares it for RAG knowledge_base = UrlKnowledge( @@ -94,12 +94,12 @@ def demonstrate_tool_integration(): # Embedder converts text to numerical vectors for similarity search embedder=CohereEmbedder( id="embed-v4.0", # Cohere's embedding model - api_key=cohere_api_key + api_key=cohere_api_key, ), # Reranker improves search results by re-scoring them reranker=CohereReranker( model="rerank-v3.5", # Cohere's reranking model - api_key=cohere_api_key + api_key=cohere_api_key, ), ), ) @@ -134,13 +134,13 @@ def demonstrate_tool_integration(): print("\n3. 
Testing RAG agent with knowledge query...") print(" Question: 'What are Agents?'") print("\n" + "-" * 60) - + # Print response with full reasoning process visible agent.print_response( "What are Agents?", show_full_reasoning=True, # Shows how the agent searches and reasons ) - + print("\n" + "-" * 60) print("✓ RAG demonstration completed") print("\nNotice how the agent:") @@ -155,10 +155,11 @@ def demonstrate_tool_integration(): print("- Network issues accessing documentation URLs") print("- Vector database initialization problems") + async def main(): """ Main function that orchestrates the tool integration demonstration. - + This async function handles: - Environment validation - Running the RAG and tool integration demo @@ -168,7 +169,7 @@ async def main(): print("This demo showcases RAG (Retrieval-Augmented Generation)") print("and advanced tool integration capabilities.") print() - + # Validate environment setup if not check_environment(): print("\nCannot proceed without proper API configuration") @@ -187,7 +188,7 @@ async def main(): print("- Embeddings and reranking improve information retrieval") print("- Reasoning tools enhance problem-solving capabilities") print("- AgentOps tracks all tool usage and knowledge searches") - + except Exception as e: print(f"Demo failed: {e}") print("Please check your API keys and network connection") diff --git a/examples/agno/agno_workflow_setup.py b/examples/agno/agno_workflow_setup.py index ca40e84d2..071977a6a 100644 --- a/examples/agno/agno_workflow_setup.py +++ b/examples/agno/agno_workflow_setup.py @@ -12,8 +12,7 @@ """ import os -from agno.agent import Agent, RunResponse -from agno.team import Team +from agno.agent import Agent, RunResponse import asyncio import agentops from dotenv import load_dotenv @@ -32,10 +31,11 @@ # Configuration MODEL_ID = "gpt-4o-mini" # Default model for agents + def check_environment(): """ Verify that all required API keys are properly configured. 
- + Returns: bool: True if all required environment variables are set """ @@ -50,16 +50,17 @@ def check_environment(): print("✓ Environment variables checked successfully") return True + class CacheWorkflow(Workflow): """ A workflow that demonstrates intelligent caching capabilities. - + This workflow: - Caches agent responses to avoid redundant API calls - Maintains session state across multiple invocations - Provides instant responses for repeated queries - Reduces costs and improves performance - + Use cases: - FAQ systems where questions repeat frequently - Development/testing to avoid repeated API calls @@ -71,43 +72,37 @@ class CacheWorkflow(Workflow): # Initialize agents as workflow attributes # This agent will be used to generate responses when cache misses occur - agent = Agent( - model=OpenAIChat(id=MODEL_ID), - description="General purpose agent for generating responses" - ) + agent = Agent(model=OpenAIChat(id=MODEL_ID), description="General purpose agent for generating responses") def run(self, message: str) -> Iterator[RunResponse]: """ Execute the workflow with caching logic. - + This method: 1. Checks if the response is already cached 2. Returns cached response immediately if found 3. Generates new response if not cached 4. 
Caches the new response for future use - + Args: message: The input query to process - + Yields: RunResponse: Streamed response chunks """ logger.info(f"Checking cache for '{message}'") - + # Check if we've already processed this exact message # session_state persists across workflow runs if self.session_state.get(message): logger.info(f"Cache hit for '{message}'") # Return cached response immediately (no API call needed) - yield RunResponse( - run_id=self.run_id, - content=self.session_state.get(message) - ) + yield RunResponse(run_id=self.run_id, content=self.session_state.get(message)) return # Cache miss - need to generate new response logger.info(f"Cache miss for '{message}'") - + # Run the agent and stream the response # Using stream=True for real-time output yield from self.agent.run(message, stream=True) @@ -115,13 +110,13 @@ def run(self, message: str) -> Iterator[RunResponse]: # After streaming completes, cache the full response # This makes future requests for the same message instant self.session_state[message] = self.agent.run_response.content - logger.info(f"Cached response for future use") + logger.info("Cached response for future use") def demonstrate_workflows(): """ Demonstrate workflow capabilities with caching. - + This function shows: - How to create and use custom workflows - The performance benefits of caching @@ -141,23 +136,23 @@ def demonstrate_workflows(): # First run - this will be a cache miss print("\n2. First run (expecting cache miss):") print(" This will make an API call and take ~1-2 seconds") - + # Run workflow with a test message response: Iterator[RunResponse] = workflow.run(message="Tell me a joke.") - + # Pretty print the response with timing information pprint_run_response(response, markdown=True, show_time=True) # Second run - this should be a cache hit print("\n3. 
Second run (expecting cache hit):") print(" This should return instantly from cache") - + # Run workflow with the same message response: Iterator[RunResponse] = workflow.run(message="Tell me a joke.") - + # Pretty print the response - notice the instant response time pprint_run_response(response, markdown=True, show_time=True) - + print("\n✓ Workflow demonstration completed") print("\nNotice the performance difference:") print("- First run: Makes API call, takes time") @@ -172,7 +167,7 @@ def demonstrate_workflows(): async def main(): """ Main function that orchestrates the workflow demonstration. - + This async function handles: - Environment validation - Running the workflow demonstration @@ -181,7 +176,7 @@ async def main(): print("Welcome to Agno Workflow Demo") print("This demo showcases custom workflows with caching capabilities") print() - + # Validate environment setup if not check_environment(): print("Cannot proceed without proper API configuration") @@ -199,7 +194,7 @@ async def main(): print("- Session state persists across runs") print("- Streaming responses provide real-time feedback") print("- AgentOps tracks all workflow executions") - + except Exception as e: print(f"Demo failed: {e}") print("Please check your API keys and network connection") From ef6110d271865109700099e87233cb561f4dcd42 Mon Sep 17 00:00:00 2001 From: Pratyush Shukla Date: Thu, 12 Jun 2025 22:44:26 +0530 Subject: [PATCH 10/14] some work for @fenilfaldu to follow --- examples/agno/agno_async_operations.ipynb | 185 +++++------------ examples/agno/agno_async_operations.py | 115 +++-------- examples/agno/agno_basic_agents.ipynb | 238 ++++++---------------- examples/agno/agno_basic_agents.py | 130 ++++-------- 4 files changed, 175 insertions(+), 493 deletions(-) diff --git a/examples/agno/agno_async_operations.ipynb b/examples/agno/agno_async_operations.ipynb index aedb041a7..664dc8b3c 100644 --- a/examples/agno/agno_async_operations.ipynb +++ 
b/examples/agno/agno_async_operations.ipynb @@ -11,45 +11,26 @@ "This notebook demonstrates how to leverage asynchronous programming with Agno agents to execute multiple AI tasks concurrently, significantly improving performance and efficiency.\n", "\n", "## Overview\n", + "This notebook demonstrates a practical example of concurrent AI operations where we:\n", "\n", - "This implementation showcases:\n", - "- **Agno** for creating and managing AI agents\n", - "- **Asyncio** for concurrent task execution\n", - "- **AgentOps** for monitoring and tracking AI operations\n", - "\n", - "By using async operations, you can run multiple AI queries simultaneously instead of waiting for each one to complete sequentially. This is particularly beneficial when dealing with I/O-bound operations like API calls to AI models.\n", - "\n", - "## Pre-requisites\n", - "\n", - "Before running this notebook, ensure you have:\n", - "- **AgentOps API key** from [AgentOps](https://agentops.ai)\n", - "- **OpenAI API key** from [OpenAI](https://openai.com)\n", - "\n", - "## Setup\n", + "1. **Initialize an Agno agent** with OpenAI's GPT-4o-mini model\n", + "2. **Create multiple async tasks** that query the AI about different programming languages\n", + "3. **Compare performance** between concurrent and sequential execution\n", "\n", - "Create a `.env` file in your project root with:\n", - "```\n", - "AGENTOPS_API_KEY=your_agentops_key_here\n", - "OPENAI_API_KEY=your_openai_key_here\n", - "```\n" + "By using async operations, you can run multiple AI queries simultaneously instead of waiting for each one to complete sequentially. 
This is particularly beneficial when dealing with I/O-bound operations like API calls to AI models.\n" ] }, { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, + "cell_type": "code", + "execution_count": null, + "id": "75767381", + "metadata": {}, + "outputs": [], "source": [ - "## Implementation Details\n", - "\n", - "This notebook demonstrates a practical example of concurrent AI operations where we:\n", - "\n", - "1. **Initialize an Agno agent** with OpenAI's GPT-4o-mini model\n", - "2. **Create multiple async tasks** that query the AI about different programming languages\n", - "3. **Compare performance** between concurrent and sequential execution\n", - "\n" + "# Install the required dependencies:\n", + "%pip install agentops\n", + "%pip install agno\n", + "%pip install python-dotenv" ] }, { @@ -60,49 +41,35 @@ "outputs": [], "source": [ "import os\n", - "from agno.agent import Agent\n", - "from agno.team import Team\n", - "from agno.models.openai import OpenAIChat\n", - "import asyncio # For concurrent task execution\n", - "import agentops # For tracking AI operations and analytics\n", + "import asyncio\n", "from dotenv import load_dotenv\n", "\n", - "# Load environment variables from .env file\n", + "import agentops\n", + "from agno.agent import Agent\n", + "from agno.team import Team\n", + "from agno.models.openai import OpenAIChat" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c6653555", + "metadata": {}, + "outputs": [], + "source": [ "load_dotenv()\n", - "\n", - "# Initialize AgentOps for monitoring AI usage, costs, and performance\n", - "agentops.init(api_key=os.getenv(\"AGENTOPS_API_KEY\"))\n", - "\n", - "# Configuration\n", - "MODEL_ID = \"gpt-4o-mini\" " + "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\", \"your_openai_api_key_here\")\n", + "os.environ[\"AGENTOPS_API_KEY\"] = os.getenv(\"AGENTOPS_API_KEY\", \"your_agentops_api_key_here\")" ] }, { "cell_type": "code", 
"execution_count": null, - "id": "b6607818", + "id": "ac01eb8a", "metadata": {}, "outputs": [], "source": [ - "def check_environment():\n", - " \"\"\"\n", - " Validate that required API keys are properly configured.\n", - " \n", - " Returns:\n", - " bool: True if all required environment variables are set\n", - " \"\"\"\n", - " required_vars = [\"AGENTOPS_API_KEY\", \"OPENAI_API_KEY\"]\n", - " missing_vars = [var for var in required_vars if not os.getenv(var)]\n", - "\n", - " if missing_vars:\n", - " print(f\"Missing required environment variables: {missing_vars}\")\n", - " print(\"Please configure these in your .env file:\")\n", - " for var in missing_vars:\n", - " print(f\" {var}=your_key_here\")\n", - " return False\n", - "\n", - " print(\"✓ Environment variables configured successfully\")\n", - " return True" + "agentops.init(auto_start_session=False, tags=[\"agno-example\", \"async-operation\"])" ] }, { @@ -123,108 +90,50 @@ " Performance benefit: Instead of 3 sequential calls taking ~90 seconds total,\n", " concurrent execution typically completes in ~30 seconds.\n", " \"\"\"\n", - " print(\"\\n\" + \"=\" * 60)\n", - " print(\"CONCURRENT AI OPERATIONS DEMO\")\n", - " print(\"=\" * 60)\n", + " tracer = agentops.start_trace(trace_name=\"Agno Async Operations Example\",)\n", "\n", " try:\n", " # Initialize AI agent with specified model\n", - " print(\"Initializing AI agent...\")\n", - " agent = Agent(model=OpenAIChat(id=MODEL_ID))\n", - " print(\"✓ Agent ready\")\n", - "\n", - " # Define async task functions\n", - " # Each function is a coroutine that can be executed concurrently\n", + " agent = Agent(model=OpenAIChat(id=\"gpt-4o-mini\"))\n", " \n", " async def task1():\n", " \"\"\"Query AI about Python programming language.\"\"\"\n", - " print(\"→ Starting Python explanation task...\")\n", " response = await agent.arun(\"Explain Python programming language in one paragraph\")\n", " return f\"Python: {response.content}\"\n", "\n", " async def task2():\n", " 
\"\"\"Query AI about JavaScript programming language.\"\"\"\n", - " print(\"→ Starting JavaScript explanation task...\")\n", " response = await agent.arun(\"Explain JavaScript programming language in one paragraph\")\n", " return f\"JavaScript: {response.content}\"\n", "\n", " async def task3():\n", " \"\"\"Query AI for comparison between programming languages.\"\"\"\n", - " print(\"→ Starting comparison task...\")\n", " response = await agent.arun(\"Compare Python and JavaScript briefly\")\n", " return f\"Comparison: {response.content}\"\n", "\n", " # Execute all tasks concurrently using asyncio.gather()\n", - " # This is the key to async performance - tasks run simultaneously\n", - " print(\"\\nExecuting tasks concurrently...\")\n", " results = await asyncio.gather(task1(), task2(), task3())\n", - "\n", - " # Process and display results\n", - " print(\"\\n\" + \"=\" * 60)\n", - " print(\"RESULTS\")\n", - " print(\"=\" * 60)\n", " \n", " for i, result in enumerate(results, 1):\n", " print(f\"\\nTask {i} Result:\")\n", " print(result)\n", " print(\"-\" * 50)\n", "\n", - " except Exception as e:\n", - " print(f\"Error during async operations: {e}\")\n", - " print(\"This may be due to API rate limits, network issues, or authentication problems\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c0c03fe4", - "metadata": {}, - "outputs": [], - "source": [ - "async def main():\n", - " \"\"\"\n", - " Main async function that orchestrates the demonstration.\n", - " \n", - " Handles environment validation and executes the async operations demo\n", - " with proper error handling and user feedback.\n", - " \"\"\"\n", - " print(\"Agno Async Operations Demonstration\")\n", - " print(\"Showcasing concurrent AI task execution for improved performance\")\n", - " print()\n", - " \n", - " # Validate environment setup\n", - " if not check_environment():\n", - " print(\"Cannot proceed without proper API configuration\")\n", - " return\n", - "\n", - " 
print(\"\\nStarting async operations demo...\")\n", + " agentops.end_trace(tracer, end_state=\"Success\")\n", "\n", - " # Execute async operations demonstration\n", - " try:\n", - " await demonstrate_async_operations()\n", - " print(\"\\n✓ Demo completed successfully\")\n", - " print(\"Note: Observe the performance improvement compared to sequential execution\")\n", - " \n", " except Exception as e:\n", - " print(f\"Demo execution failed: {e}\")\n", - " print(\"Check your API keys, rate limits, and network connectivity\")" + " print(f\"An error occurred: {e}\")\n", + " agentops.end_trace(tracer, end_state=\"Error\")" ] }, { "cell_type": "code", "execution_count": null, - "id": "00a4f206", + "id": "0aa21331", "metadata": {}, "outputs": [], "source": [ - "if __name__ == \"__main__\":\n", - " \"\"\"\n", - " Entry point for the script.\n", - " \n", - " Uses asyncio.run() to execute the main async function and handle\n", - " the async event loop lifecycle automatically.\n", - " \"\"\"\n", - " asyncio.run(main())" + "await demonstrate_async_operations()" ] } ], @@ -234,8 +143,22 @@ "main_language": "python", "notebook_metadata_filter": "-all" }, + "kernelspec": { + "display_name": "agentops (3.11.11)", + "language": "python", + "name": "python3" + }, "language_info": { - "name": "python" + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" } }, "nbformat": 4, diff --git a/examples/agno/agno_async_operations.py b/examples/agno/agno_async_operations.py index 4256d9ae6..8f4c43ded 100644 --- a/examples/agno/agno_async_operations.py +++ b/examples/agno/agno_async_operations.py @@ -1,48 +1,31 @@ """ -Async Operations with Agno +# Async Operations with Agno -This script demonstrates concurrent execution of multiple AI agent tasks using Python's asyncio. 
-Instead of sequential execution where each task waits for the previous one to complete, -async operations allow multiple tasks to run concurrently, significantly improving performance -when dealing with I/O-bound operations like API calls to AI models. -""" +This notebook demonstrates how to leverage asynchronous programming with Agno agents to execute multiple AI tasks concurrently, significantly improving performance and efficiency. + +## Overview + +This notebook demonstrates a practical example of concurrent AI operations where we: + +1. **Initialize an Agno agent** with OpenAI's GPT-4o-mini model +2. **Create multiple async tasks** that query the AI about different programming languages +3. **Compare performance** between concurrent and sequential execution +By using async operations, you can run multiple AI queries simultaneously instead of waiting for each one to complete sequentially. This is particularly beneficial when dealing with I/O-bound operations like API calls to AI models. +""" import os +import asyncio +from dotenv import load_dotenv +import agentops from agno.agent import Agent from agno.models.openai import OpenAIChat -import asyncio # For concurrent task execution -import agentops # For tracking AI operations and analytics -from dotenv import load_dotenv -# Load environment variables from .env file load_dotenv() -# Initialize AgentOps for monitoring AI usage, costs, and performance -agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) - -# Configuration -MODEL_ID = "gpt-4o-mini" # Cost-effective OpenAI model suitable for most tasks - - -def check_environment(): - """ - Validate that required API keys are properly configured. 
- - Returns: - bool: True if all required environment variables are set - """ - required_vars = ["AGENTOPS_API_KEY", "OPENAI_API_KEY"] - missing_vars = [var for var in required_vars if not os.getenv(var)] - - if missing_vars: - print(f"Missing required environment variables: {missing_vars}") - print("Please configure these in your .env file:") - for var in missing_vars: - print(f" {var}=your_key_here") - return False +os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "your_openai_api_key_here") +os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY", "your_agentops_api_key_here") - print("✓ Environment variables configured successfully") - return True +agentops.init(auto_start_session=False, tags=["agno-example", "async-operation"]) async def demonstrate_async_operations(): @@ -56,91 +39,41 @@ async def demonstrate_async_operations(): Performance benefit: Instead of 3 sequential calls taking ~90 seconds total, concurrent execution typically completes in ~30 seconds. """ - print("\n" + "=" * 60) - print("CONCURRENT AI OPERATIONS DEMO") - print("=" * 60) + tracer = agentops.start_trace(trace_name="Agno Async Operations Example") try: # Initialize AI agent with specified model - print("Initializing AI agent...") - agent = Agent(model=OpenAIChat(id=MODEL_ID)) - print("✓ Agent ready") - - # Define async task functions - # Each function is a coroutine that can be executed concurrently + agent = Agent(model=OpenAIChat(id="gpt-4o-mini")) async def task1(): """Query AI about Python programming language.""" - print("→ Starting Python explanation task...") response = await agent.arun("Explain Python programming language in one paragraph") return f"Python: {response.content}" async def task2(): """Query AI about JavaScript programming language.""" - print("→ Starting JavaScript explanation task...") response = await agent.arun("Explain JavaScript programming language in one paragraph") return f"JavaScript: {response.content}" async def task3(): """Query AI for 
comparison between programming languages.""" - print("→ Starting comparison task...") response = await agent.arun("Compare Python and JavaScript briefly") return f"Comparison: {response.content}" # Execute all tasks concurrently using asyncio.gather() - # This is the key to async performance - tasks run simultaneously - print("\nExecuting tasks concurrently...") results = await asyncio.gather(task1(), task2(), task3()) - # Process and display results - print("\n" + "=" * 60) - print("RESULTS") - print("=" * 60) - for i, result in enumerate(results, 1): print(f"\nTask {i} Result:") print(result) print("-" * 50) - except Exception as e: - print(f"Error during async operations: {e}") - print("This may be due to API rate limits, network issues, or authentication problems") - - -async def main(): - """ - Main async function that orchestrates the demonstration. - - Handles environment validation and executes the async operations demo - with proper error handling and user feedback. - """ - print("Agno Async Operations Demonstration") - print("Showcasing concurrent AI task execution for improved performance") - print() - - # Validate environment setup - if not check_environment(): - print("Cannot proceed without proper API configuration") - return - - print("\nStarting async operations demo...") - - # Execute async operations demonstration - try: - await demonstrate_async_operations() - print("\n✓ Demo completed successfully") - print("Note: Observe the performance improvement compared to sequential execution") + agentops.end_trace(tracer, end_state="Success") except Exception as e: - print(f"Demo execution failed: {e}") - print("Check your API keys, rate limits, and network connectivity") + print(f"An error occurred: {e}") + agentops.end_trace(tracer, end_state="Error") if __name__ == "__main__": - """ - Entry point for the script. - - Uses asyncio.run() to execute the main async function and handle - the async event loop lifecycle automatically. 
- """ - asyncio.run(main()) + asyncio.run(demonstrate_async_operations()) diff --git a/examples/agno/agno_basic_agents.ipynb b/examples/agno/agno_basic_agents.ipynb index af5240f4b..4988be5c6 100644 --- a/examples/agno/agno_basic_agents.ipynb +++ b/examples/agno/agno_basic_agents.ipynb @@ -1,16 +1,13 @@ { "cells": [ { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, + "cell_type": "markdown", + "id": "8b2111ae", + "metadata": {}, "source": [ "# Basic Agents and Teams with Agno\n", "\n", - "This notebook demonstrates the fundamentals of creating AI agents and organizing them into collaborative teams using the Agno framework.\n", + "This example demonstrates the fundamentals of creating AI agents and organizing them into collaborative teams using the Agno framework.\n", "\n", "## Overview\n", "\n", @@ -29,32 +26,20 @@ "Collections of agents that work together to solve complex tasks. Teams can coordinate their responses, share information, and delegate tasks based on each agent's expertise.\n", "\n", "### Coordination Modes\n", - "Different strategies for how agents within a team interact and collaborate. The \"coordinate\" mode enables intelligent task routing and information sharing.\n", - "\n", - "## Pre-requisites\n", - "\n", - "Before running this notebook, ensure you have:\n", - "- **AgentOps API key** from [AgentOps](https://agentops.ai)\n", - "- **OpenAI API key** from [OpenAI](https://openai.com)\n", - "\n", - "Create a `.env` file in your project root with:\n", - "```\n", - "AGENTOPS_API_KEY=your_agentops_key_here\n", - "OPENAI_API_KEY=your_openai_key_here\n", - "```\n" + "Different strategies for how agents within a team interact and collaborate. The \"coordinate\" mode enables intelligent task routing and information sharing." 
] }, { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, + "cell_type": "code", + "execution_count": null, + "id": "d087e416", + "metadata": {}, + "outputs": [], "source": [ - "## Implementation\n", - "\n", - "Let's start by importing the necessary libraries and setting up our environment.\n" + "# Install the required dependencies\n", + "%pip install agentops\n", + "%pip install agno\n", + "%pip install python-dotenv" ] }, { @@ -65,64 +50,39 @@ "outputs": [], "source": [ "import os\n", - "from agno.agent import Agent\n", - "from agno.team import Team\n", - "from agno.models.openai import OpenAIChat\n", - "import asyncio\n", - "import agentops\n", "from dotenv import load_dotenv\n", "\n", - "# Load environment variables from .env file\n", - "load_dotenv()\n", - "\n", - "# Initialize AgentOps for monitoring and analytics\n", - "agentops.init(api_key=os.getenv(\"AGENTOPS_API_KEY\"))\n", - "\n", - "# Configuration\n", - "MODEL_ID = \"gpt-4o-mini\" " + "import agentops\n", + "from agno.agent import Agent\n", + "from agno.team import Team\n", + "from agno.models.openai import OpenAIChat" ] }, { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, + "cell_type": "code", + "execution_count": null, + "id": "f733e281", + "metadata": {}, + "outputs": [], "source": [ - "## Environment Validation\n", - "\n", - "Before we create our agents, let's ensure all required API keys are properly configured:\n" + "load_dotenv()\n", + "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\", \"your_openai_api_key_here\")\n", + "os.environ[\"AGENTOPS_API_KEY\"] = os.getenv(\"AGENTOPS_API_KEY\", \"your_agentops_api_key_here\")" ] }, { "cell_type": "code", "execution_count": null, - "id": "439ff736", + "id": "fb37819a", "metadata": {}, "outputs": [], "source": [ - "def check_environment():\n", - " \"\"\"\n", - " Verify that all required API keys are properly configured.\n", - " \n", - " Returns:\n", - " bool: True if all 
required environment variables are set\n", - " \"\"\"\n", - " required_vars = [\"AGENTOPS_API_KEY\", \"OPENAI_API_KEY\"]\n", - " missing_vars = [var for var in required_vars if not os.getenv(var)]\n", - "\n", - " if missing_vars:\n", - " print(f\"Missing required environment variables: {missing_vars}\")\n", - " print(\"Please set these in your .env file or environment\")\n", - " return False\n", - "\n", - " print(\"✓ Environment variables checked successfully\")\n", - " return True" + "agentops.init(auto_start_session=False, tags=[\"agno-example\", \"basics\", \"agents-and-teams\"])" ] }, { - "cell_type": "raw", + "cell_type": "markdown", + "id": "e954b898", "metadata": { "vscode": { "languageId": "raw" @@ -148,6 +108,14 @@ "The team will automatically delegate tasks to the most appropriate agent(s) based on the query.\n" ] }, + { + "cell_type": "markdown", + "id": "c12702d0", + "metadata": {}, + "source": [ + "Here's the code to implement this:" + ] + }, { "cell_type": "code", "execution_count": null, @@ -164,152 +132,58 @@ " 2. Organize agents into a team\n", " 3. Use the team to solve tasks that require multiple perspectives\n", " \"\"\"\n", - " print(\"\\n\" + \"=\" * 60)\n", - " print(\"BASIC AGENTS AND TEAMS DEMONSTRATION\")\n", - " print(\"=\" * 60)\n", + " tracer = agentops.start_trace(trace_name=\"Agno Basic Agents and Teams Demonstration\",)\n", "\n", " try:\n", " # Create individual agents with specific roles\n", " # Each agent has a name and a role that defines its expertise\n", - " \n", - " print(\"\\n1. 
Creating specialized agents...\")\n", - " \n", + "\n", " # News Agent: Specializes in gathering and analyzing news information\n", " news_agent = Agent(\n", " name=\"News Agent\", \n", " role=\"Get the latest news and provide news analysis\", \n", - " model=OpenAIChat(id=MODEL_ID)\n", + " model=OpenAIChat(id=\"gpt-4o-mini\")\n", " )\n", - " print(\" ✓ News Agent created\")\n", "\n", " # Weather Agent: Specializes in weather forecasting and analysis\n", " weather_agent = Agent(\n", " name=\"Weather Agent\", \n", " role=\"Get weather forecasts and provide weather analysis\", \n", - " model=OpenAIChat(id=MODEL_ID)\n", + " model=OpenAIChat(id=\"gpt-4o-mini\")\n", " )\n", - " print(\" ✓ Weather Agent created\")\n", "\n", " # Create a team with coordination mode\n", " # The \"coordinate\" mode allows agents to work together and share information\n", - " print(\"\\n2. Creating a team with coordination capabilities...\")\n", " team = Team(\n", " name=\"News and Weather Team\", \n", " mode=\"coordinate\", # Agents will coordinate their responses\n", " members=[news_agent, weather_agent]\n", " )\n", - " print(\" ✓ Team created with 2 agents\")\n", "\n", " # Run a task that requires team coordination\n", " # The team will automatically determine which agent(s) should respond\n", - " print(\"\\n3. Running team task...\")\n", - " print(\" Query: 'What is the weather in Tokyo?'\")\n", - " \n", " response = team.run(\"What is the weather in Tokyo?\")\n", " \n", - " print(\"\\n4. 
Team Response:\")\n", + " print(\"\\nTeam Response:\")\n", " print(\"-\" * 60)\n", " print(f\"{response.content}\")\n", " print(\"-\" * 60)\n", "\n", + " agentops.end_trace(tracer, end_state=\"Success\")\n", "\n", " except Exception as e:\n", - " print(f\"Error during basic agents demonstration: {e}\")\n", - " print(\"This might be due to API issues or configuration problems\")" - ] - }, - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## Running the Demo\n", - "\n", - "Let's execute our main function to see the agents and teams in action:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d7c1373b", - "metadata": {}, - "outputs": [], - "source": [ - "async def main():\n", - " \"\"\"\n", - " Main function that orchestrates the demonstration.\n", - " \n", - " This async function handles:\n", - " - Environment validation\n", - " - Running the basic agents demonstration\n", - " - Error handling and user feedback\n", - " \"\"\"\n", - " print(\"Welcome to Agno Basic Agents Demo\")\n", - " print(\"This demo shows how to create and coordinate AI agents\")\n", - " print()\n", - " \n", - " # Validate environment setup\n", - " if not check_environment():\n", - " print(\"Cannot proceed without proper API configuration\")\n", - " return\n", - "\n", - " # Run demonstrations\n", - " print(\"\\nStarting demonstrations...\")\n", - "\n", - " # Basic agents and teams demonstration\n", - " try:\n", - " demonstrate_basic_agents()\n", - " print(\"\\n✓ Demo completed successfully!\")\n", - " print(\"\\nKey Takeaways:\")\n", - " print(\"- Agents can have specialized roles and expertise\")\n", - " print(\"- Teams enable multiple agents to collaborate on tasks\")\n", - " print(\"- Coordination mode allows intelligent task delegation\")\n", - " print(\"- AgentOps tracks all agent interactions for monitoring\")\n", - " \n", - " except Exception as e:\n", - " print(f\"Demo failed: {e}\")\n", - " print(\"Please 
check your API keys and network connection\")" - ] - }, - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## Execute the Demo\n", - "\n", - "Run the cell below to see how agents collaborate within a team:\n" + " print(f\"An error occurred: {e}\")\n", + " agentops.end_trace(tracer, end_state=\"Error\")" ] }, { "cell_type": "code", "execution_count": null, - "id": "3b51cb77", + "id": "ca13c9b0", "metadata": {}, "outputs": [], "source": [ - "if __name__ == \"__main__\":\n", - " \"\"\"\n", - " Entry point for the script.\n", - " \n", - " Uses asyncio to run the main function, preparing for future\n", - " async operations and maintaining consistency with other examples.\n", - " \"\"\"\n", - " asyncio.run(main())" - ] - }, - { - "cell_type": "raw", - "id": "0a05b8c0", - "metadata": {}, - "source": [ - "\n" + "demonstrate_basic_agents()" ] } ], @@ -319,8 +193,22 @@ "main_language": "python", "notebook_metadata_filter": "-all" }, + "kernelspec": { + "display_name": "agentops (3.11.11)", + "language": "python", + "name": "python3" + }, "language_info": { - "name": "python" + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" } }, "nbformat": 4, diff --git a/examples/agno/agno_basic_agents.py b/examples/agno/agno_basic_agents.py index e621867dd..56e042768 100644 --- a/examples/agno/agno_basic_agents.py +++ b/examples/agno/agno_basic_agents.py @@ -1,48 +1,40 @@ """ -Basic Agents and Teams with Agno +# Basic Agents and Teams with Agno -This example demonstrates the fundamentals of creating AI agents and organizing them into teams -using the Agno framework. 
You'll learn how to: -- Create individual agents with specific roles -- Combine agents into teams for collaborative problem-solving -- Use coordination modes for effective agent communication -""" +This example demonstrates the fundamentals of creating AI agents and organizing them into collaborative teams using the Agno framework. -import os -from agno.agent import Agent -from agno.team import Team -from agno.models.openai import OpenAIChat -import asyncio -import agentops -from dotenv import load_dotenv +## Overview -# Load environment variables from .env file -load_dotenv() +In this example, you'll learn how to: +- **Create specialized AI agents** with specific roles and expertise +- **Organize agents into teams** for collaborative problem-solving +- **Use coordination modes** for effective agent communication +- **Monitor agent interactions** with AgentOps integration -# Initialize AgentOps for monitoring and analytics -agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) +## Key Concepts -# Configuration -MODEL_ID = "gpt-4o-mini" # Using OpenAI's cost-effective model +### Agents +Individual AI entities with specific roles and capabilities. Each agent can be assigned a particular area of expertise, making them specialists in their domain. +### Teams +Collections of agents that work together to solve complex tasks. Teams can coordinate their responses, share information, and delegate tasks based on each agent's expertise. -def check_environment(): - """ - Verify that all required API keys are properly configured. +### Coordination Modes +Different strategies for how agents within a team interact and collaborate. The "coordinate" mode enables intelligent task routing and information sharing. 
+""" +import os +from dotenv import load_dotenv +import agentops +from agno.agent import Agent +from agno.team import Team +from agno.models.openai import OpenAIChat - Returns: - bool: True if all required environment variables are set - """ - required_vars = ["AGENTOPS_API_KEY", "OPENAI_API_KEY"] - missing_vars = [var for var in required_vars if not os.getenv(var)] +load_dotenv() - if missing_vars: - print(f"Missing required environment variables: {missing_vars}") - print("Please set these in your .env file or environment") - return False +os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "your_openai_api_key_here") +os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY", "your_agentops_api_key_here") - print("✓ Environment variables checked successfully") - return True +agentops.init(auto_start_session=False, tags=["agno-example", "basics", "agents-and-teams"]) def demonstrate_basic_agents(): @@ -54,101 +46,47 @@ def demonstrate_basic_agents(): 2. Organize agents into a team 3. Use the team to solve tasks that require multiple perspectives """ - print("\n" + "=" * 60) - print("BASIC AGENTS AND TEAMS DEMONSTRATION") - print("=" * 60) + tracer = agentops.start_trace(trace_name="Agno Basic Agents and Teams Demonstration") try: # Create individual agents with specific roles # Each agent has a name and a role that defines its expertise - print("\n1. 
Creating specialized agents...") - # News Agent: Specializes in gathering and analyzing news information news_agent = Agent( - name="News Agent", role="Get the latest news and provide news analysis", model=OpenAIChat(id=MODEL_ID) + name="News Agent", role="Get the latest news and provide news analysis", model=OpenAIChat(id="gpt-4o-mini") ) - print(" ✓ News Agent created") # Weather Agent: Specializes in weather forecasting and analysis weather_agent = Agent( name="Weather Agent", role="Get weather forecasts and provide weather analysis", - model=OpenAIChat(id=MODEL_ID), + model=OpenAIChat(id="gpt-4o-mini"), ) - print(" ✓ Weather Agent created") # Create a team with coordination mode # The "coordinate" mode allows agents to work together and share information - print("\n2. Creating a team with coordination capabilities...") team = Team( name="News and Weather Team", mode="coordinate", # Agents will coordinate their responses members=[news_agent, weather_agent], ) - print(" ✓ Team created with 2 agents") # Run a task that requires team coordination # The team will automatically determine which agent(s) should respond - print("\n3. Running team task...") - print(" Query: 'What is the weather in Tokyo?'") - response = team.run("What is the weather in Tokyo?") - print("\n4. Team Response:") + print("\nTeam Response:") print("-" * 60) print(f"{response.content}") print("-" * 60) - # The team intelligently routes the query to the Weather Agent - # since it's weather-related, demonstrating smart task delegation - - except Exception as e: - print(f"Error during basic agents demonstration: {e}") - print("This might be due to API issues or configuration problems") - - -async def main(): - """ - Main function that orchestrates the demonstration. 
- - This async function handles: - - Environment validation - - Running the basic agents demonstration - - Error handling and user feedback - """ - print("Welcome to Agno Basic Agents Demo") - print("This demo shows how to create and coordinate AI agents") - print() - - # Validate environment setup - if not check_environment(): - print("Cannot proceed without proper API configuration") - return - - # Run demonstrations - print("\nStarting demonstrations...") - - # Basic agents and teams demonstration - try: - demonstrate_basic_agents() - print("\n✓ Demo completed successfully!") - print("\nKey Takeaways:") - print("- Agents can have specialized roles and expertise") - print("- Teams enable multiple agents to collaborate on tasks") - print("- Coordination mode allows intelligent task delegation") - print("- AgentOps tracks all agent interactions for monitoring") + agentops.end_trace(tracer, end_state="Success") except Exception as e: - print(f"Demo failed: {e}") - print("Please check your API keys and network connection") + print(f"An error occurred: {e}") + agentops.end_trace(tracer, end_state="Error") if __name__ == "__main__": - """ - Entry point for the script. - - Uses asyncio to run the main function, preparing for future - async operations and maintaining consistency with other examples. 
- """ - asyncio.run(main()) + demonstrate_basic_agents() From 650f9dd17d323a59a76c40935f8fc83f19c5ed6e Mon Sep 17 00:00:00 2001 From: fenilfaldu Date: Fri, 13 Jun 2025 00:53:41 +0530 Subject: [PATCH 11/14] docs final update --- docs/v2/examples/agno.mdx | 250 ++++--------- examples/agno/agno_research_team.ipynb | 382 +++++++------------- examples/agno/agno_research_team.py | 167 ++++----- examples/agno/agno_tool_integrations.ipynb | 387 ++++----------------- examples/agno/agno_tool_integrations.py | 180 ++-------- examples/agno/agno_workflow_setup.ipynb | 372 ++++---------------- examples/agno/agno_workflow_setup.py | 133 ++----- 7 files changed, 441 insertions(+), 1430 deletions(-) diff --git a/docs/v2/examples/agno.mdx b/docs/v2/examples/agno.mdx index 42963263f..b587721e8 100644 --- a/docs/v2/examples/agno.mdx +++ b/docs/v2/examples/agno.mdx @@ -1,233 +1,119 @@ --- title: 'Agno' -description: 'Basic Agents and Teams with Agno' +description: 'Async Operations with Agno' --- -{/* SOURCE_FILE: examples/agno/agno_basic_agents.ipynb */} +{/* SOURCE_FILE: examples/agno/agno_async_operations.ipynb */} -_View Notebook on Github_ +_View Notebook on Github_ -# Basic Agents and Teams with Agno +# Async Operations with Agno -This notebook demonstrates the fundamentals of creating AI agents and organizing them into collaborative teams using the Agno framework. +This notebook demonstrates how to leverage asynchronous programming with Agno agents to execute multiple AI tasks concurrently, significantly improving performance and efficiency. ## Overview +This notebook demonstrates a practical example of concurrent AI operations where we: -In this example, you'll learn how to: -- **Create specialized AI agents** with specific roles and expertise -- **Organize agents into teams** for collaborative problem-solving -- **Use coordination modes** for effective agent communication -- **Monitor agent interactions** with AgentOps integration +1. 
**Initialize an Agno agent** with OpenAI's GPT-4o-mini model +2. **Create multiple async tasks** that query the AI about different programming languages +3. **Compare performance** between concurrent and sequential execution -## Key Concepts +By using async operations, you can run multiple AI queries simultaneously instead of waiting for each one to complete sequentially. This is particularly beneficial when dealing with I/O-bound operations like API calls to AI models. -### Agents -Individual AI entities with specific roles and capabilities. Each agent can be assigned a particular area of expertise, making them specialists in their domain. -### Teams -Collections of agents that work together to solve complex tasks. Teams can coordinate their responses, share information, and delegate tasks based on each agent's expertise. -### Coordination Modes -Different strategies for how agents within a team interact and collaborate. The "coordinate" mode enables intelligent task routing and information sharing. -## Pre-requisites - -Before running this notebook, ensure you have: -- **AgentOps API key** from [AgentOps](https://agentops.ai) -- **OpenAI API key** from [OpenAI](https://openai.com) - -Create a `.env` file in your project root with: -``` -AGENTOPS_API_KEY=your_agentops_key_here -OPENAI_API_KEY=your_openai_key_here -``` -## Implementation - -Let's start by importing the necessary libraries and setting up our environment. 
+## Installation + + ```bash pip + pip install agentops agno python-dotenv + ``` + ```bash poetry + poetry add agentops agno python-dotenv + ``` + ```bash uv + uv add agentops agno python-dotenv + ``` + ```python import os -from agno.agent import Agent -from agno.team import Team -from agno.models.openai import OpenAIChat import asyncio -import agentops from dotenv import load_dotenv -# Load environment variables from .env file -load_dotenv() - -# Initialize AgentOps for monitoring and analytics -agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) - -# Configuration -MODEL_ID = "gpt-4o-mini" +import agentops +from agno.agent import Agent +from agno.team import Team +from agno.models.openai import OpenAIChat ``` -## Environment Validation - -Before we create our agents, let's ensure all required API keys are properly configured: ```python -def check_environment(): - """ - Verify that all required API keys are properly configured. - - Returns: - bool: True if all required environment variables are set - """ - required_vars = ["AGENTOPS_API_KEY", "OPENAI_API_KEY"] - missing_vars = [var for var in required_vars if not os.getenv(var)] - - if missing_vars: - print(f"Missing required environment variables: {missing_vars}") - print("Please set these in your .env file or environment") - return False - - print("✓ Environment variables checked successfully") - return True +load_dotenv() +os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "your_openai_api_key_here") +os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY", "your_agentops_api_key_here") ``` -## Creating Agents and Teams - -Now let's create our specialized agents and organize them into a collaborative team: - -### Step 1: Create Individual Agents -We'll create two agents with different specializations: -- **News Agent**: Specializes in gathering and analyzing news -- **Weather Agent**: Specializes in weather forecasting and analysis -### Step 2: Form a Team -We'll combine these agents into a team 
using the "coordinate" mode, which enables: -- Intelligent task routing based on agent expertise -- Information sharing between agents -- Collaborative problem-solving -### Step 3: Execute Tasks -The team will automatically delegate tasks to the most appropriate agent(s) based on the query. +```python +agentops.init(auto_start_session=False, tags=["agno-example", "async-operation"]) +``` ```python -def demonstrate_basic_agents(): +async def demonstrate_async_operations(): """ - Demonstrate basic agent creation and team coordination. + Demonstrate concurrent execution of multiple AI agent tasks. + + This function creates multiple async tasks that execute concurrently rather than sequentially. + Each task makes an independent API call to the AI model, and asyncio.gather() + waits for all tasks to complete before returning results. - This function shows how to: - 1. Create specialized agents with specific roles - 2. Organize agents into a team - 3. Use the team to solve tasks that require multiple perspectives + Performance benefit: Instead of 3 sequential calls taking ~90 seconds total, + concurrent execution typically completes in ~30 seconds. """ - print("\n" + "=" * 60) - print("BASIC AGENTS AND TEAMS DEMONSTRATION") - print("=" * 60) + tracer = agentops.start_trace(trace_name="Agno Async Operations Example",) try: - # Create individual agents with specific roles - # Each agent has a name and a role that defines its expertise + # Initialize AI agent with specified model + agent = Agent(model=OpenAIChat(id="gpt-4o-mini")) - print("\n1. 
Creating specialized agents...") + async def task1(): + """Query AI about Python programming language.""" + response = await agent.arun("Explain Python programming language in one paragraph") + return f"Python: {response.content}" + + async def task2(): + """Query AI about JavaScript programming language.""" + response = await agent.arun("Explain JavaScript programming language in one paragraph") + return f"JavaScript: {response.content}" + + async def task3(): + """Query AI for comparison between programming languages.""" + response = await agent.arun("Compare Python and JavaScript briefly") + return f"Comparison: {response.content}" + + # Execute all tasks concurrently using asyncio.gather() + results = await asyncio.gather(task1(), task2(), task3()) - # News Agent: Specializes in gathering and analyzing news information - news_agent = Agent( - name="News Agent", - role="Get the latest news and provide news analysis", - model=OpenAIChat(id=MODEL_ID) - ) - print(" ✓ News Agent created") - - # Weather Agent: Specializes in weather forecasting and analysis - weather_agent = Agent( - name="Weather Agent", - role="Get weather forecasts and provide weather analysis", - model=OpenAIChat(id=MODEL_ID) - ) - print(" ✓ Weather Agent created") - - # Create a team with coordination mode - # The "coordinate" mode allows agents to work together and share information - print("\n2. Creating a team with coordination capabilities...") - team = Team( - name="News and Weather Team", - mode="coordinate", # Agents will coordinate their responses - members=[news_agent, weather_agent] - ) - print(" ✓ Team created with 2 agents") - - # Run a task that requires team coordination - # The team will automatically determine which agent(s) should respond - print("\n3. Running team task...") - print(" Query: 'What is the weather in Tokyo?'") - - response = team.run("What is the weather in Tokyo?") - - print("\n4. 
Team Response:") - print("-" * 60) - print(f"{response.content}") - print("-" * 60) - - - except Exception as e: - print(f"Error during basic agents demonstration: {e}") - print("This might be due to API issues or configuration problems") -``` -## Running the Demo - -Let's execute our main function to see the agents and teams in action: - - -```python -async def main(): - """ - Main function that orchestrates the demonstration. - - This async function handles: - - Environment validation - - Running the basic agents demonstration - - Error handling and user feedback - """ - print("Welcome to Agno Basic Agents Demo") - print("This demo shows how to create and coordinate AI agents") - print() - - # Validate environment setup - if not check_environment(): - print("Cannot proceed without proper API configuration") - return + for i, result in enumerate(results, 1): + print(f"\nTask {i} Result:") + print(result) + print("-" * 50) - # Run demonstrations - print("\nStarting demonstrations...") + agentops.end_trace(tracer, end_state="Success") - # Basic agents and teams demonstration - try: - demonstrate_basic_agents() - print("\n✓ Demo completed successfully!") - print("\nKey Takeaways:") - print("- Agents can have specialized roles and expertise") - print("- Teams enable multiple agents to collaborate on tasks") - print("- Coordination mode allows intelligent task delegation") - print("- AgentOps tracks all agent interactions for monitoring") - except Exception as e: - print(f"Demo failed: {e}") - print("Please check your API keys and network connection") + print(f"An error occurred: {e}") + agentops.end_trace(tracer, end_state="Error") ``` -## Execute the Demo - -Run the cell below to see how agents collaborate within a team: ```python -if __name__ == "__main__": - """ - Entry point for the script. - - Uses asyncio to run the main function, preparing for future - async operations and maintaining consistency with other examples. 
- """ - asyncio.run(main()) +await demonstrate_async_operations() ``` - diff --git a/examples/agno/agno_research_team.ipynb b/examples/agno/agno_research_team.ipynb index 219b8ef32..be0764f94 100644 --- a/examples/agno/agno_research_team.ipynb +++ b/examples/agno/agno_research_team.ipynb @@ -1,87 +1,82 @@ { "cells": [ { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "# Collaborative Research Team with Agno\n", - "\n", - "This notebook demonstrates how to build a sophisticated multi-agent research team where specialized agents collaborate to provide comprehensive insights from different perspectives.\n", - "\n", - "## Overview\n", - "\n", - "In this advanced example, you'll learn how to:\n", - "- **Create specialized research agents** with domain-specific tools\n", - "- **Build collaborative teams** that discuss and reach consensus\n", - "- **Integrate multiple research tools** (Google Search, HackerNews, Arxiv, DuckDuckGo)\n", - "- **Enable real-time streaming** of agent discussions\n", - "- **Monitor complex interactions** with AgentOps\n", - "\n", - "## Architecture\n", - "\n", - "Our research team consists of four specialized agents:\n", - "\n", - "1. **Reddit Researcher** - Community insights and user experiences\n", - "2. **HackerNews Researcher** - Technical discussions and industry trends\n", - "3. **Academic Researcher** - Scholarly papers and evidence-based research\n", - "4. 
**Twitter Researcher** - Real-time trends and public sentiment\n", - "\n", - "Each agent brings unique tools and perspectives to create a comprehensive research output.\n", - "\n" - ] - }, - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, + "cell_type": "markdown", + "id": "e78c48e2", + "metadata": {}, "source": [ - "## Research Tools Overview\n", - "\n", - "Our research team uses specialized tools for different platforms:\n", - "\n", - "- **GoogleSearchTools**: General web search and Reddit discovery\n", - "- **HackerNewsTools**: Direct access to HackerNews API for tech discussions\n", - "- **ArxivTools**: Academic paper search and retrieval\n", - "- **DuckDuckGoTools**: Privacy-focused search for Twitter/X content\n", - "\n", - "Each tool is assigned to agents based on their research domain.\n", - "\n", "\n", - "## Pre-requisites\n", - "\n", - "Before running this notebook, ensure you have:\n", - "- **AgentOps API key** from [AgentOps](https://agentops.ai)\n", - "- **OpenAI API key** from [OpenAI](https://openai.com)\n", - "- **Optional**: API keys for specific research tools (if required)\n", - "\n", - "Create a `.env` file in your project root:\n", - "```\n", - "AGENTOPS_API_KEY=your_agentops_key_here\n", - "OPENAI_API_KEY=your_openai_key_here\n", - "```\n" + "Collaborative Research Team with Agno\n", + "--------------------------------------\n", + "\n", + "This example demonstrates how to create a sophisticated research team with multiple specialized agents,\n", + "each equipped with different tools and expertise. The team collaborates to research topics from\n", + "multiple perspectives, providing comprehensive insights.\n", + "\n", + "**Overview:**\n", + "\n", + "This example creates a research team consisting of four specialized agents:\n", + "\n", + "1. 
**Reddit Researcher**\n", + " - Focus: Community discussions and user experiences\n", + " - Tools: Google Search (to find Reddit discussions)\n", + " - Expertise: Analyzing user opinions, practical advice, and real-world experiences\n", + " - Role: Provides insights from community perspectives\n", + "\n", + "2. **HackerNews Researcher**\n", + " - Focus: Technical discussions and industry trends\n", + " - Tools: HackerNews API\n", + " - Expertise: Technical analysis and industry insights\n", + " - Role: Provides technical and startup ecosystem perspectives\n", + "\n", + "3. **Academic Paper Researcher**\n", + " - Focus: Scholarly research and evidence-based findings\n", + " - Tools: Google Search + Arxiv API\n", + " - Expertise: Academic literature and research methodology\n", + " - Role: Provides evidence-based academic insights\n", + "\n", + "4. **Twitter Researcher**\n", + " - Focus: Real-time trends and public sentiment\n", + " - Tools: DuckDuckGo Search\n", + " - Expertise: Current events and public opinion\n", + " - Role: Provides real-time social media insights\n", + "\n", + "**Team Collaboration:**\n", + "\n", + "- Mode: Collaborative discussion\n", + "- Coordination: Team uses GPT-4 for discussion management\n", + "- Process: \n", + " 1. Each agent researches independently using their tools\n", + " 2. Agents share findings and discuss implications\n", + " 3. Team works towards consensus through structured discussion\n", + " 4. 
Discussion continues until comprehensive understanding is reached\n", + "\n", + "**Features Demonstrated:**\n", + "\n", + "- Creating specialized agents with specific research tools\n", + "- Building collaborative teams that discuss and reach consensus\n", + "- Using various research tools (Google Search, HackerNews, Arxiv, DuckDuckGo)\n", + "- Enabling real-time streaming of agent discussions\n", + "- Tracking agent interactions with AgentOps\n" ] }, { - "cell_type": "markdown", - "id": "ebf75e5f", + "cell_type": "code", + "execution_count": null, + "id": "c9f7fb8d", "metadata": {}, + "outputs": [], "source": [ - "## Implementation\n", - "\n", - "Let's start by importing the necessary libraries and tools for our research team." + "# Install the required dependencies:\n", + "%pip install agentops\n", + "%pip install agno\n", + "%pip install python-dotenv" ] }, { "cell_type": "code", - "execution_count": null, - "id": "49a2467d", + "execution_count": 7, + "id": "08978603", "metadata": {}, "outputs": [], "source": [ @@ -94,134 +89,64 @@ "from agno.tools.arxiv import ArxivTools\n", "from agno.tools.duckduckgo import DuckDuckGoTools\n", "from agno.models.openai import OpenAIChat\n", - "import asyncio\n", "import agentops\n", - "from dotenv import load_dotenv\n", - "\n", - "# Load environment variables\n", - "load_dotenv()\n", - "\n", - "# Initialize AgentOps for monitoring and analytics\n", - "agentops.init(api_key=os.getenv(\"AGENTOPS_API_KEY\"))\n", - "\n", - "# Configuration\n", - "MODEL_ID = \"gpt-4o-mini\" # Default model for agents" - ] - }, - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## Environment Setup\n", - "\n", - "Initialize AgentOps for monitoring and configure the default model:\n" + "from dotenv import load_dotenv" ] }, { "cell_type": "code", - "execution_count": null, - "id": "0c640e31", - "metadata": { - "lines_to_next_cell": 1 - }, + "execution_count": 8, + "id": "143be722", + "metadata": {}, 
"outputs": [], "source": [ - "def check_environment():\n", - " \"\"\"\n", - " Verify that all required API keys are properly configured.\n", - " \n", - " Returns:\n", - " bool: True if all required environment variables are set\n", - " \"\"\"\n", - " required_vars = [\"AGENTOPS_API_KEY\", \"OPENAI_API_KEY\"]\n", - " missing_vars = [var for var in required_vars if not os.getenv(var)]\n", - "\n", - " if missing_vars:\n", - " print(f\"Missing required environment variables: {missing_vars}\")\n", - " print(\"Please set these in your .env file or environment\")\n", - " return False\n", - "\n", - " print(\"✓ Environment variables checked successfully\")\n", - " return True" + "# Load environment variables\n", + "load_dotenv()\n", + "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\", \"your_openai_api_key_here\")\n", + "os.environ[\"AGENTOPS_API_KEY\"] = os.getenv(\"AGENTOPS_API_KEY\", \"your_agentops_api_key_here\")\n" ] }, { - "cell_type": "markdown", - "id": "d9283778", + "cell_type": "code", + "execution_count": null, + "id": "2e071b24", "metadata": {}, + "outputs": [], "source": [ - "## Agent Setup Details\n", - "\n", - "In the following function, we create four specialized research agents, each with unique characteristics:\n", - "\n", - "### 1. Reddit Researcher\n", - "- **Purpose**: Captures community perspectives and real-world experiences\n", - "- **Tool**: GoogleSearchTools (to find Reddit discussions)\n", - "- **Model**: GPT-4 for nuanced understanding of community sentiment\n", - "\n", - "\n", - "### 2. HackerNews Researcher\n", - "- **Purpose**: Provides technical depth and industry insights\n", - "- **Tool**: HackerNewsTools (direct API access)\n", - "- **Model**: GPT-4 for technical comprehension\n", - "\n", - "\n", - "### 3. 
Academic Paper Researcher\n", - "- **Purpose**: Brings evidence-based research and scholarly perspectives\n", - "- **Tools**: GoogleSearchTools + ArxivTools (comprehensive academic search)\n", - "- **Model**: GPT-4 for understanding complex research\n", - "\n", - "\n", - "### 4. Twitter Researcher\n", - "- **Purpose**: Captures real-time trends and public sentiment\n", - "- **Tool**: DuckDuckGoTools (privacy-focused search)\n", - "- **Model**: GPT-4 for trend analysis\n", - "\n", - "\n", - "### Team Configuration\n", - "The agents are organized into a collaborative team with:\n", - "- **Collaboration Mode**: Enables discussion and consensus building\n", - "- **Discussion Master**: Coordinates the conversation\n", - "- **Context Preservation**: Maintains conversation history\n", - "- **Transparent Process**: Shows tool usage and reasoning" + "# Initialize AgentOps for monitoring and analytics\n", + "agentops.init(auto_start_session=False, tags=[\"agno-example\", \"research-team\"])" ] }, { "cell_type": "code", - "execution_count": null, - "id": "561993af", + "execution_count": 5, + "id": "8eb6fbe2", "metadata": {}, "outputs": [], "source": [ "def demonstrate_research_team():\n", " \"\"\"\n", " Demonstrate a collaborative research team with multiple specialized agents.\n", - " \n", + "\n", " This function creates a team of researchers, each with:\n", " - Specific expertise and research focus\n", " - Specialized tools for their domain\n", " - Custom instructions for their research approach\n", - " \n", + "\n", " The team collaborates to provide comprehensive research insights.\n", " \"\"\"\n", - " print(\"\\n\" + \"=\" * 60)\n", - " print(\"COLLABORATIVE RESEARCH TEAM DEMONSTRATION\")\n", - " print(\"=\" * 60)\n", + " tracer = agentops.start_trace(trace_name=\"Agno Research Team Demonstration\")\n", "\n", " try:\n", " print(\"\\n1. 
Creating specialized research agents...\")\n", - " \n", + "\n", " # Reddit Researcher: Focuses on community discussions and user experiences\n", " reddit_researcher = Agent(\n", " name=\"Reddit Researcher\",\n", " role=\"Research a topic on Reddit\",\n", - " model=OpenAIChat(id=\"gpt-4o\"), # Using more capable model for research\n", - " tools=[GoogleSearchTools()], # Google Search to find Reddit discussions\n", - " add_name_to_instructions=True, # Adds agent name to its instructions\n", + " model=OpenAIChat(id=\"gpt-4o\"), \n", + " tools=[GoogleSearchTools()], \n", + " add_name_to_instructions=True, \n", " instructions=dedent(\n", " \"\"\"\n", " You are a Reddit researcher specializing in community insights.\n", @@ -234,14 +159,13 @@ " \"\"\"\n", " ),\n", " )\n", - " print(\" ✓ Reddit Researcher created\")\n", "\n", " # HackerNews Researcher: Focuses on technical discussions and industry trends\n", " hackernews_researcher = Agent(\n", " name=\"HackerNews Researcher\",\n", " model=OpenAIChat(\"gpt-4o\"),\n", " role=\"Research a topic on HackerNews.\",\n", - " tools=[HackerNewsTools()], # Direct access to HackerNews API\n", + " tools=[HackerNewsTools()],\n", " add_name_to_instructions=True,\n", " instructions=dedent(\n", " \"\"\"\n", @@ -255,14 +179,13 @@ " \"\"\"\n", " ),\n", " )\n", - " print(\" ✓ HackerNews Researcher created\")\n", "\n", " # Academic Paper Researcher: Focuses on scholarly research and evidence\n", " academic_paper_researcher = Agent(\n", " name=\"Academic Paper Researcher\",\n", " model=OpenAIChat(\"gpt-4o\"),\n", " role=\"Research academic papers and scholarly content\",\n", - " tools=[GoogleSearchTools(), ArxivTools()], # Multiple tools for comprehensive research\n", + " tools=[GoogleSearchTools(), ArxivTools()], \n", " add_name_to_instructions=True,\n", " instructions=dedent(\n", " \"\"\"\n", @@ -276,14 +199,13 @@ " \"\"\"\n", " ),\n", " )\n", - " print(\" ✓ Academic Paper Researcher created\")\n", "\n", " # Twitter Researcher: Focuses on 
real-time trends and public sentiment\n", " twitter_researcher = Agent(\n", " name=\"Twitter Researcher\",\n", " model=OpenAIChat(\"gpt-4o\"),\n", " role=\"Research trending discussions and real-time updates\",\n", - " tools=[DuckDuckGoTools()], # DuckDuckGo for privacy-focused searching\n", + " tools=[DuckDuckGoTools()],\n", " add_name_to_instructions=True,\n", " instructions=dedent(\n", " \"\"\"\n", @@ -298,14 +220,12 @@ " \"\"\"\n", " ),\n", " )\n", - " print(\" ✓ Twitter Researcher created\")\n", "\n", " # Create collaborative team with advanced features\n", - " print(\"\\n2. Creating collaborative research team...\")\n", " agent_team = Team(\n", " name=\"Discussion Team\",\n", - " mode=\"collaborate\", # Agents work together and discuss findings\n", - " model=OpenAIChat(\"gpt-4o\"), # Model for team coordination\n", + " mode=\"collaborate\",\n", + " model=OpenAIChat(\"gpt-4o\"),\n", " members=[\n", " reddit_researcher,\n", " hackernews_researcher,\n", @@ -320,110 +240,36 @@ " \"You have to stop the discussion when you think the team has reached a consensus.\",\n", " ],\n", " success_criteria=\"The team has reached a consensus with insights from all perspectives.\",\n", - " enable_agentic_context=True, # Agents maintain context throughout discussion\n", - " add_context=True, # Include context in agent responses\n", - " show_tool_calls=True, # Display when agents use their tools\n", - " markdown=True, # Format output in markdown\n", - " debug_mode=True, # Show detailed execution information\n", - " show_members_responses=True, # Display individual agent responses\n", + " enable_agentic_context=True,\n", + " add_context=True,\n", + " show_tool_calls=True,\n", + " markdown=True,\n", + " debug_mode=True,\n", + " show_members_responses=True,\n", " )\n", - " print(\" ✓ Research team assembled with 4 specialized agents\")\n", "\n", - " # Execute collaborative research\n", - " print(\"\\n3. 
Starting collaborative research discussion...\")\n", - " print(\" Topic: 'What is the best way to learn to code?'\")\n", - " print(\"\\n\" + \"-\" * 60)\n", - " \n", " # Stream the team discussion in real-time\n", " agent_team.print_response(\n", " message=\"Start the discussion on the topic: 'What is the best way to learn to code?'\",\n", - " stream=True, # Stream responses as they're generated\n", - " stream_intermediate_steps=True, # Show intermediate thinking steps\n", + " stream=True,\n", + " stream_intermediate_steps=True,\n", " )\n", "\n", - " except Exception as e:\n", - " print(f\"\\nError during research team demonstration: {e}\")\n", - " print(\"This might be due to API rate limits or configuration issues\")" - ] - }, - { - "cell_type": "markdown", - "id": "aa0c0459", - "metadata": {}, - "source": [ - "## Running the Demo\n", + " agentops.end_trace(tracer, end_state=\"Success\")\n", "\n", - "Let's execute our main function to see the agents and teams in action:\n" + " except Exception:\n", + " agentops.end_trace(tracer, end_state=\"Error\")" ] }, { "cell_type": "code", "execution_count": null, - "id": "9c11fe36", + "id": "754f75be", "metadata": {}, "outputs": [], "source": [ - "async def main():\n", - " \"\"\"\n", - " Main function that orchestrates the research team demonstration.\n", - " \n", - " This async function handles:\n", - " - Environment validation\n", - " - Running the collaborative research team demo\n", - " - Error handling and user feedback\n", - " \"\"\"\n", - " print(\"Welcome to Agno Collaborative Research Team Demo\")\n", - " print(\"This demo shows how multiple specialized agents can work together\")\n", - " print(\"to provide comprehensive research insights from different perspectives.\")\n", - " \n", - " # Validate environment setup\n", - " if not check_environment():\n", - " print(\"Cannot proceed without proper API configuration\")\n", - " return\n", - "\n", - " # Run demonstration\n", - " print(\"\\nStarting research team 
demonstration...\")\n", - "\n", - " try:\n", - " demonstrate_research_team()\n", - " print(\"\\n\\n✓ Research team demo completed successfully!\")\n", - " print(\"\\nKey Takeaways:\")\n", - " print(\"- Specialized agents bring unique perspectives and tools\")\n", - " print(\"- Collaborative mode enables rich discussions between agents\")\n", - " print(\"- Each agent uses appropriate tools for their research domain\")\n", - " print(\"- Teams can reach consensus through structured discussion\")\n", - " print(\"- AgentOps tracks all interactions for analysis\")\n", - " \n", - " except Exception as e:\n", - " print(f\"Demo failed: {e}\")\n", - " print(\"Please check your API keys and network connection\")" - ] - }, - { - "cell_type": "markdown", - "id": "db07a0e4", - "metadata": {}, - "source": [ - "## Execute the Demo\n", "\n", - "Run the cell below to see how agents collaborate within a team:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4afc6da0", - "metadata": {}, - "outputs": [], - "source": [ - "if __name__ == \"__main__\":\n", - " \"\"\"\n", - " Entry point for the script.\n", - " \n", - " Uses asyncio to run the main function, maintaining consistency\n", - " with other examples and preparing for async operations.\n", - " \"\"\"\n", - " asyncio.run(main())" + "demonstrate_research_team()" ] } ], @@ -433,8 +279,22 @@ "main_language": "python", "notebook_metadata_filter": "-all" }, + "kernelspec": { + "display_name": "venv", + "language": "python", + "name": "python3" + }, "language_info": { - "name": "python" + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" } }, "nbformat": 4, diff --git a/examples/agno/agno_research_team.py b/examples/agno/agno_research_team.py index 18291f882..073fcf50a 100644 --- a/examples/agno/agno_research_team.py +++ 
b/examples/agno/agno_research_team.py @@ -5,7 +5,46 @@ each equipped with different tools and expertise. The team collaborates to research topics from multiple perspectives, providing comprehensive insights. -Key features demonstrated: +Overview: +--------- +This example creates a research team consisting of four specialized agents: + +1. Reddit Researcher + - Focus: Community discussions and user experiences + - Tools: Google Search (to find Reddit discussions) + - Expertise: Analyzing user opinions, practical advice, and real-world experiences + - Role: Provides insights from community perspectives + +2. HackerNews Researcher + - Focus: Technical discussions and industry trends + - Tools: HackerNews API + - Expertise: Technical analysis and industry insights + - Role: Provides technical and startup ecosystem perspectives + +3. Academic Paper Researcher + - Focus: Scholarly research and evidence-based findings + - Tools: Google Search + Arxiv API + - Expertise: Academic literature and research methodology + - Role: Provides evidence-based academic insights + +4. Twitter Researcher + - Focus: Real-time trends and public sentiment + - Tools: DuckDuckGo Search + - Expertise: Current events and public opinion + - Role: Provides real-time social media insights + +Team Collaboration: +------------------ +- Mode: Collaborative discussion +- Coordination: Team uses GPT-4 for discussion management +- Process: + 1. Each agent researches independently using their tools + 2. Agents share findings and discuss implications + 3. Team works towards consensus through structured discussion + 4. 
Discussion continues until comprehensive understanding is reached + +Features Demonstrated: +--------------------- - Creating specialized agents with specific research tools - Building collaborative teams that discuss and reach consensus - Using various research tools (Google Search, HackerNews, Arxiv, DuckDuckGo) @@ -13,7 +52,6 @@ - Tracking agent interactions with AgentOps """ -import os from textwrap import dedent from agno.agent import Agent from agno.team import Team @@ -22,7 +60,6 @@ from agno.tools.arxiv import ArxivTools from agno.tools.duckduckgo import DuckDuckGoTools from agno.models.openai import OpenAIChat -import asyncio import agentops from dotenv import load_dotenv @@ -30,29 +67,7 @@ load_dotenv() # Initialize AgentOps for monitoring and analytics -agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) - -# Configuration -MODEL_ID = "gpt-4o-mini" # Default model for agents - - -def check_environment(): - """ - Verify that all required API keys are properly configured. - - Returns: - bool: True if all required environment variables are set - """ - required_vars = ["AGENTOPS_API_KEY", "OPENAI_API_KEY"] - missing_vars = [var for var in required_vars if not os.getenv(var)] - - if missing_vars: - print(f"Missing required environment variables: {missing_vars}") - print("Please set these in your .env file or environment") - return False - - print("✓ Environment variables checked successfully") - return True +agentops.init(auto_start_session=False, tags=["agno-example", "research-team"]) def demonstrate_research_team(): @@ -66,20 +81,16 @@ def demonstrate_research_team(): The team collaborates to provide comprehensive research insights. """ - print("\n" + "=" * 60) - print("COLLABORATIVE RESEARCH TEAM DEMONSTRATION") - print("=" * 60) + tracer = agentops.start_trace(trace_name="Agno Research Team Demonstration") try: - print("\n1. 
Creating specialized research agents...") - # Reddit Researcher: Focuses on community discussions and user experiences reddit_researcher = Agent( name="Reddit Researcher", role="Research a topic on Reddit", - model=OpenAIChat(id="gpt-4o"), # Using more capable model for research - tools=[GoogleSearchTools()], # Google Search to find Reddit discussions - add_name_to_instructions=True, # Adds agent name to its instructions + model=OpenAIChat(id="gpt-4o"), + tools=[GoogleSearchTools()], + add_name_to_instructions=True, instructions=dedent( """ You are a Reddit researcher specializing in community insights. @@ -92,14 +103,13 @@ def demonstrate_research_team(): """ ), ) - print(" ✓ Reddit Researcher created") # HackerNews Researcher: Focuses on technical discussions and industry trends hackernews_researcher = Agent( name="HackerNews Researcher", model=OpenAIChat("gpt-4o"), role="Research a topic on HackerNews.", - tools=[HackerNewsTools()], # Direct access to HackerNews API + tools=[HackerNewsTools()], add_name_to_instructions=True, instructions=dedent( """ @@ -113,14 +123,13 @@ def demonstrate_research_team(): """ ), ) - print(" ✓ HackerNews Researcher created") # Academic Paper Researcher: Focuses on scholarly research and evidence academic_paper_researcher = Agent( name="Academic Paper Researcher", model=OpenAIChat("gpt-4o"), role="Research academic papers and scholarly content", - tools=[GoogleSearchTools(), ArxivTools()], # Multiple tools for comprehensive research + tools=[GoogleSearchTools(), ArxivTools()], add_name_to_instructions=True, instructions=dedent( """ @@ -134,14 +143,13 @@ def demonstrate_research_team(): """ ), ) - print(" ✓ Academic Paper Researcher created") # Twitter Researcher: Focuses on real-time trends and public sentiment twitter_researcher = Agent( name="Twitter Researcher", model=OpenAIChat("gpt-4o"), role="Research trending discussions and real-time updates", - tools=[DuckDuckGoTools()], # DuckDuckGo for privacy-focused searching + 
tools=[DuckDuckGoTools()], add_name_to_instructions=True, instructions=dedent( """ @@ -156,14 +164,12 @@ def demonstrate_research_team(): """ ), ) - print(" ✓ Twitter Researcher created") # Create collaborative team with advanced features - print("\n2. Creating collaborative research team...") agent_team = Team( name="Discussion Team", - mode="collaborate", # Agents work together and discuss findings - model=OpenAIChat("gpt-4o"), # Model for team coordination + mode="collaborate", + model=OpenAIChat("gpt-4o"), members=[ reddit_researcher, hackernews_researcher, @@ -178,74 +184,25 @@ def demonstrate_research_team(): "You have to stop the discussion when you think the team has reached a consensus.", ], success_criteria="The team has reached a consensus with insights from all perspectives.", - enable_agentic_context=True, # Agents maintain context throughout discussion - add_context=True, # Include context in agent responses - show_tool_calls=True, # Display when agents use their tools - markdown=True, # Format output in markdown - debug_mode=True, # Show detailed execution information - show_members_responses=True, # Display individual agent responses + enable_agentic_context=True, + add_context=True, + show_tool_calls=True, + markdown=True, + debug_mode=True, + show_members_responses=True, ) - print(" ✓ Research team assembled with 4 specialized agents") - - # Execute collaborative research - print("\n3. 
Starting collaborative research discussion...") - print(" Topic: 'What is the best way to learn to code?'") - print("\n" + "-" * 60) # Stream the team discussion in real-time agent_team.print_response( message="Start the discussion on the topic: 'What is the best way to learn to code?'", - stream=True, # Stream responses as they're generated - stream_intermediate_steps=True, # Show intermediate thinking steps + stream=True, + stream_intermediate_steps=True, ) - except Exception as e: - print(f"\nError during research team demonstration: {e}") - print("This might be due to API rate limits or configuration issues") + agentops.end_trace(tracer, end_state="Success") + except Exception: + agentops.end_trace(tracer, end_state="Error") -async def main(): - """ - Main function that orchestrates the research team demonstration. - - This async function handles: - - Environment validation - - Running the collaborative research team demo - - Error handling and user feedback - """ - print("Welcome to Agno Collaborative Research Team Demo") - print("This demo shows how multiple specialized agents can work together") - print("to provide comprehensive research insights from different perspectives.") - print() - # Validate environment setup - if not check_environment(): - print("Cannot proceed without proper API configuration") - return - - # Run demonstration - print("\nStarting research team demonstration...") - - try: - demonstrate_research_team() - print("\n\n✓ Research team demo completed successfully!") - print("\nKey Takeaways:") - print("- Specialized agents bring unique perspectives and tools") - print("- Collaborative mode enables rich discussions between agents") - print("- Each agent uses appropriate tools for their research domain") - print("- Teams can reach consensus through structured discussion") - print("- AgentOps tracks all interactions for analysis") - - except Exception as e: - print(f"Demo failed: {e}") - print("Please check your API keys and network 
connection") - - -if __name__ == "__main__": - """ - Entry point for the script. - - Uses asyncio to run the main function, maintaining consistency - with other examples and preparing for async operations. - """ - asyncio.run(main()) +demonstrate_research_team() diff --git a/examples/agno/agno_tool_integrations.ipynb b/examples/agno/agno_tool_integrations.ipynb index 8e7e38be5..cfdea9e56 100644 --- a/examples/agno/agno_tool_integrations.ipynb +++ b/examples/agno/agno_tool_integrations.ipynb @@ -1,430 +1,167 @@ { "cells": [ { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, + "cell_type": "markdown", + "id": "813b80b7", + "metadata": {}, "source": [ - "# Tool Integration with RAG (Retrieval-Augmented Generation)\n", - "\n", - "This notebook demonstrates how to build intelligent agents that can access and reason over external knowledge bases using RAG technology.\n", - "\n", - "## What is RAG?\n", - "\n", - "**Retrieval-Augmented Generation (RAG)** combines the power of large language models with the ability to retrieve information from external sources. Instead of relying solely on training data, RAG-enabled agents can:\n", - "\n", - "- Access up-to-date information from documents, websites, and databases\n", - "- Provide accurate, source-backed responses\n", - "- Reduce hallucinations by grounding answers in retrieved content\n", - "- Scale to large knowledge bases without retraining\n", "\n", - "## Key Components\n", + "# Tool Integration with RAG (Retrieval-Augmented Generation) in Agno\n", "\n", - "### 1. Knowledge Base\n", - "- Loads content from URLs, documents, or databases\n", - "- Processes and indexes information for efficient retrieval\n", + "This example demonstrates how to enhance Agno agents with RAG capabilities, allowing them to access and reason over external knowledge bases for more accurate and source-backed responses.\n", "\n", - "### 2. 
Vector Database\n", - "- Stores document embeddings (numerical representations)\n", - "- Enables semantic search based on meaning, not just keywords\n", - "- Supports hybrid search combining semantic and keyword matching\n", + "**Overview**\n", "\n", - "### 3. Embeddings & Reranking\n", - "- **Embeddings**: Convert text to vectors for similarity comparison\n", - "- **Reranking**: Improves search results by re-scoring them based on relevance\n", + "This example shows how to integrate RAG with Agno agents where we:\n", "\n", - "### 4. Reasoning Tools\n", - "- Enable step-by-step problem solving\n", - "- Show transparent reasoning process\n", - "- Combine retrieved information with logical thinking\n", + "1. **Set up a knowledge base** with documents, URLs, and other external sources\n", + "2. **Configure vector databases** (like Pinecone, Weaviate, or ChromaDB) for efficient semantic search\n", + "3. **Implement retrieval** using embeddings and reranking for accurate information access\n", + "4. 
**Create RAG-enabled agents** that can search, retrieve, and reason over the knowledge base\n", "\n", - "## Pre-requisites\n", - "\n", - "This demo requires three API keys:\n", - "- **AgentOps API key** from [AgentOps](https://agentops.ai)\n", - "- **OpenAI API key** from [OpenAI](https://openai.com)\n", - "- **Cohere API key** from [Cohere](https://cohere.com) (for embeddings)\n", - "\n", - "Create a `.env` file:\n", - "```\n", - "AGENTOPS_API_KEY=your_agentops_key\n", - "OPENAI_API_KEY=your_openai_key\n", - "COHERE_API_KEY=your_cohere_key\n", - "```\n" - ] - }, - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## Implementation Overview\n", + "By using RAG, agents can provide responses backed by external sources rather than relying solely on their training data, significantly improving accuracy and verifiability of their outputs.\n", "\n", - "Let's build a RAG-enabled agent that can access documentation and provide informed responses with source citations.\n" + "RAG enables agents to access and reason over large knowledge bases,\n" ] }, { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, + "cell_type": "code", + "execution_count": null, + "id": "dfa7ae6b", + "metadata": {}, + "outputs": [], "source": [ - "### Import Core Libraries\n", - "\n", - "First, we'll import the basic libraries needed for our agent:\n" + "# Install the required dependencies:\n", + "%pip install agentops\n", + "%pip install agno\n", + "%pip install python-dotenv" ] }, { "cell_type": "code", - "execution_count": null, - "id": "c49901be", + "execution_count": 24, + "id": "208656e8", "metadata": {}, "outputs": [], "source": [ "import os\n", "from agno.agent import Agent\n", - "from agno.team import Team\n", "from agno.models.openai import OpenAIChat\n", - "import asyncio\n", "import agentops\n", "from dotenv import load_dotenv" ] }, - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": 
"raw" - } - }, - "source": [ - "### Import RAG Components\n", - "\n", - "Now let's import the specialized components for RAG functionality:\n" - ] - }, { "cell_type": "code", - "execution_count": null, - "id": "a75c84fc", + "execution_count": 25, + "id": "0cc22e40", "metadata": {}, "outputs": [], "source": [ "# Knowledge and RAG components\n", - "from agno.knowledge.url import UrlKnowledge # For loading knowledge from URLs\n", - "from agno.vectordb.lancedb import LanceDb # Vector database for storing embeddings\n", - "from agno.vectordb.search import SearchType # Search strategies (hybrid, semantic, etc.)\n", - "from agno.embedder.cohere import CohereEmbedder # For creating text embeddings\n", - "from agno.reranker.cohere import CohereReranker # For improving search results\n", - "from agno.tools.reasoning import ReasoningTools # Advanced reasoning capabilities" - ] - }, - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "### Environment Setup\n", - "\n", - "Load environment variables and initialize monitoring:\n" + "from agno.knowledge.url import UrlKnowledge\n", + "from agno.vectordb.lancedb import LanceDb\n", + "from agno.vectordb.search import SearchType\n", + "from agno.embedder.cohere import CohereEmbedder\n", + "from agno.reranker.cohere import CohereReranker\n", + "from agno.tools.reasoning import ReasoningTools" ] }, { "cell_type": "code", "execution_count": null, - "id": "ee6b59d8", + "id": "50eedd68", "metadata": {}, "outputs": [], "source": [ "# Load environment variables\n", "load_dotenv()\n", - "\n", - "# Initialize AgentOps for monitoring\n", - "agentops.init(api_key=os.getenv(\"AGENTOPS_API_KEY\"))\n", - "\n", - "# API keys and configuration\n", - "cohere_api_key = os.getenv(\"COHERE_API_KEY\") # Required for embeddings and reranking\n", - "MODEL_ID = \"gpt-4o-mini\" # Default model for agents" - ] - }, - { - "cell_type": "raw", - "id": "5bfac31e", - "metadata": { - "vscode": { - "languageId": 
"raw" - } - }, - "source": [ - "## Environment Validation\n", - "\n", - "Before proceeding, let's ensure all required API keys are configured:\n" + "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\", \"your_openai_api_key_here\")\n", + "os.environ[\"AGENTOPS_API_KEY\"] = os.getenv(\"AGENTOPS_API_KEY\", \"your_agentops_api_key_here\")\n", + "os.environ[\"COHERE_API_KEY\"] = os.getenv(\"COHERE_API_KEY\", \"your_cohere_api_key_here\")" ] }, { "cell_type": "code", "execution_count": null, - "id": "f6923641", + "id": "51235ba1", "metadata": {}, "outputs": [], "source": [ - "def check_environment():\n", - " \"\"\"\n", - " Verify that all required API keys are properly configured.\n", - " \n", - " This demo requires:\n", - " - AGENTOPS_API_KEY: For monitoring agent behavior\n", - " - OPENAI_API_KEY: For the AI model\n", - " - COHERE_API_KEY: For embeddings and reranking\n", - " \n", - " Returns:\n", - " bool: True if all required environment variables are set\n", - " \"\"\"\n", - " required_vars = [\"AGENTOPS_API_KEY\", \"OPENAI_API_KEY\", \"COHERE_API_KEY\"]\n", - " missing_vars = [var for var in required_vars if not os.getenv(var)]\n", - "\n", - " if missing_vars:\n", - " print(f\"Missing required environment variables: {missing_vars}\")\n", - " print(\"Please set these in your .env file or environment\")\n", - " print(\"\\nExample .env file:\")\n", - " print(\"AGENTOPS_API_KEY=your_agentops_key\")\n", - " print(\"OPENAI_API_KEY=your_openai_key\")\n", - " print(\"COHERE_API_KEY=your_cohere_key\")\n", - " return False\n", - "\n", - " print(\"✓ Environment variables checked successfully\")\n", - " return True" - ] - }, - { - "cell_type": "raw", - "id": "2740686f", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## Building a RAG-Enabled Agent\n", - "\n", - "The following function demonstrates the complete process of creating a RAG-enabled agent:\n", - "\n", - "### Step 1: Knowledge Base Creation\n", - "We'll load documentation from a URL and prepare it for
semantic search.\n", - "\n", - "### Step 2: Vector Database Setup\n", - "Configure LanceDB with:\n", - "- **Hybrid search**: Combines keyword and semantic search\n", - "- **Cohere embeddings**: High-quality text representations\n", - "- **Reranking**: Improves result relevance\n", - "\n", - "### Step 3: Agent Configuration\n", - "Create an agent with:\n", - "- Access to the knowledge base\n", - "- On-demand search capabilities\n", - "- Reasoning tools for complex queries\n", - "- Instructions for proper citation\n" + "# Initialize AgentOps for monitoring\n", + "agentops.init(auto_start_session=False, tags=[\"agno-example\", \"tool-integrations\"])" ] }, { "cell_type": "code", - "execution_count": null, - "id": "01bc16c0", - "metadata": { - "lines_to_next_cell": 1 - }, + "execution_count": 28, + "id": "24da7b0b", + "metadata": {}, "outputs": [], "source": [ "def demonstrate_tool_integration():\n", " \"\"\"\n", " Demonstrate advanced tool integration with RAG and knowledge bases.\n", - " \n", + "\n", " This function shows how to:\n", " 1. Create a knowledge base from external sources\n", " 2. Set up a vector database with embeddings\n", " 3. Configure an agent with RAG capabilities\n", " 4. Enable reasoning tools for complex problem-solving\n", " \"\"\"\n", - " print(\"\\n\" + \"=\" * 60)\n", - " print(\"TOOL INTEGRATION WITH RAG (Retrieval-Augmented Generation)\")\n", - " print(\"=\" * 60)\n", - "\n", + " tracer = agentops.start_trace(trace_name=\"Agno Tool Integration Demonstration\")\n", " try:\n", - " print(\"\\n1. 
Setting up knowledge base and vector database...\")\n", - " \n", " # Create knowledge base from documentation URLs\n", " # This loads content from the specified URLs and prepares it for RAG\n", " knowledge_base = UrlKnowledge(\n", - " urls=[\"https://docs.agno.com/introduction/agents.md\"], # Documentation to learn from\n", - " # Configure vector database for efficient semantic search\n", + " urls=[\"https://docs.agno.com/introduction/agents.md\"],\n", " vector_db=LanceDb(\n", - " uri=\"tmp/lancedb\", # Local storage path for the database\n", - " table_name=\"agno_docs\", # Table to store document embeddings\n", - " search_type=SearchType.hybrid, # Combines keyword and semantic search\n", - " # Embedder converts text to numerical vectors for similarity search\n", + " uri=\"tmp/lancedb\",\n", + " table_name=\"agno_docs\",\n", + " search_type=SearchType.hybrid,\n", " embedder=CohereEmbedder(\n", - " id=\"embed-v4.0\", # Cohere's embedding model\n", - " api_key=cohere_api_key\n", + " id=\"embed-v4.0\",\n", + " \n", " ),\n", - " # Reranker improves search results by re-scoring them\n", " reranker=CohereReranker(\n", - " model=\"rerank-v3.5\", # Cohere's reranking model\n", - " api_key=cohere_api_key\n", + " model=\"rerank-v3.5\",\n", + " \n", " ),\n", " ),\n", " )\n", - " print(\" ✓ Knowledge base created from documentation\")\n", - " print(\" ✓ Vector database configured with hybrid search\")\n", "\n", " # Create an intelligent agent with RAG capabilities\n", - " print(\"\\n2. 
Creating RAG-enabled agent...\")\n", " agent = Agent(\n", - " model=OpenAIChat(id=MODEL_ID),\n", - " # Agentic RAG is automatically enabled when knowledge is provided\n", + " model=OpenAIChat(id=\"gpt-4o-mini\"),\n", " knowledge=knowledge_base,\n", - " # Allow the agent to search its knowledge base on demand\n", " search_knowledge=True,\n", - " # Add reasoning tools for step-by-step problem solving\n", " tools=[ReasoningTools(add_instructions=True)],\n", - " # Custom instructions for how the agent should behave\n", " instructions=[\n", - " \"Include sources in your response.\", # Cite where information comes from\n", - " \"Always search your knowledge before answering the question.\", # Use RAG first\n", - " \"Only include the output in your response. No other text.\", # Clean responses\n", + " \"Include sources in your response.\",\n", + " \"Always search your knowledge before answering the question.\",\n", + " \"Only include the output in your response. No other text.\",\n", " ],\n", - " markdown=True, # Format responses in markdown\n", " )\n", - " print(\" ✓ Agent created with:\")\n", - " print(\" - Knowledge base access\")\n", - " print(\" - On-demand search capability\")\n", - " print(\" - Reasoning tools\")\n", - " print(\" - Source citation requirements\")\n", "\n", - " # Test the RAG agent with a question about its knowledge base\n", - " print(\"\\n3. 
Testing RAG agent with knowledge query...\")\n", - " print(\" Question: 'What are Agents?'\")\n", - " print(\"\\n\" + \"-\" * 60)\n", - " \n", " # Print response with full reasoning process visible\n", " agent.print_response(\n", " \"What are Agents?\",\n", - " show_full_reasoning=True, # Shows how the agent searches and reasons\n", + " show_full_reasoning=True,\n", " )\n", - " \n", - " print(\"\\n\" + \"-\" * 60)\n", - " print(\"✓ RAG demonstration completed\")\n", - " print(\"\\nNotice how the agent:\")\n", - " print(\"- Searched the knowledge base for relevant information\")\n", - " print(\"- Used reasoning tools to formulate the answer\")\n", - " print(\"- Included sources from the documentation\")\n", - "\n", - " except Exception as e:\n", - " print(f\"\\nError during tool integration: {e}\")\n", - " print(\"This might be due to:\")\n", - " print(\"- Missing API keys (especially COHERE_API_KEY)\")\n", - " print(\"- Network issues accessing documentation URLs\")\n", - " print(\"- Vector database initialization problems\")" - ] - }, - { - "cell_type": "raw", - "id": "8495b685", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## Main Orchestration Function\n", - "\n", - "The main function coordinates the entire demonstration:\n" + " agentops.end_trace(tracer, end_state=\"Success\")\n", + " except Exception:\n", + " agentops.end_trace(tracer, end_state=\"Error\")" ] }, { "cell_type": "code", "execution_count": null, - "id": "37692f77", + "id": "3cd3e2dc", "metadata": {}, "outputs": [], "source": [ - "async def main():\n", - " \"\"\"\n", - " Main function that orchestrates the tool integration demonstration.\n", - " \n", - " This async function handles:\n", - " - Environment validation\n", - " - Running the RAG and tool integration demo\n", - " - Error handling and user feedback\n", - " \"\"\"\n", - " print(\"Welcome to Agno Tool Integration Demo\")\n", - " print(\"This demo showcases RAG (Retrieval-Augmented Generation)\")\n", - " 
print(\"and advanced tool integration capabilities.\")\n", - " print()\n", - " \n", - " # Validate environment setup\n", - " if not check_environment():\n", - " print(\"\\nCannot proceed without proper API configuration\")\n", - " print(\"Please obtain a Cohere API key from: https://cohere.com\")\n", - " return\n", - "\n", - " # Run demonstration\n", - " print(\"\\nStarting tool integration demonstration...\")\n", - "\n", - " try:\n", - " demonstrate_tool_integration()\n", - " print(\"\\n\\n✓ Tool integration demo completed successfully!\")\n", - " print(\"\\nKey Takeaways:\")\n", - " print(\"- RAG enables agents to access external knowledge bases\")\n", - " print(\"- Vector databases provide efficient semantic search\")\n", - " print(\"- Embeddings and reranking improve information retrieval\")\n", - " print(\"- Reasoning tools enhance problem-solving capabilities\")\n", - " print(\"- AgentOps tracks all tool usage and knowledge searches\")\n", - " \n", - " except Exception as e:\n", - " print(f\"Demo failed: {e}\")\n", - " print(\"Please check your API keys and network connection\")" - ] - }, - { - "cell_type": "raw", - "id": "87b383cf", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## Execute the Demo\n", "\n", - "Run the following cell to see RAG in action:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "095a7df6", - "metadata": {}, - "outputs": [], - "source": [ - "if __name__ == \"__main__\":\n", - " \"\"\"\n", - " Entry point for the script.\n", - " \n", - " Uses asyncio to run the main function, maintaining consistency\n", - " with other examples and preparing for async operations.\n", - " \"\"\"\n", - " asyncio.run(main())" + "demonstrate_tool_integration()" ] } ], diff --git a/examples/agno/agno_tool_integrations.py b/examples/agno/agno_tool_integrations.py index a185fd700..6cc0d0b4a 100644 --- a/examples/agno/agno_tool_integrations.py +++ b/examples/agno/agno_tool_integrations.py @@ -1,11 +1,17 
@@ """ -Tool Integration with RAG (Retrieval-Augmented Generation) in Agno +# Tool Integration with RAG (Retrieval-Augmented Generation) in Agno -This example demonstrates how to create intelligent agents with: -- Knowledge bases from external sources (URLs, documents) -- Vector databases for efficient semantic search -- Embeddings and reranking for accurate information retrieval -- Reasoning tools for enhanced problem-solving capabilities +This example demonstrates how to enhance Agno agents with RAG capabilities, allowing them to access and reason over external knowledge bases for more accurate and source-backed responses. + +## Overview +This example shows how to integrate RAG with Agno agents where we: + +1. **Set up a knowledge base** with documents, URLs, and other external sources +2. **Configure vector databases** (like Pinecone, Weaviate, or ChromaDB) for efficient semantic search +3. **Implement retrieval** using embeddings and reranking for accurate information access +4. **Create RAG-enabled agents** that can search, retrieve, and reason over the knowledge base + +By using RAG, agents can provide responses backed by external sources rather than relying solely on their training data, significantly improving accuracy and verifiability of their outputs. RAG enables agents to access and reason over large knowledge bases, providing accurate, source-backed responses instead of relying solely on training data. @@ -14,55 +20,25 @@ import os from agno.agent import Agent from agno.models.openai import OpenAIChat -import asyncio import agentops from dotenv import load_dotenv # Knowledge and RAG components -from agno.knowledge.url import UrlKnowledge # For loading knowledge from URLs -from agno.vectordb.lancedb import LanceDb # Vector database for storing embeddings -from agno.vectordb.search import SearchType # Search strategies (hybrid, semantic, etc.) 
-from agno.embedder.cohere import CohereEmbedder  # For creating text embeddings -from agno.reranker.cohere import CohereReranker  # For improving search results -from agno.tools.reasoning import ReasoningTools  # Advanced reasoning capabilities +from agno.knowledge.url import UrlKnowledge +from agno.vectordb.lancedb import LanceDb +from agno.vectordb.search import SearchType +from agno.embedder.cohere import CohereEmbedder +from agno.reranker.cohere import CohereReranker +from agno.tools.reasoning import ReasoningTools # Load environment variables load_dotenv() # Initialize AgentOps for monitoring -agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) +agentops.init(auto_start_session=False, tags=["agno-example", "tool-integrations"]) # API keys and configuration -cohere_api_key = os.getenv("COHERE_API_KEY")  # Required for embeddings and reranking -MODEL_ID = "gpt-4o-mini"  # Default model for agents - - -def check_environment(): - """ - Verify that all required API keys are properly configured. - - This demo requires: - - AGENTOPS_API_KEY: For monitoring agent behavior - - OPENAI_API_KEY: For the AI model - - COHERE_API_KEY: For embeddings and reranking - - Returns: - bool: True if all required environment variables are set - """ - required_vars = ["AGENTOPS_API_KEY", "OPENAI_API_KEY", "COHERE_API_KEY"] - missing_vars = [var for var in required_vars if not os.getenv(var)] - - if missing_vars: - print(f"Missing required environment variables: {missing_vars}") - print("Please set these in your .env file or environment") - print("\nExample .env file:") - print("AGENTOPS_API_KEY=your_agentops_key") - print("OPENAI_API_KEY=your_openai_key") - print("COHERE_API_KEY=your_cohere_key") - return False - - print("✓ Environment variables checked successfully") - return True +os.environ["COHERE_API_KEY"] = os.getenv("COHERE_API_KEY", "your_cohere_api_key_here") def demonstrate_tool_integration(): @@ -75,130 +51,46 @@ def demonstrate_tool_integration(): 3. Configure an agent with RAG capabilities 4.
Enable reasoning tools for complex problem-solving """ - print("\n" + "=" * 60) - print("TOOL INTEGRATION WITH RAG (Retrieval-Augmented Generation)") - print("=" * 60) - + tracer = agentops.start_trace(trace_name="Agno Tool Integration Demonstration") try: - print("\n1. Setting up knowledge base and vector database...") - # Create knowledge base from documentation URLs # This loads content from the specified URLs and prepares it for RAG knowledge_base = UrlKnowledge( - urls=["https://docs.agno.com/introduction/agents.md"], # Documentation to learn from - # Configure vector database for efficient semantic search + urls=["https://docs.agno.com/introduction/agents.md"], vector_db=LanceDb( - uri="tmp/lancedb", # Local storage path for the database - table_name="agno_docs", # Table to store document embeddings - search_type=SearchType.hybrid, # Combines keyword and semantic search - # Embedder converts text to numerical vectors for similarity search + uri="tmp/lancedb", + table_name="agno_docs", + search_type=SearchType.hybrid, embedder=CohereEmbedder( - id="embed-v4.0", # Cohere's embedding model - api_key=cohere_api_key, + id="embed-v4.0", ), - # Reranker improves search results by re-scoring them reranker=CohereReranker( - model="rerank-v3.5", # Cohere's reranking model - api_key=cohere_api_key, + model="rerank-v3.5", ), ), ) - print(" ✓ Knowledge base created from documentation") - print(" ✓ Vector database configured with hybrid search") # Create an intelligent agent with RAG capabilities - print("\n2. 
Creating RAG-enabled agent...") agent = Agent( - model=OpenAIChat(id=MODEL_ID), - # Agentic RAG is automatically enabled when knowledge is provided + model=OpenAIChat(id="gpt-4o-mini"), knowledge=knowledge_base, - # Allow the agent to search its knowledge base on demand search_knowledge=True, - # Add reasoning tools for step-by-step problem solving tools=[ReasoningTools(add_instructions=True)], - # Custom instructions for how the agent should behave instructions=[ - "Include sources in your response.", # Cite where information comes from - "Always search your knowledge before answering the question.", # Use RAG first - "Only include the output in your response. No other text.", # Clean responses + "Include sources in your response.", + "Always search your knowledge before answering the question.", + "Only include the output in your response. No other text.", ], - markdown=True, # Format responses in markdown ) - print(" ✓ Agent created with:") - print(" - Knowledge base access") - print(" - On-demand search capability") - print(" - Reasoning tools") - print(" - Source citation requirements") - - # Test the RAG agent with a question about its knowledge base - print("\n3. 
Testing RAG agent with knowledge query...") - print(" Question: 'What are Agents?'") - print("\n" + "-" * 60) # Print response with full reasoning process visible agent.print_response( "What are Agents?", - show_full_reasoning=True, # Shows how the agent searches and reasons + show_full_reasoning=True, ) + agentops.end_trace(tracer, end_state="Success") + except Exception: + agentops.end_trace(tracer, end_state="Error") - print("\n" + "-" * 60) - print("✓ RAG demonstration completed") - print("\nNotice how the agent:") - print("- Searched the knowledge base for relevant information") - print("- Used reasoning tools to formulate the answer") - print("- Included sources from the documentation") - except Exception as e: - print(f"\nError during tool integration: {e}") - print("This might be due to:") - print("- Missing API keys (especially COHERE_API_KEY)") - print("- Network issues accessing documentation URLs") - print("- Vector database initialization problems") - - -async def main(): - """ - Main function that orchestrates the tool integration demonstration. 
- - This async function handles: - - Environment validation - - Running the RAG and tool integration demo - - Error handling and user feedback - """ - print("Welcome to Agno Tool Integration Demo") - print("This demo showcases RAG (Retrieval-Augmented Generation)") - print("and advanced tool integration capabilities.") - print() - - # Validate environment setup - if not check_environment(): - print("\nCannot proceed without proper API configuration") - print("Please obtain a Cohere API key from: https://cohere.com") - return - - # Run demonstration - print("\nStarting tool integration demonstration...") - - try: - demonstrate_tool_integration() - print("\n\n✓ Tool integration demo completed successfully!") - print("\nKey Takeaways:") - print("- RAG enables agents to access external knowledge bases") - print("- Vector databases provide efficient semantic search") - print("- Embeddings and reranking improve information retrieval") - print("- Reasoning tools enhance problem-solving capabilities") - print("- AgentOps tracks all tool usage and knowledge searches") - - except Exception as e: - print(f"Demo failed: {e}") - print("Please check your API keys and network connection") - - -if __name__ == "__main__": - """ - Entry point for the script. - - Uses asyncio to run the main function, maintaining consistency - with other examples and preparing for async operations. 
- """ - asyncio.run(main()) +demonstrate_tool_integration() diff --git a/examples/agno/agno_workflow_setup.ipynb b/examples/agno/agno_workflow_setup.ipynb index 8e8035b5f..64a5017dd 100644 --- a/examples/agno/agno_workflow_setup.ipynb +++ b/examples/agno/agno_workflow_setup.ipynb @@ -1,90 +1,51 @@ { "cells": [ { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, + "cell_type": "markdown", + "id": "2ac8b4d4", + "metadata": {}, "source": [ - "# Workflow Setup with Intelligent Caching\n", - "\n", - "This notebook demonstrates how to build custom workflows in Agno that can orchestrate complex agent interactions with performance optimizations.\n", - "\n", - "## What are Workflows?\n", - "\n", - "Workflows are powerful abstractions that allow you to:\n", - "- **Orchestrate** multiple agents and teams in complex patterns\n", - "- **Maintain state** across multiple invocations\n", - "- **Implement custom logic** like caching, routing, or validation\n", - "- **Optimize performance** through intelligent design patterns\n", - "\n", - "## Why Use Caching?\n", "\n", - "Caching is particularly valuable for AI agents because:\n", - "- **Cost Reduction**: Avoid redundant API calls for identical queries\n", - "- **Performance**: Instant responses for repeated questions\n", - "- **Development**: Faster iteration during testing\n", - "- **User Experience**: Immediate responses for common queries\n", + "# Workflow Setup with Caching in Agno\n", "\n", - "## Use Cases\n", + "This example demonstrates how to create efficient, stateful workflows in Agno that orchestrate complex agent interactions while maintaining performance through caching and state management.\n", "\n", - "This caching workflow pattern is ideal for:\n", - "- **FAQ Systems**: Where users ask similar questions repeatedly\n", - "- **Development/Testing**: To avoid API costs during iteration\n", - "- **Customer Support**: For common inquiries with standard responses\n", - "- **Documentation 
Assistants**: Where queries about specific topics repeat\n", + "## Overview\n", + "This example shows how to build reusable agent workflows where we:\n", "\n", - "## Pre-requisites\n", + "1. **Design workflow architecture** with custom logic and agent orchestration\n", + "2. **Implement caching mechanisms** to store and reuse expensive computations\n", + "3. **Manage session state** to maintain context across multiple interactions\n", + "4. **Set up response streaming** for real-time output handling\n", "\n", - "Ensure you have:\n", - "- **AgentOps API key** from [AgentOps](https://agentops.ai)\n", - "- **OpenAI API key** from [OpenAI](https://openai.com)\n", - "\n", - "Create a `.env` file:\n", - "```\n", - "AGENTOPS_API_KEY=your_agentops_key\n", - "OPENAI_API_KEY=your_openai_key\n", - "```\n" + "By using workflows, you can create sophisticated agent pipelines that are both performant and maintainable, with built-in optimizations for repeated operations and long-running sessions.\n", + "\n" ] }, { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, + "cell_type": "code", + "execution_count": null, + "id": "40bf672a", + "metadata": {}, + "outputs": [], "source": [ - "## Implementation Overview\n", - "\n", - "Let's build a caching workflow that demonstrates how to optimize agent performance through intelligent response caching.\n" + "# Install the required dependencies:\n", + "%pip install agentops\n", + "%pip install agno\n", + "%pip install python-dotenv" ] }, { - "cell_type": "raw", + "cell_type": "code", + "execution_count": 15, + "id": "ee482533", "metadata": { - "vscode": { - "languageId": "raw" - } + "lines_to_next_cell": 2 }, - "source": [ - "### Import Required Libraries\n", - "\n", - "First, we'll import the necessary components for building workflows:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "70c1fba6", - "metadata": {}, "outputs": [], "source": [ + "from agno.agent import Agent, 
RunResponse\n", "import os\n", - "from agno.agent import Agent, RunResponse \n", - "from agno.team import Team\n", "import asyncio\n", "import agentops\n", "from dotenv import load_dotenv\n", @@ -95,123 +56,46 @@ "from typing import Iterator" ] }, - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "### Environment Setup\n", - "\n", - "Load environment variables and initialize AgentOps monitoring:\n" - ] - }, { "cell_type": "code", - "execution_count": null, - "id": "a50a15ac", + "execution_count": 16, + "id": "eddbb872", "metadata": {}, "outputs": [], "source": [ "# Load environment variables\n", "load_dotenv()\n", - "\n", - "# Initialize AgentOps for monitoring workflow execution\n", - "agentops.init(api_key=os.getenv(\"AGENTOPS_API_KEY\"))\n", - "\n", - "# Configuration\n", - "MODEL_ID = \"gpt-4o-mini\" # Default model for agents" - ] - }, - { - "cell_type": "raw", - "id": "bcd36544", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## Environment Validation\n", - "\n", - "Before proceeding, let's ensure all required API keys are configured:\n" + "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\", \"your_openai_api_key_here\")\n", + "os.environ[\"AGENTOPS_API_KEY\"] = os.getenv(\"AGENTOPS_API_KEY\", \"your_agentops_api_key_here\")\n" ] }, { "cell_type": "code", "execution_count": null, - "id": "3e0f2612", - "metadata": { - "lines_to_next_cell": 1 - }, + "id": "6f234791", + "metadata": {}, "outputs": [], "source": [ - "def check_environment():\n", - " \"\"\"\n", - " Verify that all required API keys are properly configured.\n", - " \n", - " Returns:\n", - " bool: True if all required environment variables are set\n", - " \"\"\"\n", - " required_vars = [\"AGENTOPS_API_KEY\", \"OPENAI_API_KEY\"]\n", - " missing_vars = [var for var in required_vars if not os.getenv(var)]\n", - "\n", - " if missing_vars:\n", - " print(f\"Missing required environment variables: 
{missing_vars}\")\n", - " print(\"Please set these in your .env file or environment\")\n", - " return False\n", - "\n", - " print(\"✓ Environment variables checked successfully\")\n", - " return True" - ] - }, - { - "cell_type": "raw", - "id": "68b12e89", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## Building the CacheWorkflow Class\n", - "\n", - "Now let's create our custom workflow class that implements intelligent caching:\n", - "\n", - "### Key Components:\n", - "\n", - "1. **Workflow Base Class**: Inherit from `Workflow` to get state management capabilities\n", - "2. **Session State**: Persistent storage that survives across workflow runs\n", - "3. **Agent Integration**: Embed agents as workflow attributes\n", - "4. **Custom Logic**: Implement caching in the `run()` method\n", - "\n", - "### How It Works:\n", - "\n", - "1. **Check Cache**: Look for existing responses in session state\n", - "2. **Cache Hit**: Return immediately without API call\n", - "3. **Cache Miss**: Generate new response and cache it\n", - "4. 
**Stream Support**: Maintain real-time response streaming\n" + "agentops.init(auto_start_session=False, tags=[\"agno-example\", \"workflow-setup\"])\n" ] }, { "cell_type": "code", - "execution_count": null, - "id": "4f388879", + "execution_count": 18, + "id": "ff36679e", "metadata": {}, "outputs": [], "source": [ "class CacheWorkflow(Workflow):\n", " \"\"\"\n", " A workflow that demonstrates intelligent caching capabilities.\n", - " \n", + "\n", " This workflow:\n", " - Caches agent responses to avoid redundant API calls\n", " - Maintains session state across multiple invocations\n", " - Provides instant responses for repeated queries\n", " - Reduces costs and improves performance\n", - " \n", + "\n", " Use cases:\n", " - FAQ systems where questions repeat frequently\n", " - Development/testing to avoid repeated API calls\n", @@ -223,211 +107,85 @@ "\n", " # Initialize agents as workflow attributes\n", " # This agent will be used to generate responses when cache misses occur\n", - " agent = Agent(\n", - " model=OpenAIChat(id=MODEL_ID),\n", - " description=\"General purpose agent for generating responses\"\n", - " )\n", + " agent = Agent(model=OpenAIChat(id=\"gpt-4o-mini\"), description=\"General purpose agent for generating responses\")\n", "\n", " def run(self, message: str) -> Iterator[RunResponse]:\n", " \"\"\"\n", " Execute the workflow with caching logic.\n", - " \n", + "\n", " This method:\n", " 1. Checks if the response is already cached\n", " 2. Returns cached response immediately if found\n", " 3. Generates new response if not cached\n", " 4. 
Caches the new response for future use\n", - " \n", + "\n", " Args:\n", " message: The input query to process\n", - " \n", + "\n", " Yields:\n", " RunResponse: Streamed response chunks\n", " \"\"\"\n", " logger.info(f\"Checking cache for '{message}'\")\n", - " \n", - " # Check if we've already processed this exact message\n", - " # session_state persists across workflow runs\n", + "\n", " if self.session_state.get(message):\n", " logger.info(f\"Cache hit for '{message}'\")\n", " # Return cached response immediately (no API call needed)\n", - " yield RunResponse(\n", - " run_id=self.run_id, \n", - " content=self.session_state.get(message)\n", - " )\n", + " yield RunResponse(run_id=self.run_id, content=self.session_state.get(message))\n", " return\n", "\n", - " # Cache miss - need to generate new response\n", " logger.info(f\"Cache miss for '{message}'\")\n", - " \n", - " # Run the agent and stream the response\n", - " # Using stream=True for real-time output\n", + "\n", " yield from self.agent.run(message, stream=True)\n", "\n", - " # After streaming completes, cache the full response\n", - " # This makes future requests for the same message instant\n", " self.session_state[message] = self.agent.run_response.content\n", - " logger.info(f\"Cached response for future use\")" - ] - }, - { - "cell_type": "raw", - "id": "fa918b08", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## Demonstration Function\n", - "\n", - "The following function showcases the dramatic performance improvement from caching:\n" + " logger.info(\"Cached response for future use\")" ] }, { "cell_type": "code", - "execution_count": null, - "id": "d1a925ad", + "execution_count": 19, + "id": "b1e9e06c", "metadata": {}, "outputs": [], "source": [ "def demonstrate_workflows():\n", " \"\"\"\n", " Demonstrate workflow capabilities with caching.\n", - " \n", + "\n", " This function shows:\n", " - How to create and use custom workflows\n", " - The performance benefits of 
caching\n", " - Session state persistence\n", " - Response streaming\n", " \"\"\"\n", - " print(\"\\n\" + \"=\" * 60)\n", - " print(\"WORKFLOWS WITH INTELLIGENT CACHING\")\n", - " print(\"=\" * 60)\n", "\n", + " tracer = agentops.start_trace(trace_name=\"Agno Workflow Setup Demonstration\")\n", " try:\n", - " # Create an instance of our caching workflow\n", - " print(\"\\n1. Creating CacheWorkflow instance...\")\n", " workflow = CacheWorkflow()\n", - " print(\" ✓ Workflow initialized with caching capabilities\")\n", "\n", - " # First run - this will be a cache miss\n", - " print(\"\\n2. First run (expecting cache miss):\")\n", - " print(\" This will make an API call and take ~1-2 seconds\")\n", - " \n", - " # Run workflow with a test message\n", " response: Iterator[RunResponse] = workflow.run(message=\"Tell me a joke.\")\n", - " \n", - " # Pretty print the response with timing information\n", + "\n", " pprint_run_response(response, markdown=True, show_time=True)\n", "\n", - " # Second run - this should be a cache hit\n", - " print(\"\\n3. 
Second run (expecting cache hit):\")\n", - " print(\" This should return instantly from cache\")\n", - " \n", - " # Run workflow with the same message\n", " response: Iterator[RunResponse] = workflow.run(message=\"Tell me a joke.\")\n", - " \n", - " # Pretty print the response - notice the instant response time\n", + "\n", " pprint_run_response(response, markdown=True, show_time=True)\n", - " \n", - " print(\"\\n✓ Workflow demonstration completed\")\n", - " print(\"\\nNotice the performance difference:\")\n", - " print(\"- First run: Makes API call, takes time\")\n", - " print(\"- Second run: Returns from cache instantly\")\n", - " print(\"- Same content, but much faster delivery\")\n", "\n", - " except Exception as e:\n", - " print(f\"\\nError during workflow demonstration: {e}\")\n", - " print(\"This might be due to API issues or configuration problems\")" - ] - }, - { - "cell_type": "raw", - "id": "35d5cfd4", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## Main Orchestration\n", + " agentops.end_trace(tracer, end_state=\"Success\")\n", "\n", - "The main function coordinates the entire demonstration:\n" + " except Exception:\n", + " agentops.end_trace(tracer, end_state=\"Error\")" ] }, { "cell_type": "code", "execution_count": null, - "id": "ba15c8be", + "id": "d09d8f70", "metadata": {}, "outputs": [], "source": [ - "async def main():\n", - " \"\"\"\n", - " Main function that orchestrates the workflow demonstration.\n", - " \n", - " This async function handles:\n", - " - Environment validation\n", - " - Running the workflow demonstration\n", - " - Error handling and user feedback\n", - " \"\"\"\n", - " print(\"Welcome to Agno Workflow Demo\")\n", - " print(\"This demo showcases custom workflows with caching capabilities\")\n", - " print()\n", - " \n", - " # Validate environment setup\n", - " if not check_environment():\n", - " print(\"Cannot proceed without proper API configuration\")\n", - " return\n", - "\n", - " # Run 
demonstration\n", - " print(\"\\nStarting workflow demonstration...\")\n", - "\n", - " try:\n", - " demonstrate_workflows()\n", - " print(\"\\n\\n✓ Workflow demo completed successfully!\")\n", - " print(\"\\nKey Takeaways:\")\n", - " print(\"- Workflows enable custom agent orchestration\")\n", - " print(\"- Caching dramatically improves performance\")\n", - " print(\"- Session state persists across runs\")\n", - " print(\"- Streaming responses provide real-time feedback\")\n", - " print(\"- AgentOps tracks all workflow executions\")\n", - " \n", - " except Exception as e:\n", - " print(f\"Demo failed: {e}\")\n", - " print(\"Please check your API keys and network connection\")" - ] - }, - { - "cell_type": "raw", - "id": "e242b650", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## Execute the Demo\n", "\n", - "Run the following cell to see the caching workflow in action:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f2dc108a", - "metadata": {}, - "outputs": [], - "source": [ - "if __name__ == \"__main__\":\n", - " \"\"\"\n", - " Entry point for the script.\n", - " \n", - " Uses asyncio to run the main function, maintaining consistency\n", - " with other examples and preparing for async operations.\n", - " \"\"\"\n", - " asyncio.run(main())" + "demonstrate_workflows()" ] } ], @@ -437,8 +195,22 @@ "main_language": "python", "notebook_metadata_filter": "-all" }, + "kernelspec": { + "display_name": "venv", + "language": "python", + "name": "python3" + }, "language_info": { - "name": "python" + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" } }, "nbformat": 4, diff --git a/examples/agno/agno_workflow_setup.py b/examples/agno/agno_workflow_setup.py index 071977a6a..d3aa0c9ea 100644 --- a/examples/agno/agno_workflow_setup.py +++ 
b/examples/agno/agno_workflow_setup.py @@ -1,17 +1,20 @@ """ -Workflow Setup with Caching in Agno +# Workflow Setup with Caching in Agno -This example demonstrates how to create custom workflows that can: -- Orchestrate complex agent interactions -- Implement caching for improved performance -- Maintain session state across multiple runs -- Stream responses efficiently +This example demonstrates how to create efficient, stateful workflows in Agno that orchestrate complex agent interactions while maintaining performance through caching and state management. + +## Overview +This example shows how to build reusable agent workflows where we: + +1. **Design workflow architecture** with custom logic and agent orchestration +2. **Implement caching mechanisms** to store and reuse expensive computations +3. **Manage session state** to maintain context across multiple interactions +4. **Set up response streaming** for real-time output handling + +By using workflows, you can create sophisticated agent pipelines that are both performant and maintainable, with built-in optimizations for repeated operations and long-running sessions. -Workflows are powerful abstractions that allow you to build reusable, -stateful agent pipelines with custom logic and optimizations. """ -import os from agno.agent import Agent, RunResponse import asyncio import agentops @@ -22,33 +25,9 @@ from agno.utils.log import logger from typing import Iterator -# Load environment variables -load_dotenv() - -# Initialize AgentOps for monitoring workflow execution -agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) - -# Configuration -MODEL_ID = "gpt-4o-mini" # Default model for agents - -def check_environment(): - """ - Verify that all required API keys are properly configured. 
- - Returns: - bool: True if all required environment variables are set - """ - required_vars = ["AGENTOPS_API_KEY", "OPENAI_API_KEY"] - missing_vars = [var for var in required_vars if not os.getenv(var)] - - if missing_vars: - print(f"Missing required environment variables: {missing_vars}") - print("Please set these in your .env file or environment") - return False - - print("✓ Environment variables checked successfully") - return True +load_dotenv() +agentops.init(auto_start_session=False, tags=["agno-example", "workflow-setup"]) class CacheWorkflow(Workflow): @@ -72,7 +51,7 @@ class CacheWorkflow(Workflow): # Initialize agents as workflow attributes # This agent will be used to generate responses when cache misses occur - agent = Agent(model=OpenAIChat(id=MODEL_ID), description="General purpose agent for generating responses") + agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), description="General purpose agent for generating responses") def run(self, message: str) -> Iterator[RunResponse]: """ @@ -92,23 +71,16 @@ def run(self, message: str) -> Iterator[RunResponse]: """ logger.info(f"Checking cache for '{message}'") - # Check if we've already processed this exact message - # session_state persists across workflow runs if self.session_state.get(message): logger.info(f"Cache hit for '{message}'") # Return cached response immediately (no API call needed) yield RunResponse(run_id=self.run_id, content=self.session_state.get(message)) return - # Cache miss - need to generate new response logger.info(f"Cache miss for '{message}'") - # Run the agent and stream the response - # Using stream=True for real-time output yield from self.agent.run(message, stream=True) - # After streaming completes, cache the full response - # This makes future requests for the same message instant self.session_state[message] = self.agent.run_response.content logger.info("Cached response for future use") @@ -123,88 +95,23 @@ def demonstrate_workflows(): - Session state persistence - Response 
streaming """ - print("\n" + "=" * 60) - print("WORKFLOWS WITH INTELLIGENT CACHING") - print("=" * 60) + tracer = agentops.start_trace(trace_name="Agno Workflow Setup Demonstration") try: - # Create an instance of our caching workflow - print("\n1. Creating CacheWorkflow instance...") workflow = CacheWorkflow() - print(" ✓ Workflow initialized with caching capabilities") - - # First run - this will be a cache miss - print("\n2. First run (expecting cache miss):") - print(" This will make an API call and take ~1-2 seconds") - # Run workflow with a test message response: Iterator[RunResponse] = workflow.run(message="Tell me a joke.") - # Pretty print the response with timing information pprint_run_response(response, markdown=True, show_time=True) - # Second run - this should be a cache hit - print("\n3. Second run (expecting cache hit):") - print(" This should return instantly from cache") - - # Run workflow with the same message response: Iterator[RunResponse] = workflow.run(message="Tell me a joke.") - # Pretty print the response - notice the instant response time pprint_run_response(response, markdown=True, show_time=True) - print("\n✓ Workflow demonstration completed") - print("\nNotice the performance difference:") - print("- First run: Makes API call, takes time") - print("- Second run: Returns from cache instantly") - print("- Same content, but much faster delivery") - - except Exception as e: - print(f"\nError during workflow demonstration: {e}") - print("This might be due to API issues or configuration problems") - - -async def main(): - """ - Main function that orchestrates the workflow demonstration. 
- - This async function handles: - - Environment validation - - Running the workflow demonstration - - Error handling and user feedback - """ - print("Welcome to Agno Workflow Demo") - print("This demo showcases custom workflows with caching capabilities") - print() + agentops.end_trace(tracer, end_state="Success") - # Validate environment setup - if not check_environment(): - print("Cannot proceed without proper API configuration") - return + except Exception: + agentops.end_trace(tracer, end_state="Error") - # Run demonstration - print("\nStarting workflow demonstration...") - try: - demonstrate_workflows() - print("\n\n✓ Workflow demo completed successfully!") - print("\nKey Takeaways:") - print("- Workflows enable custom agent orchestration") - print("- Caching dramatically improves performance") - print("- Session state persists across runs") - print("- Streaming responses provide real-time feedback") - print("- AgentOps tracks all workflow executions") - - except Exception as e: - print(f"Demo failed: {e}") - print("Please check your API keys and network connection") - - -if __name__ == "__main__": - """ - Entry point for the script. - - Uses asyncio to run the main function, maintaining consistency - with other examples and preparing for async operations. 
- """ - asyncio.run(main()) +asyncio.run(demonstrate_workflows()) From 65ba4932361a7d7f468823be43ec62a09e61b82a Mon Sep 17 00:00:00 2001 From: Fenil Faldu <65482495+fenilfaldu@users.noreply.github.com> Date: Fri, 13 Jun 2025 01:27:54 +0530 Subject: [PATCH 12/14] Delete examples/agno/agno_comprehensive_tutorial.py code cleanup --- examples/agno/agno_comprehensive_tutorial.py | 371 ------------------- 1 file changed, 371 deletions(-) delete mode 100644 examples/agno/agno_comprehensive_tutorial.py diff --git a/examples/agno/agno_comprehensive_tutorial.py b/examples/agno/agno_comprehensive_tutorial.py deleted file mode 100644 index 41c0ff9d1..000000000 --- a/examples/agno/agno_comprehensive_tutorial.py +++ /dev/null @@ -1,371 +0,0 @@ -""" -Comprehensive Agno Tutorial with AgentOps Instrumentation - -This tutorial demonstrates key Agno features: -1. Basic Agents and Teams -2. Tool Integration and RAG -3. Workflows with Caching -4. Collaborative Research Teams -5. Async Operations - -Each section shows different Agno capabilities with AgentOps tracking. -Run this script to see all examples in action. 
-""" - -import os -import asyncio -from typing import Iterator -from textwrap import dedent -from dotenv import load_dotenv - -# Load environment variables -load_dotenv() - -# Now import agno to trigger instrumentation, then import specific classes -from agno.agent import Agent, RunResponse # noqa: E402 -from agno.team import Team # noqa: E402 -from agno.models.openai import OpenAIChat # noqa: E402 -from agno.workflow import Workflow # noqa: E402 -from agno.tools.duckduckgo import DuckDuckGoTools # noqa: E402 -from agno.tools.hackernews import HackerNewsTools # noqa: E402 -from agno.tools.reasoning import ReasoningTools # noqa: E402 -from agno.tools.arxiv import ArxivTools # noqa: E402 -from agno.tools.googlesearch import GoogleSearchTools # noqa: E402 -from agno.knowledge.url import UrlKnowledge # noqa: E402 -from agno.utils.pprint import pprint_run_response # noqa: E402 -from agno.utils.log import logger # noqa: E402 -from agno.vectordb.lancedb import LanceDb # noqa: E402 -from agno.vectordb.search import SearchType # noqa: E402 -from agno.embedder.cohere import CohereEmbedder # noqa: E402 -from agno.reranker.cohere import CohereReranker # noqa: E402 - -import agentops # noqa: E402 - -agentops.init(api_key=os.getenv("AGENTOPS_API_KEY")) -# Sample configuration -MODEL_ID = "gpt-4o-mini" - - -def check_environment(): - """Check if required environment variables are set.""" - required_vars = ["AGENTOPS_API_KEY", "OPENAI_API_KEY"] - missing_vars = [var for var in required_vars if not os.getenv(var)] - - if missing_vars: - print(f"Missing required environment variables: {missing_vars}") - print("Please set these in your .env file or environment") - return False - - print("Environment variables checked successfully") - return True - - -def demonstrate_basic_agents(): - """Demonstrate basic agent creation and team coordination.""" - print("\n" + "=" * 60) - print("BASIC AGENTS AND TEAMS") - print("=" * 60) - - try: - # Create individual agents - news_agent = 
Agent(name="News Agent", role="Get the latest news", model=OpenAIChat(id=MODEL_ID)) - - weather_agent = Agent( - name="Weather Agent", role="Get the weather for the next 7 days", model=OpenAIChat(id=MODEL_ID) - ) - - # Create a team with coordination mode - team = Team(name="News and Weather Team", mode="coordinate", members=[news_agent, weather_agent]) - - # Run team task - response = team.run("What is the weather in Tokyo?") - print(f"Team Response: {response.content}") - - except Exception as e: - print(f"Basic agents error: {e}") - - -def demonstrate_tool_integration(): - """Demonstrate tool integration with RAG and knowledge base.""" - print("\n" + "=" * 60) - print("TOOL INTEGRATION WITH RAG") - print("=" * 60) - - try: - # Create knowledge base with vector database - knowledge_base = UrlKnowledge( - urls=["https://docs.agno.com/introduction/agents.md"], - # Use LanceDB as the vector database, store embeddings in the `agno_docs` table - vector_db=LanceDb( - uri="tmp/lancedb", - table_name="agno_docs", - search_type=SearchType.hybrid, - embedder=CohereEmbedder(id="embed-v4.0"), # noqa: E821 - reranker=CohereReranker(model="rerank-v3.5"), # noqa: E821 - ), # noqa: E821 - ) - - # Create agent with knowledge and reasoning tools - agent = Agent( - model=OpenAIChat(id=MODEL_ID), - # Agentic RAG is enabled by default when `knowledge` is provided to the Agent. - knowledge=knowledge_base, - # search_knowledge=True gives the Agent the ability to search on demand - search_knowledge=True, - tools=[ReasoningTools(add_instructions=True)], - instructions=[ - "Include sources in your response.", - "Always search your knowledge before answering the question.", - "Only include the output in your response. 
No other text.", - ], - markdown=True, - ) - - print("Running RAG agent with knowledge base...") - agent.print_response( - "What are Agents?", - show_full_reasoning=True, - ) - - except Exception as e: - print(f"Tool integration error: {e}") - - -class CacheWorkflow(Workflow): - """A workflow that demonstrates caching capabilities.""" - - # Purely descriptive, not used by the workflow - description: str = "A workflow that caches previous outputs" - - # Add agents or teams as attributes on the workflow - agent = Agent(model=OpenAIChat(id=MODEL_ID)) - - # Write the logic in the `run()` method - def run(self, message: str) -> Iterator[RunResponse]: - logger.info(f"Checking cache for '{message}'") - # Check if the output is already cached - if self.session_state.get(message): - logger.info(f"Cache hit for '{message}'") - yield RunResponse(run_id=self.run_id, content=self.session_state.get(message)) - return - - logger.info(f"Cache miss for '{message}'") - # Run the agent and yield the response - yield from self.agent.run(message, stream=True) - - # Cache the output after response is yielded - self.session_state[message] = self.agent.run_response.content - - -def demonstrate_workflows(): - """Demonstrate workflow capabilities with caching.""" - print("\n" + "=" * 60) - print("WORKFLOWS WITH CACHING") - print("=" * 60) - - try: - workflow = CacheWorkflow() - - print("First run (cache miss):") - # Run workflow (this takes ~1s) - response: Iterator[RunResponse] = workflow.run(message="Tell me a joke.") - # Print the response - pprint_run_response(response, markdown=True, show_time=True) - - print("\nSecond run (cache hit):") - # Run workflow again (this is immediate because of caching) - response: Iterator[RunResponse] = workflow.run(message="Tell me a joke.") - # Print the response - pprint_run_response(response, markdown=True, show_time=True) - - except Exception as e: - print(f"Workflow error: {e}") - - -def demonstrate_research_team(): - """Demonstrate collaborative 
research team with multiple specialized agents.""" - print("\n" + "=" * 60) - print("COLLABORATIVE RESEARCH TEAM") - print("=" * 60) - - try: - # Create specialized research agents - reddit_researcher = Agent( - name="Reddit Researcher", - role="Research a topic on Reddit", - model=OpenAIChat(id="gpt-4o"), - tools=[GoogleSearchTools()], - add_name_to_instructions=True, - instructions=dedent( - """ - You are a Reddit researcher. - You will be given a topic to research on Reddit. - You will need to find the most relevant posts on Reddit. - """ - ), - ) - - hackernews_researcher = Agent( - name="HackerNews Researcher", - model=OpenAIChat("gpt-4o"), - role="Research a topic on HackerNews.", - tools=[HackerNewsTools()], - add_name_to_instructions=True, - instructions=dedent( - """ - You are a HackerNews researcher. - You will be given a topic to research on HackerNews. - You will need to find the most relevant posts on HackerNews. - """ - ), - ) - - academic_paper_researcher = Agent( - name="Academic Paper Researcher", - model=OpenAIChat("gpt-4o"), - role="Research academic papers and scholarly content", - tools=[GoogleSearchTools(), ArxivTools()], - add_name_to_instructions=True, - instructions=dedent( - """ - You are an academic paper researcher. - You will be given a topic to research in academic literature. - You will need to find relevant scholarly articles, papers, and academic discussions. - Focus on peer-reviewed content and citations from reputable sources. - Provide brief summaries of key findings and methodologies. - """ - ), - ) - - twitter_researcher = Agent( - name="Twitter Researcher", - model=OpenAIChat("gpt-4o"), - role="Research trending discussions and real-time updates", - tools=[DuckDuckGoTools()], - add_name_to_instructions=True, - instructions=dedent( - """ - You are a Twitter/X researcher. - You will be given a topic to research on Twitter/X. - You will need to find trending discussions, influential voices, and real-time updates. 
- Focus on verified accounts and credible sources when possible. - Track relevant hashtags and ongoing conversations. - """ - ), - ) - - # Create collaborative team - agent_team = Team( - name="Discussion Team", - mode="collaborate", - model=OpenAIChat("gpt-4o"), - members=[ - reddit_researcher, - hackernews_researcher, - academic_paper_researcher, - twitter_researcher, - ], - instructions=[ - "You are a discussion master.", - "You have to stop the discussion when you think the team has reached a consensus.", - ], - success_criteria="The team has reached a consensus.", - enable_agentic_context=True, - add_context=True, - show_tool_calls=True, - markdown=True, - debug_mode=True, - show_members_responses=True, - ) - - print("Running collaborative research team...") - agent_team.print_response( - message="Start the discussion on the topic: 'What is the best way to learn to code?'", - stream=True, - stream_intermediate_steps=True, - ) - - except Exception as e: - print(f"Research team error: {e}") - - -async def demonstrate_async_operations(): - """Demonstrate async operations with Agno agents.""" - print("\n" + "=" * 60) - print("ASYNC OPERATIONS") - print("=" * 60) - - try: - # Create async tasks with different agents - agent = Agent(model=OpenAIChat(id=MODEL_ID)) - - # Define async tasks - async def task1(): - response = await agent.arun("Explain Python in one paragraph") - return f"Task 1: {response.content}" - - async def task2(): - response = await agent.arun("Explain JavaScript in one paragraph") - return f"Task 2: {response.content}" - - async def task3(): - response = await agent.arun("Compare them briefly") - return f"Task 3: {response.content}" - - # Run tasks concurrently - print("Running async tasks concurrently...") - results = await asyncio.gather(task1(), task2(), task3()) - - for result in results: - print(result) - print() - - except Exception as e: - print(f"Async operations error: {e}") - - -async def main(): - """Main function to run all Agno 
demonstrations.""" - print("Starting Comprehensive Agno Example with AgentOps") - print("=" * 80) - - # Check environment - if not check_environment(): - return - - # Run all demonstrations - print("\nRunning all Agno demonstrations...") - - # Research teams - # try: - # demonstrate_research_team() - # except Exception as e: - # print(f"Skipping research team demo due to: {e}") - - # # Basic functionality - # demonstrate_basic_agents() - - # # Tool integration - # try: - # demonstrate_tool_integration() - # except Exception as e: - # print(f"Skipping tool integration demo due to: {e}") - - # # Workflows - # try: - # demonstrate_workflows() - # except Exception as e: - # print(f"Skipping workflow demo due to: {e}") - - # Async operations - try: - await demonstrate_async_operations() - except Exception as e: - print(f"Skipping async demo due to: {e}") - - print("\nAll Agno demonstrations completed!") - print("Check your AgentOps dashboard for detailed traces and metrics.") - - -if __name__ == "__main__": - asyncio.run(main()) From fc63f0977a43b27cfbf34bca1b55c807f061e835 Mon Sep 17 00:00:00 2001 From: Fenil Faldu <65482495+fenilfaldu@users.noreply.github.com> Date: Fri, 13 Jun 2025 01:28:17 +0530 Subject: [PATCH 13/14] Delete examples/agno/agno_comprehensive_tutorial.ipynb code cleanup --- .../agno/agno_comprehensive_tutorial.ipynb | 701 ------------------ 1 file changed, 701 deletions(-) delete mode 100644 examples/agno/agno_comprehensive_tutorial.ipynb diff --git a/examples/agno/agno_comprehensive_tutorial.ipynb b/examples/agno/agno_comprehensive_tutorial.ipynb deleted file mode 100644 index 482f1ebd7..000000000 --- a/examples/agno/agno_comprehensive_tutorial.ipynb +++ /dev/null @@ -1,701 +0,0 @@ -{ - "cells": [ - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "# Agno Framework Comprehensive Tutorial with AgentOps\n", - "\n", - "This tutorial demonstrates the powerful capabilities of the Agno framework for 
building AI agents and teams. Agno provides a flexible and intuitive way to create intelligent agents that can collaborate, use tools, and perform complex tasks.\n", - "\n", - "## What You'll Learn\n", - "\n", - "1. **Basic Agents and Teams** - Creating individual agents and coordinating them in teams\n", - "2. **Tool Integration with RAG** - Integrating tools and knowledge bases for enhanced capabilities\n", - "3. **Workflows with Caching** - Building efficient workflows with state management\n", - "4. **Collaborative Research Teams** - Creating specialized agents that work together\n", - "5. **Async Operations** - Running agents asynchronously for better performance\n", - "\n", - "Throughout this tutorial, AgentOps will track all agent activities, providing detailed insights into your AI system's behavior.\n" - ] - }, - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## Getting Started\n", - "\n", - "First, let's import the necessary libraries and set up our environment. We'll need Agno for building agents and AgentOps for monitoring and tracking.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "afacf485", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import asyncio\n", - "from typing import Iterator\n", - "from textwrap import dedent\n", - "from dotenv import load_dotenv" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "376e56d8", - "metadata": {}, - "outputs": [], - "source": [ - "# Load environment variables\n", - "load_dotenv()" - ] - }, - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## Environment Setup\n", - "\n", - "Now we'll load environment variables and import the Agno framework components. 
The order of imports is important - we import agno first to ensure proper instrumentation, then import specific classes we'll use.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f8fb8c25", - "metadata": {}, - "outputs": [], - "source": [ - "# Now import agno to trigger instrumentation, then import specific classes\n", - "from agno.agent import Agent, RunResponse \n", - "from agno.team import Team \n", - "from agno.models.openai import OpenAIChat \n", - "from agno.workflow import Workflow \n", - "from agno.tools.duckduckgo import DuckDuckGoTools \n", - "from agno.tools.hackernews import HackerNewsTools \n", - "from agno.tools.reasoning import ReasoningTools \n", - "from agno.tools.arxiv import ArxivTools \n", - "from agno.tools.googlesearch import GoogleSearchTools \n", - "from agno.knowledge.url import UrlKnowledge \n", - "from agno.utils.pprint import pprint_run_response \n", - "from agno.utils.log import logger \n", - "from agno.vectordb.lancedb import LanceDb \n", - "from agno.vectordb.search import SearchType \n", - "from agno.embedder.cohere import CohereEmbedder \n", - "from agno.reranker.cohere import CohereReranker\n", - "import agentops \n", - " " - ] - }, - { - "cell_type": "raw", - "id": "c8b9232b", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## Initialize AgentOps\n", - "\n", - "Let's initialize AgentOps to start tracking our agent activities. 
Make sure you have your AGENTOPS_API_KEY set in your environment variables.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "13c47f0c", - "metadata": {}, - "outputs": [], - "source": [ - "agentops.init(api_key=os.getenv(\"AGENTOPS_API_KEY\"))\n", - "# Sample configuration\n", - "MODEL_ID = \"gpt-4o-mini\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "129c88ab", - "metadata": {}, - "outputs": [], - "source": [ - "def check_environment():\n", - " \"\"\"Check if required environment variables are set.\"\"\"\n", - " required_vars = [\"AGENTOPS_API_KEY\", \"OPENAI_API_KEY\"]\n", - " missing_vars = [var for var in required_vars if not os.getenv(var)]\n", - "\n", - " if missing_vars:\n", - " print(f\"Missing required environment variables: {missing_vars}\")\n", - " print(\"Please set these in your .env file or environment\")\n", - " return False\n", - "\n", - " print(\"Environment variables checked successfully\")\n", - " return True" - ] - }, - { - "cell_type": "raw", - "id": "eb9628b8", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## 1. Basic Agents and Teams\n", - "\n", - "Let's start with the fundamentals - creating individual agents and organizing them into teams. 
In Agno, agents are AI-powered entities with specific roles, and teams coordinate multiple agents to accomplish complex tasks.\n", - "\n", - "### Key Concepts:\n", - "- **Agent**: An individual AI entity with a specific role and model\n", - "- **Team**: A group of agents that can work together in different modes\n", - "- **Coordination Mode**: How agents interact (e.g., 'coordinate' mode)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ccf4268c", - "metadata": {}, - "outputs": [], - "source": [ - "def demonstrate_basic_agents():\n", - " \"\"\"Demonstrate basic agent creation and team coordination.\"\"\"\n", - " print(\"\\n\" + \"=\" * 60)\n", - " print(\"BASIC AGENTS AND TEAMS\")\n", - " print(\"=\" * 60)\n", - "\n", - " try:\n", - " # Create individual agents\n", - " news_agent = Agent(name=\"News Agent\", role=\"Get the latest news\", model=OpenAIChat(id=MODEL_ID))\n", - "\n", - " weather_agent = Agent(\n", - " name=\"Weather Agent\", role=\"Get the weather for the next 7 days\", model=OpenAIChat(id=MODEL_ID)\n", - " )\n", - "\n", - " # Create a team with coordination mode\n", - " team = Team(name=\"News and Weather Team\", mode=\"coordinate\", members=[news_agent, weather_agent])\n", - "\n", - " # Run team task\n", - " response = team.run(\"What is the weather in Tokyo?\")\n", - " print(f\"Team Response: {response.content}\")\n", - "\n", - " except Exception as e:\n", - " print(f\"Basic agents error: {e}\")" - ] - }, - { - "cell_type": "raw", - "id": "4f7feccc", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## 2. Tool Integration with RAG (Retrieval-Augmented Generation)\n", - "\n", - "One of Agno's most powerful features is the ability to integrate tools and knowledge bases. 
This example shows how to create an agent with:\n", - "- **Knowledge Base**: Using URLs as sources of information\n", - "- **Vector Database**: LanceDB for efficient similarity search\n", - "- **Embeddings**: Cohere embeddings for semantic understanding\n", - "- **Reranking**: Improving search results with Cohere reranker\n", - "- **Reasoning Tools**: Adding logical reasoning capabilities\n", - "\n", - "This creates an agent that can search through documentation and provide informed answers with sources.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3ff48560", - "metadata": {}, - "outputs": [], - "source": [ - "def demonstrate_tool_integration():\n", - " \"\"\"Demonstrate tool integration with RAG and knowledge base.\"\"\"\n", - " print(\"\\n\" + \"=\" * 60)\n", - " print(\"TOOL INTEGRATION WITH RAG\")\n", - " print(\"=\" * 60)\n", - "\n", - " try:\n", - " # Create knowledge base with vector database\n", - " knowledge_base = UrlKnowledge(\n", - " urls=[\"https://docs.agno.com/introduction/agents.md\"],\n", - " # Use LanceDB as the vector database, store embeddings in the `agno_docs` table\n", - " vector_db=LanceDb(\n", - " uri=\"tmp/lancedb\",\n", - " table_name=\"agno_docs\",\n", - " search_type=SearchType.hybrid,\n", - " embedder=CohereEmbedder(id=\"embed-v4.0\"), # noqa: E821\n", - " reranker=CohereReranker(model=\"rerank-v3.5\"), # noqa: E821\n", - " ), # noqa: E821\n", - " )\n", - "\n", - " # Create agent with knowledge and reasoning tools\n", - " agent = Agent(\n", - " model=OpenAIChat(id=MODEL_ID),\n", - " # Agentic RAG is enabled by default when `knowledge` is provided to the Agent.\n", - " knowledge=knowledge_base,\n", - " # search_knowledge=True gives the Agent the ability to search on demand\n", - " search_knowledge=True,\n", - " tools=[ReasoningTools(add_instructions=True)],\n", - " instructions=[\n", - " \"Include sources in your response.\",\n", - " \"Always search your knowledge before answering the question.\",\n", - " 
\"Only include the output in your response. No other text.\",\n", - " ],\n", - " markdown=True,\n", - " )\n", - "\n", - " print(\"Running RAG agent with knowledge base...\")\n", - " agent.print_response(\n", - " \"What are Agents?\",\n", - " show_full_reasoning=True,\n", - " )\n", - "\n", - " except Exception as e:\n", - " print(f\"Tool integration error: {e}\")" - ] - }, - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## 3. Workflows with Caching\n", - "\n", - "Workflows in Agno allow you to create reusable, stateful processes. This example demonstrates a caching workflow that:\n", - "- Stores previous responses in session state\n", - "- Checks cache before running expensive operations\n", - "- Yields responses in a streaming fashion\n", - "- Improves performance by avoiding redundant computations\n", - "\n", - "This is particularly useful for applications where users might ask similar questions repeatedly.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b4408138", - "metadata": {}, - "outputs": [], - "source": [ - "class CacheWorkflow(Workflow):\n", - " \"\"\"A workflow that demonstrates caching capabilities.\"\"\"\n", - "\n", - " # Purely descriptive, not used by the workflow\n", - " description: str = \"A workflow that caches previous outputs\"\n", - "\n", - " # Add agents or teams as attributes on the workflow\n", - " agent = Agent(model=OpenAIChat(id=MODEL_ID))\n", - "\n", - " # Write the logic in the `run()` method\n", - " def run(self, message: str) -> Iterator[RunResponse]:\n", - " logger.info(f\"Checking cache for '{message}'\")\n", - " # Check if the output is already cached\n", - " if self.session_state.get(message):\n", - " logger.info(f\"Cache hit for '{message}'\")\n", - " yield RunResponse(run_id=self.run_id, content=self.session_state.get(message))\n", - " return\n", - "\n", - " logger.info(f\"Cache miss for '{message}'\")\n", - " # Run the agent and yield the 
response\n", - " yield from self.agent.run(message, stream=True)\n", - "\n", - " # Cache the output after response is yielded\n", - " self.session_state[message] = self.agent.run_response.content" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2f8e1e6c", - "metadata": {}, - "outputs": [], - "source": [ - "def demonstrate_workflows():\n", - " \"\"\"Demonstrate workflow capabilities with caching.\"\"\"\n", - " print(\"\\n\" + \"=\" * 60)\n", - " print(\"WORKFLOWS WITH CACHING\")\n", - " print(\"=\" * 60)\n", - "\n", - " try:\n", - " workflow = CacheWorkflow()\n", - "\n", - " print(\"First run (cache miss):\")\n", - " # Run workflow (this takes ~1s)\n", - " response: Iterator[RunResponse] = workflow.run(message=\"Tell me a joke.\")\n", - " # Print the response\n", - " pprint_run_response(response, markdown=True, show_time=True)\n", - "\n", - " print(\"\\nSecond run (cache hit):\")\n", - " # Run workflow again (this is immediate because of caching)\n", - " response: Iterator[RunResponse] = workflow.run(message=\"Tell me a joke.\")\n", - " # Print the response\n", - " pprint_run_response(response, markdown=True, show_time=True)\n", - "\n", - " except Exception as e:\n", - " print(f\"Workflow error: {e}\")" - ] - }, - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## 4. Collaborative Research Teams\n", - "\n", - "This is where Agno truly shines - creating teams of specialized agents that collaborate to solve complex problems. 
In this example, we create:\n", - "\n", - "### Specialized Agents:\n", - "- **Reddit Researcher**: Searches Reddit for community discussions\n", - "- **HackerNews Researcher**: Finds technical discussions on HackerNews\n", - "- **Academic Paper Researcher**: Searches scholarly articles and papers\n", - "- **Twitter Researcher**: Tracks real-time discussions and trends\n", - "\n", - "### Team Configuration:\n", - "- **Collaborate Mode**: Agents work together and discuss findings\n", - "- **Discussion Master**: The team lead that manages the conversation\n", - "- **Success Criteria**: Defines when the team has reached consensus\n", - "- **Agentic Context**: Agents maintain context throughout the discussion\n", - "\n", - "This demonstrates how multiple AI agents can work together like a human research team!\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "db870fdb", - "metadata": {}, - "outputs": [], - "source": [ - "def demonstrate_research_team():\n", - " \"\"\"Demonstrate collaborative research team with multiple specialized agents.\"\"\"\n", - " print(\"\\n\" + \"=\" * 60)\n", - " print(\"COLLABORATIVE RESEARCH TEAM\")\n", - " print(\"=\" * 60)\n", - "\n", - " try:\n", - " # Create specialized research agents\n", - " reddit_researcher = Agent(\n", - " name=\"Reddit Researcher\",\n", - " role=\"Research a topic on Reddit\",\n", - " model=OpenAIChat(id=\"gpt-4o\"),\n", - " tools=[GoogleSearchTools()],\n", - " add_name_to_instructions=True,\n", - " instructions=dedent(\n", - " \"\"\"\n", - " You are a Reddit researcher.\n", - " You will be given a topic to research on Reddit.\n", - " You will need to find the most relevant posts on Reddit.\n", - " \"\"\"\n", - " ),\n", - " )\n", - "\n", - " hackernews_researcher = Agent(\n", - " name=\"HackerNews Researcher\",\n", - " model=OpenAIChat(\"gpt-4o\"),\n", - " role=\"Research a topic on HackerNews.\",\n", - " tools=[HackerNewsTools()],\n", - " add_name_to_instructions=True,\n", - " 
instructions=dedent(\n", - " \"\"\"\n", - " You are a HackerNews researcher.\n", - " You will be given a topic to research on HackerNews.\n", - " You will need to find the most relevant posts on HackerNews.\n", - " \"\"\"\n", - " ),\n", - " )\n", - "\n", - " academic_paper_researcher = Agent(\n", - " name=\"Academic Paper Researcher\",\n", - " model=OpenAIChat(\"gpt-4o\"),\n", - " role=\"Research academic papers and scholarly content\",\n", - " tools=[GoogleSearchTools(), ArxivTools()],\n", - " add_name_to_instructions=True,\n", - " instructions=dedent(\n", - " \"\"\"\n", - " You are an academic paper researcher.\n", - " You will be given a topic to research in academic literature.\n", - " You will need to find relevant scholarly articles, papers, and academic discussions.\n", - " Focus on peer-reviewed content and citations from reputable sources.\n", - " Provide brief summaries of key findings and methodologies.\n", - " \"\"\"\n", - " ),\n", - " )\n", - "\n", - " twitter_researcher = Agent(\n", - " name=\"Twitter Researcher\",\n", - " model=OpenAIChat(\"gpt-4o\"),\n", - " role=\"Research trending discussions and real-time updates\",\n", - " tools=[DuckDuckGoTools()],\n", - " add_name_to_instructions=True,\n", - " instructions=dedent(\n", - " \"\"\"\n", - " You are a Twitter/X researcher.\n", - " You will be given a topic to research on Twitter/X.\n", - " You will need to find trending discussions, influential voices, and real-time updates.\n", - " Focus on verified accounts and credible sources when possible.\n", - " Track relevant hashtags and ongoing conversations.\n", - " \"\"\"\n", - " ),\n", - " )\n", - "\n", - " # Create collaborative team\n", - " agent_team = Team(\n", - " name=\"Discussion Team\",\n", - " mode=\"collaborate\",\n", - " model=OpenAIChat(\"gpt-4o\"),\n", - " members=[\n", - " reddit_researcher,\n", - " hackernews_researcher,\n", - " academic_paper_researcher,\n", - " twitter_researcher,\n", - " ],\n", - " instructions=[\n", - " \"You are a 
discussion master.\",\n", - " \"You have to stop the discussion when you think the team has reached a consensus.\",\n", - " ],\n", - " success_criteria=\"The team has reached a consensus.\",\n", - " enable_agentic_context=True,\n", - " add_context=True,\n", - " show_tool_calls=True,\n", - " markdown=True,\n", - " debug_mode=True,\n", - " show_members_responses=True,\n", - " )\n", - "\n", - " print(\"Running collaborative research team...\")\n", - " agent_team.print_response(\n", - " message=\"Start the discussion on the topic: 'What is the best way to learn to code?'\",\n", - " stream=True,\n", - " stream_intermediate_steps=True,\n", - " )\n", - "\n", - " except Exception as e:\n", - " print(f\"Research team error: {e}\")" - ] - }, - { - "cell_type": "raw", - "id": "72f38f42", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## 5. Async Operations\n", - "\n", - "Modern applications often need to run multiple AI tasks concurrently. Agno supports async operations out of the box, allowing you to:\n", - "- Run multiple agent tasks in parallel\n", - "- Improve response times for complex queries\n", - "- Handle multiple user requests simultaneously\n", - "- Build scalable AI applications\n", - "\n", - "This example shows how to run three different agent tasks concurrently using Python's asyncio.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "17e09033", - "metadata": {}, - "outputs": [], - "source": [ - "async def demonstrate_async_operations():\n", - " \"\"\"Demonstrate async operations with Agno agents.\"\"\"\n", - " print(\"\\n\" + \"=\" * 60)\n", - " print(\"ASYNC OPERATIONS\")\n", - " print(\"=\" * 60)\n", - "\n", - " try:\n", - " # Create async tasks with different agents\n", - " agent = Agent(model=OpenAIChat(id=MODEL_ID))\n", - "\n", - " # Define async tasks\n", - " async def task1():\n", - " response = await agent.arun(\"Explain Python in one paragraph\")\n", - " return f\"Task 1: {response.content}\"\n", - 
"\n", - " async def task2():\n", - " response = await agent.arun(\"Explain JavaScript in one paragraph\")\n", - " return f\"Task 2: {response.content}\"\n", - "\n", - " async def task3():\n", - " response = await agent.arun(\"Compare them briefly\")\n", - " return f\"Task 3: {response.content}\"\n", - "\n", - " # Run tasks concurrently\n", - " print(\"Running async tasks concurrently...\")\n", - " results = await asyncio.gather(task1(), task2(), task3())\n", - "\n", - " for result in results:\n", - " print(result)\n", - " print()\n", - "\n", - " except Exception as e:\n", - " print(f\"Async operations error: {e}\")" - ] - }, - { - "cell_type": "raw", - "id": "0df119bd", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "## Running the Complete Tutorial\n", - "\n", - "The main function orchestrates all the demonstrations, showing how different Agno features work together. Each section is wrapped in error handling to ensure the tutorial continues even if one section encounters issues.\n", - "\n", - "### What Happens When You Run This:\n", - "1. Environment variables are checked\n", - "2. AgentOps begins tracking all agent activities\n", - "3. Each demonstration runs in sequence\n", - "4. Results are displayed in the console\n", - "5. 
All activities are logged to your AgentOps dashboard\n", - "\n", - "### Viewing Results in AgentOps:\n", - "After running this tutorial, visit your AgentOps dashboard to see:\n", - "- Agent conversation traces\n", - "- Tool usage analytics\n", - "- Performance metrics\n", - "- Error tracking\n", - "- Team collaboration patterns\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b617da0c", - "metadata": {}, - "outputs": [], - "source": [ - "async def main():\n", - " \"\"\"Main function to run all Agno demonstrations.\"\"\"\n", - " print(\"Starting Comprehensive Agno Example with AgentOps\")\n", - " print(\"=\" * 80)\n", - "\n", - " # Check environment\n", - " if not check_environment():\n", - " return\n", - "\n", - " # Run all demonstrations\n", - " print(\"\\nRunning all Agno demonstrations...\")\n", - "\n", - " # Research teams\n", - " try:\n", - " demonstrate_research_team()\n", - " except Exception as e:\n", - " print(f\"Skipping research team demo due to: {e}\")\n", - "\n", - " # Basic functionality\n", - " demonstrate_basic_agents()\n", - "\n", - " # Tool integration\n", - " try:\n", - " demonstrate_tool_integration()\n", - " except Exception as e:\n", - " print(f\"Skipping tool integration demo due to: {e}\")\n", - "\n", - " # Workflows\n", - " try:\n", - " demonstrate_workflows()\n", - " except Exception as e:\n", - " print(f\"Skipping workflow demo due to: {e}\")\n", - "\n", - " # Async operations\n", - " try:\n", - " await demonstrate_async_operations()\n", - " except Exception as e:\n", - " print(f\"Skipping async demo due to: {e}\")\n", - "\n", - " print(\"\\nAll Agno demonstrations completed!\")\n", - " print(\"Check your AgentOps dashboard for detailed traces and metrics.\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5da195a4", - "metadata": {}, - "outputs": [], - "source": [ - "if __name__ == \"__main__\":\n", - " asyncio.run(main())" - ] - }, - { - "cell_type": "raw", - "metadata": { - "vscode": { 
- "languageId": "raw" - } - }, - "source": [ - "## Summary and Next Steps\n", - "\n", - "Congratulations! You've explored the key features of the Agno framework:\n", - "\n", - "### What We Covered:\n", - "- ✅ Creating and coordinating basic agents and teams\n", - "- ✅ Integrating tools and knowledge bases with RAG\n", - "- ✅ Building efficient workflows with caching\n", - "- ✅ Creating collaborative research teams with specialized agents\n", - "- ✅ Running agents asynchronously for better performance\n", - "\n", - "### Next Steps:\n", - "1. **Experiment with Different Models**: Try using different LLMs (GPT-4, Claude, etc.)\n", - "2. **Add More Tools**: Explore Agno's extensive tool library\n", - "3. **Build Custom Workflows**: Create workflows for your specific use cases\n", - "4. **Scale Your Teams**: Add more agents with specialized roles\n", - "5. **Monitor with AgentOps**: Use the dashboard to optimize your agents\n", - "\n", - "### Resources:\n", - "- [Agno Documentation](https://docs.agno.com)\n", - "- [AgentOps Dashboard](https://app.agentops.ai)\n", - "- [More Examples](https://github.com/agno-ai/agno/tree/main/examples)\n", - "\n", - "Happy building with Agno! 
🚀\n" - ] - } - ], - "metadata": { - "jupytext": { - "cell_metadata_filter": "-all", - "main_language": "python", - "notebook_metadata_filter": "-all" - }, - "language_info": { - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} From 91d6113541be7211868f7482372d3e488eee303c Mon Sep 17 00:00:00 2001 From: Pratyush Shukla Date: Fri, 13 Jun 2025 01:32:27 +0530 Subject: [PATCH 14/14] import agentops before library import --- examples/agno/agno_research_team.ipynb | 9 +++++---- examples/agno/agno_tool_integrations.ipynb | 19 ++++++------------- examples/agno/agno_workflow_setup.ipynb | 14 +++++++------- 3 files changed, 18 insertions(+), 24 deletions(-) diff --git a/examples/agno/agno_research_team.ipynb b/examples/agno/agno_research_team.ipynb index be0764f94..1f14a77fb 100644 --- a/examples/agno/agno_research_team.ipynb +++ b/examples/agno/agno_research_team.ipynb @@ -75,13 +75,15 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "id": "08978603", "metadata": {}, "outputs": [], "source": [ "import os\n", "from textwrap import dedent\n", + "\n", + "import agentops\n", "from agno.agent import Agent\n", "from agno.team import Team\n", "from agno.tools.googlesearch import GoogleSearchTools\n", @@ -89,13 +91,12 @@ "from agno.tools.arxiv import ArxivTools\n", "from agno.tools.duckduckgo import DuckDuckGoTools\n", "from agno.models.openai import OpenAIChat\n", - "import agentops\n", "from dotenv import load_dotenv" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "143be722", "metadata": {}, "outputs": [], @@ -103,7 +104,7 @@ "# Load environment variables\n", "load_dotenv()\n", "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\", \"your_openai_api_key_here\")\n", - "os.environ[\"AGENTOPS_API_KEY\"] = os.getenv(\"AGENTOPS_API_KEY\", \"your_agentops_api_key_here\")\n" + "os.environ[\"AGENTOPS_API_KEY\"] = os.getenv(\"AGENTOPS_API_KEY\", \"your_agentops_api_key_here\")" ] }, { diff 
--git a/examples/agno/agno_tool_integrations.ipynb b/examples/agno/agno_tool_integrations.ipynb index cfdea9e56..1d3ac06e6 100644 --- a/examples/agno/agno_tool_integrations.ipynb +++ b/examples/agno/agno_tool_integrations.ipynb @@ -39,26 +39,19 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": null, "id": "208656e8", "metadata": {}, "outputs": [], "source": [ "import os\n", + "from dotenv import load_dotenv\n", + "\n", + "import agentops\n", "from agno.agent import Agent\n", "from agno.models.openai import OpenAIChat\n", - "import agentops\n", - "from dotenv import load_dotenv" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "id": "0cc22e40", - "metadata": {}, - "outputs": [], - "source": [ - "# Knowledge and RAG components\n", + "\n", + "# Knowledge & RAG components\n", "from agno.knowledge.url import UrlKnowledge\n", "from agno.vectordb.lancedb import LanceDb\n", "from agno.vectordb.search import SearchType\n", diff --git a/examples/agno/agno_workflow_setup.ipynb b/examples/agno/agno_workflow_setup.ipynb index 64a5017dd..107792d46 100644 --- a/examples/agno/agno_workflow_setup.ipynb +++ b/examples/agno/agno_workflow_setup.ipynb @@ -37,18 +37,18 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "id": "ee482533", "metadata": { "lines_to_next_cell": 2 }, "outputs": [], "source": [ - "from agno.agent import Agent, RunResponse\n", "import os\n", - "import asyncio\n", - "import agentops\n", "from dotenv import load_dotenv\n", + "\n", + "import agentops\n", + "from agno.agent import Agent, RunResponse\n", "from agno.workflow import Workflow\n", "from agno.utils.pprint import pprint_run_response\n", "from agno.models.openai import OpenAIChat\n", @@ -58,7 +58,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "id": "eddbb872", "metadata": {}, "outputs": [], @@ -66,7 +66,7 @@ "# Load environment variables\n", "load_dotenv()\n", "os.environ[\"OPENAI_API_KEY\"] = 
os.getenv(\"OPENAI_API_KEY\", \"your_openai_api_key_here\")\n", - "os.environ[\"AGENTOPS_API_KEY\"] = os.getenv(\"AGENTOPS_API_KEY\", \"your_agentops_api_key_here\")\n" + "os.environ[\"AGENTOPS_API_KEY\"] = os.getenv(\"AGENTOPS_API_KEY\", \"your_agentops_api_key_here\")" ] }, { @@ -76,7 +76,7 @@ "metadata": {}, "outputs": [], "source": [ - "agentops.init(auto_start_session=False, tags=[\"agno-example\", \"workflow-setup\"])\n" + "agentops.init(auto_start_session=False, tags=[\"agno-example\", \"workflow-setup\"])" ] }, {