diff --git a/agentops/instrumentation/__init__.py b/agentops/instrumentation/__init__.py
index b7916e62a..d4e271f3d 100644
--- a/agentops/instrumentation/__init__.py
+++ b/agentops/instrumentation/__init__.py
@@ -183,8 +183,8 @@ class InstrumentorConfig(TypedDict):
"min_version": "0.1.0",
},
"google.genai": {
- "module_name": "agentops.instrumentation.google_generativeai",
- "class_name": "GoogleGenerativeAIInstrumentor",
+ "module_name": "agentops.instrumentation.google_genai",
+ "class_name": "GoogleGenAIInstrumentor",
"min_version": "0.1.0",
"package_name": "google-genai", # Actual pip package name
},
diff --git a/agentops/instrumentation/google_generativeai/README.md b/agentops/instrumentation/google_genai/README.md
similarity index 100%
rename from agentops/instrumentation/google_generativeai/README.md
rename to agentops/instrumentation/google_genai/README.md
diff --git a/agentops/instrumentation/google_generativeai/__init__.py b/agentops/instrumentation/google_genai/__init__.py
similarity index 87%
rename from agentops/instrumentation/google_generativeai/__init__.py
rename to agentops/instrumentation/google_genai/__init__.py
index f4faf7d65..6a7ee24fa 100644
--- a/agentops/instrumentation/google_generativeai/__init__.py
+++ b/agentops/instrumentation/google_genai/__init__.py
@@ -31,10 +31,10 @@ def get_version() -> str:
logger = logging.getLogger(__name__)
# Import after defining constants to avoid circular imports
-from agentops.instrumentation.google_generativeai.instrumentor import GoogleGenerativeAIInstrumentor # noqa: E402
+from agentops.instrumentation.google_genai.instrumentor import GoogleGenAIInstrumentor # noqa: E402
__all__ = [
"LIBRARY_NAME",
"LIBRARY_VERSION",
- "GoogleGenerativeAIInstrumentor",
+ "GoogleGenAIInstrumentor",
]
diff --git a/agentops/instrumentation/google_generativeai/attributes/__init__.py b/agentops/instrumentation/google_genai/attributes/__init__.py
similarity index 70%
rename from agentops/instrumentation/google_generativeai/attributes/__init__.py
rename to agentops/instrumentation/google_genai/attributes/__init__.py
index 243549c99..94407d6cb 100644
--- a/agentops/instrumentation/google_generativeai/attributes/__init__.py
+++ b/agentops/instrumentation/google_genai/attributes/__init__.py
@@ -1,16 +1,16 @@
"""Attribute extractors for Google Generative AI instrumentation."""
-from agentops.instrumentation.google_generativeai.attributes.common import (
+from agentops.instrumentation.google_genai.attributes.common import (
get_common_instrumentation_attributes,
extract_request_attributes,
)
-from agentops.instrumentation.google_generativeai.attributes.model import (
+from agentops.instrumentation.google_genai.attributes.model import (
get_model_attributes,
get_generate_content_attributes,
get_stream_attributes,
get_token_counting_attributes,
)
-from agentops.instrumentation.google_generativeai.attributes.chat import (
+from agentops.instrumentation.google_genai.attributes.chat import (
get_chat_attributes,
)
diff --git a/agentops/instrumentation/google_generativeai/attributes/chat.py b/agentops/instrumentation/google_genai/attributes/chat.py
similarity index 96%
rename from agentops/instrumentation/google_generativeai/attributes/chat.py
rename to agentops/instrumentation/google_genai/attributes/chat.py
index cc29856d9..7b9c3a8ac 100644
--- a/agentops/instrumentation/google_generativeai/attributes/chat.py
+++ b/agentops/instrumentation/google_genai/attributes/chat.py
@@ -5,11 +5,11 @@
from agentops.logging import logger
from agentops.semconv import SpanAttributes, LLMRequestTypeValues, MessageAttributes
from agentops.instrumentation.common.attributes import AttributeMap
-from agentops.instrumentation.google_generativeai.attributes.common import (
+from agentops.instrumentation.google_genai.attributes.common import (
extract_request_attributes,
get_common_instrumentation_attributes,
)
-from agentops.instrumentation.google_generativeai.attributes.model import (
+from agentops.instrumentation.google_genai.attributes.model import (
_extract_content_from_prompt,
_set_response_attributes,
)
diff --git a/agentops/instrumentation/google_generativeai/attributes/common.py b/agentops/instrumentation/google_genai/attributes/common.py
similarity index 97%
rename from agentops/instrumentation/google_generativeai/attributes/common.py
rename to agentops/instrumentation/google_genai/attributes/common.py
index 4e2b67d5b..da158d291 100644
--- a/agentops/instrumentation/google_generativeai/attributes/common.py
+++ b/agentops/instrumentation/google_genai/attributes/common.py
@@ -9,7 +9,7 @@
get_common_attributes,
_extract_attributes_from_mapping,
)
-from agentops.instrumentation.google_generativeai import LIBRARY_NAME, LIBRARY_VERSION
+from agentops.instrumentation.google_genai import LIBRARY_NAME, LIBRARY_VERSION
# Common mapping for config parameters
REQUEST_CONFIG_ATTRIBUTES: AttributeMap = {
diff --git a/agentops/instrumentation/google_generativeai/attributes/model.py b/agentops/instrumentation/google_genai/attributes/model.py
similarity index 99%
rename from agentops/instrumentation/google_generativeai/attributes/model.py
rename to agentops/instrumentation/google_genai/attributes/model.py
index 8082d4263..022a4fbac 100644
--- a/agentops/instrumentation/google_generativeai/attributes/model.py
+++ b/agentops/instrumentation/google_genai/attributes/model.py
@@ -5,7 +5,7 @@
from agentops.logging import logger
from agentops.semconv import SpanAttributes, LLMRequestTypeValues, MessageAttributes
from agentops.instrumentation.common.attributes import AttributeMap
-from agentops.instrumentation.google_generativeai.attributes.common import (
+from agentops.instrumentation.google_genai.attributes.common import (
extract_request_attributes,
get_common_instrumentation_attributes,
)
diff --git a/agentops/instrumentation/google_generativeai/instrumentor.py b/agentops/instrumentation/google_genai/instrumentor.py
similarity index 96%
rename from agentops/instrumentation/google_generativeai/instrumentor.py
rename to agentops/instrumentation/google_genai/instrumentor.py
index 85d93e972..023cd5add 100644
--- a/agentops/instrumentation/google_generativeai/instrumentor.py
+++ b/agentops/instrumentation/google_genai/instrumentor.py
@@ -16,12 +16,12 @@
from agentops.logging import logger
from agentops.instrumentation.common.wrappers import WrapConfig, wrap, unwrap
-from agentops.instrumentation.google_generativeai import LIBRARY_NAME, LIBRARY_VERSION
-from agentops.instrumentation.google_generativeai.attributes.model import (
+from agentops.instrumentation.google_genai import LIBRARY_NAME, LIBRARY_VERSION
+from agentops.instrumentation.google_genai.attributes.model import (
get_generate_content_attributes,
get_token_counting_attributes,
)
-from agentops.instrumentation.google_generativeai.stream_wrapper import (
+from agentops.instrumentation.google_genai.stream_wrapper import (
generate_content_stream_wrapper,
generate_content_stream_async_wrapper,
)
@@ -96,7 +96,7 @@
]
-class GoogleGenerativeAIInstrumentor(BaseInstrumentor):
+class GoogleGenAIInstrumentor(BaseInstrumentor):
"""An instrumentor for Google Generative AI (Gemini) API.
This class provides instrumentation for Google's Generative AI API by wrapping key methods
diff --git a/agentops/instrumentation/google_generativeai/stream_wrapper.py b/agentops/instrumentation/google_genai/stream_wrapper.py
similarity index 98%
rename from agentops/instrumentation/google_generativeai/stream_wrapper.py
rename to agentops/instrumentation/google_genai/stream_wrapper.py
index 61868ecbc..9b61cee62 100644
--- a/agentops/instrumentation/google_generativeai/stream_wrapper.py
+++ b/agentops/instrumentation/google_genai/stream_wrapper.py
@@ -14,11 +14,11 @@
from agentops.semconv import SpanAttributes, LLMRequestTypeValues, CoreAttributes, MessageAttributes
from agentops.instrumentation.common.wrappers import _with_tracer_wrapper
-from agentops.instrumentation.google_generativeai.attributes.model import (
+from agentops.instrumentation.google_genai.attributes.model import (
get_generate_content_attributes,
get_stream_attributes,
)
-from agentops.instrumentation.google_generativeai.attributes.common import (
+from agentops.instrumentation.google_genai.attributes.common import (
extract_request_attributes,
)
diff --git a/docs/mint.json b/docs/mint.json
index 3c40b208e..f2c0865f7 100644
--- a/docs/mint.json
+++ b/docs/mint.json
@@ -167,8 +167,9 @@
{
"group": "Integrations",
"pages": [
- "v2/integrations/anthropic",
+ "v2/integrations/ag2",
"v2/integrations/autogen",
+ "v2/integrations/anthropic",
"v2/integrations/crewai",
"v2/integrations/google_adk",
"v2/integrations/gemini",
@@ -188,7 +189,9 @@
"v2/usage/advanced-configuration",
"v2/usage/tracking-llm-calls",
"v2/usage/tracking-agents",
- "v2/usage/recording-operations"
+ "v2/usage/recording-operations",
+ "v2/usage/trace-decorator",
+ "v2/usage/manual-trace-control"
],
"version": "v2"
},
diff --git a/docs/v1/examples/examples.mdx b/docs/v1/examples/examples.mdx
index 040c3369b..7466084b9 100644
--- a/docs/v1/examples/examples.mdx
+++ b/docs/v1/examples/examples.mdx
@@ -21,7 +21,7 @@ mode: "wide"
Manage multiple sessions at the same time
- } iconType="image" href="/v1/integrations/openai" href="/v1/examples/openai_assistants">
+ } iconType="image" href="/v1/integrations/openai" href="/v1/examples/openai_assistants">
Observe OpenAI Assistants
@@ -33,19 +33,19 @@ mode: "wide"
Integration with AI21's language models
- } iconType="image" href="/v1/integrations/anthropic">
+ } iconType="image" href="/v1/integrations/anthropic">
Track observations from Claude, Haiku and Sonnet series of models
- } iconType="image" href="/v1/integrations/autogen">
+ } iconType="image" href="/v1/integrations/autogen">
AG2 (Formerly AutoGen) multi-agent conversible workflow with tool usage
- } iconType="image" href="/v1/examples/camel">
+ } iconType="image" href="/v1/examples/camel">
Track and analyze CAMEL agents
- } iconType="image" href="/v1/integrations/cohere">
+ } iconType="image" href="/v1/integrations/cohere">
First class support for Command-R-Plus and chat streaming
@@ -57,15 +57,15 @@ mode: "wide"
Ultra-fast LLM inference with Groq Cloud
- } iconType="image" href="/v1/integrations/gemini">
+ } iconType="image" href="/v1/integrations/gemini">
Explore Google DeepMind's Gemini with observation via AgentOps
- } iconType="image" href="/v1/integrations/haystack">
+ } iconType="image" href="/v1/integrations/haystack">
Monitor your Haystack agents with AgentOps
- } iconType="image" href="/v1/examples/langchain">
+ } iconType="image" href="/v1/examples/langchain">
Jupyter Notebook with a sample LangChain integration
@@ -77,7 +77,7 @@ mode: "wide"
Unified interface for multiple LLM providers
- } iconType="image" href="/v1/integrations/mistral">
+ } iconType="image" href="/v1/integrations/mistral">
Support for Mistral AI's open-weight models
@@ -85,11 +85,11 @@ mode: "wide"
Create an autonomous browser agent capable of navigating the web and extracting information
- } iconType="image" href="/v1/examples/ollama">
+ } iconType="image" href="/v1/examples/ollama">
Simple Ollama integration with AgentOps
- } iconType="image" href="/v1/integrations/openai">
+ } iconType="image" href="/v1/integrations/openai">
First class support for GPT family of models
@@ -97,19 +97,19 @@ mode: "wide"
Create a REST server that performs and observes agent tasks
- } iconType="image" iconType="solid" href="/v1/integrations/smolagents">
+ } iconType="image" iconType="solid" href="/v1/integrations/smolagents">
Track HuggingFace's smolagents with AgentOps seamlessly
- } iconType="image" href="/v1/integrations/swarmzero">
+ } iconType="image" href="/v1/integrations/swarmzero">
SwarmZero multi-agent framework for AI Agents and AI Swarms with AgentOps support
- } iconType="image" href="/v1/integrations/taskweaver">
+ } iconType="image" href="/v1/integrations/taskweaver">
First class support for Microsoft TaskWeaver
- } iconType="image" href="/v1/integrations/xai">
+ } iconType="image" href="/v1/integrations/xai">
Observe the power of Grok and Grok Vision with AgentOps
diff --git a/docs/v1/introduction.mdx b/docs/v1/introduction.mdx
index 470a6b405..38f563c01 100644
--- a/docs/v1/introduction.mdx
+++ b/docs/v1/introduction.mdx
@@ -7,13 +7,13 @@ mode: "wide"
## Integrate with developer favorite agent frameworks
- } iconType="image" href="/v1/integrations/agentssdk" />
- } iconType="image" href="/v1/integrations/crewai" />
- } iconType="image" href="/v1/integrations/autogen" />
- } iconType="image" href="/v1/integrations/autogen" />
- } iconType="image" href="/v1/integrations/anthropic" />
- } iconType="image" href="/v1/integrations/ollama" />
- } iconType="image" href="/v1/integrations/cohere" />
+ } iconType="image" href="/v1/integrations/agentssdk" />
+ } iconType="image" href="/v1/integrations/crewai" />
+ } iconType="image" href="/v1/integrations/autogen" />
+ } iconType="image" href="/v1/integrations/autogen" />
+ } iconType="image" href="/v1/integrations/anthropic" />
+ } iconType="image" href="/v1/integrations/ollama" />
+ } iconType="image" href="/v1/integrations/cohere" />
@@ -42,12 +42,12 @@ You also get helpful debugging info such as any SDK versions you were on if you'
LLM calls are presented as a familiar chat history view, and charts give you a breakdown of the types of events that were called and how long they took.
-
+
Find any past sessions from your Session Drawer.
-
+
Most powerful of all is the Session Waterfall. On the left, a time visualization of all your LLM calls, Action events, Tool calls, and Errors.
@@ -55,14 +55,14 @@ On the right, specific details about the event you've selected on the waterfall.
Most of which has been automatically recorded for you.
-
+
### Session Overview
View a meta-analysis of all of your sessions in a single view.
-
+
diff --git a/docs/v2/examples/examples.mdx b/docs/v2/examples/examples.mdx
index 7fca0428d..b36f990d5 100644
--- a/docs/v2/examples/examples.mdx
+++ b/docs/v2/examples/examples.mdx
@@ -13,7 +13,7 @@ mode: "wide"
Tracking operations from multiple different agents
- } iconType="image" href="/v2/examples/openai_assistants">
+ } iconType="image" href="/v2/examples/openai_assistants">
Observe OpenAI Assistants with AgentOps
@@ -21,7 +21,7 @@ mode: "wide"
Basic usage with OpenAI API, perfect for getting started
- } iconType="image" href="/v2/integrations/agentssdk">
+ } iconType="image" href="/v2/integrations/agentssdk">
Monitor multi-agent workflows with handoffs and tool usage
@@ -29,11 +29,11 @@ mode: "wide"
### Other Integrations Documentation
- } iconType="image" href="/v2/integrations/anthropic">
+ } iconType="image" href="/v2/integrations/anthropic">
Track observations from Claude, Haiku and Sonnet series of models
- } iconType="image" href="/v2/integrations/autogen">
+ } iconType="image" href="/v2/integrations/autogen">
Autogen/AG2 multi-agent conversible workflow with tool usage
@@ -41,19 +41,19 @@ mode: "wide"
CrewAI multi-agent framework with AgentOps support
- } iconType="image" href="/v2/integrations/gemini">
+ } iconType="image" href="/v2/integrations/gemini">
Explore Google DeepMind's Gemini with observation via AgentOps
- } iconType="image" href="/v2/integrations/google_adk">
+ } iconType="image" href="/v2/integrations/google_adk">
Track and analyze your Google Agent Development Kit (ADK) AI agents
- } iconType="image" href="/v2/integrations/ibm_watsonx_ai">
+ } iconType="image" href="/v2/integrations/ibm_watsonx_ai">
Track and analyze your IBM Watsonx.ai model interactions
- } iconType="image" href="/v2/integrations/langchain">
+ } iconType="image" href="/v2/integrations/langchain">
First-class support for LangChain agents and chains
@@ -61,11 +61,11 @@ mode: "wide"
Unified interface for multiple LLM providers
- } iconType="image" href="/v2/integrations/openai">
+ } iconType="image" href="/v2/integrations/openai">
First class support for GPT family of models
- } iconType="image" href="/v2/integrations/agentssdk">
+ } iconType="image" href="/v2/integrations/agentssdk">
Monitor OpenAI Agents SDK multi-agent workflows with handoffs
diff --git a/docs/v2/integrations/ag2.mdx b/docs/v2/integrations/ag2.mdx
new file mode 100644
index 000000000..33a243777
--- /dev/null
+++ b/docs/v2/integrations/ag2.mdx
@@ -0,0 +1,159 @@
+---
+title: AG2
+description: "Track and analyze your AG2 agents with AgentOps"
+---
+
+import CodeTooltip from '/snippets/add-code-tooltip.mdx'
+import EnvTooltip from '/snippets/add-env-tooltip.mdx'
+
+## Installation
+
+
+ ```bash pip
+ pip install agentops pyautogen
+ ```
+ ```bash poetry
+ poetry add agentops pyautogen
+ ```
+
+
+## Usage
+
+Initialize AgentOps at the beginning of your application to automatically track all AG2 agent interactions:
+
+
+```python Python
+import agentops
+import autogen
+
+# Initialize AgentOps
+agentops.init()
+
+# Configure your AG2 agents
+config_list = [
+ {
+ "model": "gpt-4",
+ "api_key": ""
+ }
+]
+
+llm_config = {
+ "config_list": config_list,
+ "timeout": 60,
+}
+
+# Create AG2 agents
+assistant = autogen.AssistantAgent(
+ name="assistant",
+ llm_config=llm_config,
+ system_message="You are a helpful AI assistant."
+)
+
+user_proxy = autogen.UserProxyAgent(
+ name="user_proxy",
+ human_input_mode="TERMINATE",
+ max_consecutive_auto_reply=10,
+ is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
+ code_execution_config={"last_n_messages": 3, "work_dir": "coding"},
+)
+
+# Initiate a conversation
+user_proxy.initiate_chat(
+ assistant,
+ message="How can I implement a basic web scraper in Python?"
+)
+
+# All agent interactions are automatically tracked by AgentOps
+```
+
+
+## Multi-Agent Conversation Example
+
+AgentOps tracks interactions across multiple AG2 agents:
+
+
+```python Python
+import agentops
+import autogen
+
+# Initialize AgentOps
+agentops.init()
+
+# Configure LLM
+config_list = [
+ {
+ "model": "gpt-4",
+ "api_key": ""
+ }
+]
+
+llm_config = {
+ "config_list": config_list,
+ "timeout": A 60,
+}
+
+# Create a team of agents
+researcher = autogen.AssistantAgent(
+ name="researcher",
+ llm_config=llm_config,
+ system_message="You are a researcher who specializes in finding accurate information."
+)
+
+coder = autogen.AssistantAgent(
+ name="coder",
+ llm_config=llm_config,
+ system_message="You are an expert programmer who writes clean, efficient code."
+)
+
+critic = autogen.AssistantAgent(
+ name="critic",
+ llm_config=llm_config,
+ system_message="You review solutions and provide constructive feedback."
+)
+
+user_proxy = autogen.UserProxyAgent(
+ name="user_proxy",
+ human_input_mode="TERMINATE",
+ max_consecutive_auto_reply=10,
+ is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
+ code_execution_config={"last_n_messages": 3, "work_dir": "coding"},
+)
+
+# Create a group chat
+groupchat = autogen.GroupChat(
+ agents=[user_proxy, researcher, coder, critic],
+ messages=[],
+ max_round=12
+)
+
+manager = autogen.GroupChatManager(
+ groupchat=groupchat,
+ llm_config=llm_config
+)
+
+# Initiate the group chat
+user_proxy.initiate_chat(
+ manager,
+ message="Create a Python program to analyze sentiment from Twitter data."
+)
+
+# All agent interactions across the group chat are automatically tracked by AgentOps
+```
+
+
+## Environment Variables
+
+
+
+ ```python .env
+ AGENTOPS_API_KEY=
+ OPENAI_API_KEY=
+ ```
+
+
+Read more about environment variables in [Advanced Configuration](/v2/usage/advanced-configuration)
+
+
+
+
+
diff --git a/docs/v2/integrations/autogen.mdx b/docs/v2/integrations/autogen.mdx
index a8bf137bc..ed5fabfcf 100644
--- a/docs/v2/integrations/autogen.mdx
+++ b/docs/v2/integrations/autogen.mdx
@@ -1,159 +1,454 @@
---
-title: AutoGen
-description: "Track and analyze your AutoGen agents with AgentOps"
+title: "AutoGen"
+description: "Integrate AgentOps with Microsoft AutoGen for multi-agent workflow tracking"
---
-import CodeTooltip from '/snippets/add-code-tooltip.mdx'
-import EnvTooltip from '/snippets/add-env-tooltip.mdx'
+[AutoGen](https://microsoft.github.io/autogen/stable/) is Microsoft's framework for building multi-agent conversational AI systems. AgentOps provides seamless integration with AutoGen to track and monitor your multi-agent workflows.
-## Installation
+## Quick Start
- ```bash pip
- pip install agentops pyautogen
- ```
- ```bash poetry
- poetry add agentops pyautogen
- ```
+```bash pip
+pip install agentops autogen-core python-dotenv
+```
+```bash poetry
+poetry add agentops autogen-core python-dotenv
+```
-## Usage
+## Basic Integration
-Initialize AgentOps at the beginning of your application to automatically track all AutoGen agent interactions:
+AgentOps automatically instruments AutoGen agents and tracks their interactions. Simply initialize AgentOps before creating your AutoGen agents:
-
-```python Python
+
+**Automatic Telemetry Integration**: AgentOps automatically picks up spans from AutoGen's built-in OpenTelemetry integration. No additional instrumentation is required; just initialize AgentOps and your AutoGen agents will be automatically tracked.
+
+
+```python
+import asyncio
+from dataclasses import dataclass
+from typing import Callable
+from dotenv import load_dotenv
import agentops
-import autogen
-# Initialize AgentOps
-agentops.init()
-
-# Configure your AutoGen agents
-config_list = [
- {
- "model": "gpt-4",
- "api_key": ""
- }
-]
-
-llm_config = {
- "config_list": config_list,
- "timeout": 60,
-}
-
-# Create AutoGen agents
-assistant = autogen.AssistantAgent(
- name="assistant",
- llm_config=llm_config,
- system_message="You are a helpful AI assistant."
+from autogen_core import (
+ DefaultTopicId,
+ MessageContext,
+ RoutedAgent,
+ default_subscription,
+ message_handler,
+ AgentId,
+ SingleThreadedAgentRuntime
)
-user_proxy = autogen.UserProxyAgent(
- name="user_proxy",
- human_input_mode="TERMINATE",
- max_consecutive_auto_reply=10,
- is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
- code_execution_config={"last_n_messages": 3, "work_dir": "coding"},
-)
+# Load environment variables
+load_dotenv()
-# Initiate a conversation
-user_proxy.initiate_chat(
- assistant,
- message="How can I implement a basic web scraper in Python?"
-)
+# Initialize AgentOps - this will automatically track AutoGen agents
+agentops.init()
+
+@dataclass
+class CountdownMessage:
+ """Message containing a number for countdown operations"""
+ content: int
+
+@default_subscription
+class ModifierAgent(RoutedAgent):
+ """Agent that modifies numbers by applying a transformation function"""
+
+ def __init__(self, modify_val: Callable[[int], int]) -> None:
+ super().__init__("A modifier agent that transforms numbers.")
+ self._modify_val = modify_val
+
+ @message_handler
+ async def handle_message(self, message: CountdownMessage, ctx: MessageContext) -> None:
+ """Handle incoming messages and apply modification"""
+ original_val = message.content
+ modified_val = self._modify_val(original_val)
+
+ print(f"š§ ModifierAgent: Transformed {original_val} ā {modified_val}")
+
+ # Publish the modified value to continue the workflow
+ await self.publish_message(
+ CountdownMessage(content=modified_val),
+ DefaultTopicId()
+ )
+
+@default_subscription
+class CheckerAgent(RoutedAgent):
+ """Agent that checks if a condition is met and decides whether to continue"""
+
+ def __init__(self, stop_condition: Callable[[int], bool]) -> None:
+ super().__init__("A checker agent that validates conditions.")
+ self._stop_condition = stop_condition
+
+ @message_handler
+ async def handle_message(self, message: CountdownMessage, ctx: MessageContext) -> None:
+ """Handle incoming messages and check stopping condition"""
+ value = message.content
+
+ if not self._stop_condition(value):
+ print(f"ā
CheckerAgent: {value} passed validation, continuing workflow")
+ # Continue the workflow by publishing the message
+ await self.publish_message(
+ CountdownMessage(content=value),
+ DefaultTopicId()
+ )
+ else:
+ print(f"š CheckerAgent: {value} failed validation, stopping workflow")
+ print("š Countdown completed successfully!")
-# All agent interactions are automatically tracked by AgentOps
+async def run_countdown_workflow():
+ """Run a countdown workflow from 10 to 1 using AutoGen agents"""
+
+ print("š Starting AutoGen Countdown Workflow")
+ print("=" * 50)
+
+ # Create the AutoGen runtime
+ runtime = SingleThreadedAgentRuntime()
+
+ # Register the modifier agent (subtracts 1 from each number)
+ await ModifierAgent.register(
+ runtime,
+ "modifier",
+ lambda: ModifierAgent(modify_val=lambda x: x - 1),
+ )
+
+ # Register the checker agent (stops when value <= 1)
+ await CheckerAgent.register(
+ runtime,
+ "checker",
+ lambda: CheckerAgent(stop_condition=lambda x: x <= 1),
+ )
+
+ # Start the runtime
+ runtime.start()
+ print("š¤ AutoGen runtime started")
+ print("šØ Sending initial message with value: 10")
+
+ # Send initial message to start the countdown
+ await runtime.send_message(
+ CountdownMessage(10),
+ AgentId("checker", "default")
+ )
+
+ # Wait for the workflow to complete
+ await runtime.stop_when_idle()
+
+ print("=" * 50)
+ print("⨠Workflow completed! Check your AgentOps dashboard for detailed traces.")
+
+# Run the workflow
+if __name__ == "__main__":
+ asyncio.run(run_countdown_workflow())
```
-
-## Multi-Agent Conversation Example
+## Advanced Multi-Agent Example
-AgentOps tracks interactions across multiple AutoGen agents:
+Here's a more complex example showing a data processing pipeline with multiple specialized agents:
-
-```python Python
+```python
+import asyncio
+from dataclasses import dataclass
+from typing import List, Dict, Any
+from dotenv import load_dotenv
import agentops
-import autogen
-# Initialize AgentOps
-agentops.init()
-
-# Configure LLM
-config_list = [
- {
- "model": "gpt-4",
- "api_key": ""
- }
-]
-
-llm_config = {
- "config_list": config_list,
- "timeout": A 60,
-}
-
-# Create a team of agents
-researcher = autogen.AssistantAgent(
- name="researcher",
- llm_config=llm_config,
- system_message="You are a researcher who specializes in finding accurate information."
+from autogen_core import (
+ DefaultTopicId,
+ MessageContext,
+ RoutedAgent,
+ default_subscription,
+ message_handler,
+ AgentId,
+ SingleThreadedAgentRuntime
)
-coder = autogen.AssistantAgent(
- name="coder",
- llm_config=llm_config,
- system_message="You are an expert programmer who writes clean, efficient code."
-)
+# Load environment variables
+load_dotenv()
-critic = autogen.AssistantAgent(
- name="critic",
- llm_config=llm_config,
- system_message="You review solutions and provide constructive feedback."
-)
+# Initialize AgentOps
+agentops.init()
-user_proxy = autogen.UserProxyAgent(
- name="user_proxy",
- human_input_mode="TERMINATE",
- max_consecutive_auto_reply=10,
- is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
- code_execution_config={"last_n_messages": 3, "work_dir": "coding"},
-)
+@dataclass
+class DataMessage:
+ """Message containing data to be processed"""
+ data: List[Dict[str, Any]]
+ stage: str
+ metadata: Dict[str, Any]
-# Create a group chat
-groupchat = autogen.GroupChat(
- agents=[user_proxy, researcher, coder, critic],
- messages=[],
- max_round=12
-)
+@default_subscription
+class DataCollectorAgent(RoutedAgent):
+ """Agent responsible for collecting and preparing initial data"""
+
+ def __init__(self) -> None:
+ super().__init__("Data collector agent that gathers initial dataset.")
-manager = autogen.GroupChatManager(
- groupchat=groupchat,
- llm_config=llm_config
-)
+ @message_handler
+ async def handle_message(self, message: DataMessage, ctx: MessageContext) -> None:
+ print(f"š DataCollector: Collecting data for {message.metadata.get('source', 'unknown')}")
+
+ # Simulate data collection
+ collected_data = [
+ {"id": 1, "value": 100, "category": "A"},
+ {"id": 2, "value": 200, "category": "B"},
+ {"id": 3, "value": 150, "category": "A"},
+ {"id": 4, "value": 300, "category": "C"},
+ ]
+
+ print(f"ā
DataCollector: Collected {len(collected_data)} records")
+
+ # Send to processor
+ await self.publish_message(
+ DataMessage(
+ data=collected_data,
+ stage="processing",
+ metadata={**message.metadata, "collected_count": len(collected_data)}
+ ),
+ DefaultTopicId()
+ )
-# Initiate the group chat
-user_proxy.initiate_chat(
- manager,
- message="Create a Python program to analyze sentiment from Twitter data."
-)
+@default_subscription
+class DataProcessorAgent(RoutedAgent):
+ """Agent that processes and transforms data"""
+
+ def __init__(self) -> None:
+ super().__init__("Data processor agent that transforms collected data.")
+
+ @message_handler
+ async def handle_message(self, message: DataMessage, ctx: MessageContext) -> None:
+ if message.stage != "processing":
+ return
+
+ print(f"āļø DataProcessor: Processing {len(message.data)} records")
+
+ # Process data - add calculated fields
+ processed_data = []
+ for item in message.data:
+ processed_item = {
+ **item,
+ "processed_value": item["value"] * 1.1, # 10% increase
+ "status": "processed"
+ }
+ processed_data.append(processed_item)
+
+ print(f"ā
DataProcessor: Processed {len(processed_data)} records")
+
+ # Send to analyzer
+ await self.publish_message(
+ DataMessage(
+ data=processed_data,
+ stage="analysis",
+ metadata={**message.metadata, "processed_count": len(processed_data)}
+ ),
+ DefaultTopicId()
+ )
-# All agent interactions across the group chat are automatically tracked by AgentOps
+@default_subscription
+class DataAnalyzerAgent(RoutedAgent):
+ """Agent that analyzes processed data and generates insights"""
+
+ def __init__(self) -> None:
+ super().__init__("Data analyzer agent that generates insights.")
+
+ @message_handler
+ async def handle_message(self, message: DataMessage, ctx: MessageContext) -> None:
+ if message.stage != "analysis":
+ return
+
+ print(f"š§ DataAnalyzer: Analyzing {len(message.data)} records")
+
+ # Perform analysis
+ total_value = sum(item["processed_value"] for item in message.data)
+ avg_value = total_value / len(message.data)
+ categories = set(item["category"] for item in message.data)
+
+ analysis_results = {
+ "total_records": len(message.data),
+ "total_value": total_value,
+ "average_value": avg_value,
+ "unique_categories": len(categories),
+ "categories": list(categories)
+ }
+
+ print(f"š DataAnalyzer: Analysis complete")
+ print(f" ⢠Total records: {analysis_results['total_records']}")
+ print(f" ⢠Average value: {analysis_results['average_value']:.2f}")
+ print(f" ⢠Categories: {', '.join(analysis_results['categories'])}")
+
+ # Send to reporter
+ await self.publish_message(
+ DataMessage(
+ data=message.data,
+ stage="reporting",
+ metadata={
+ **message.metadata,
+ "analysis": analysis_results
+ }
+ ),
+ DefaultTopicId()
+ )
+
+@default_subscription
+class ReportGeneratorAgent(RoutedAgent):
+ """Agent that generates final reports"""
+
+ def __init__(self) -> None:
+ super().__init__("Report generator agent that creates final output.")
+
+ @message_handler
+ async def handle_message(self, message: DataMessage, ctx: MessageContext) -> None:
+ if message.stage != "reporting":
+ return
+
+ print(f"š ReportGenerator: Generating final report")
+
+ analysis = message.metadata.get("analysis", {})
+
+ report = f"""
+šÆ DATA PROCESSING REPORT
+========================
+Source: {message.metadata.get('source', 'Unknown')}
+Processing Date: {message.metadata.get('timestamp', 'Unknown')}
+
+SUMMARY STATISTICS:
+- Total Records Processed: {analysis.get('total_records', 0)}
+- Total Value: ${analysis.get('total_value', 0):,.2f}
+- Average Value: ${analysis.get('average_value', 0):,.2f}
+- Unique Categories: {analysis.get('unique_categories', 0)}
+- Categories Found: {', '.join(analysis.get('categories', []))}
+
+Processing pipeline completed successfully!
+ """
+
+ print(report)
+ print("š Multi-agent data processing workflow completed!")
+
+async def run_data_processing_pipeline():
+ """Run a complete data processing pipeline using multiple AutoGen agents"""
+
+ print("š Starting AutoGen Data Processing Pipeline")
+ print("=" * 60)
+
+ # Create runtime
+ runtime = SingleThreadedAgentRuntime()
+
+ # Register all agents
+ await DataCollectorAgent.register(
+ runtime,
+ "collector",
+ lambda: DataCollectorAgent(),
+ )
+
+ await DataProcessorAgent.register(
+ runtime,
+ "processor",
+ lambda: DataProcessorAgent(),
+ )
+
+ await DataAnalyzerAgent.register(
+ runtime,
+ "analyzer",
+ lambda: DataAnalyzerAgent(),
+ )
+
+ await ReportGeneratorAgent.register(
+ runtime,
+ "reporter",
+ lambda: ReportGeneratorAgent(),
+ )
+
+ # Start runtime
+ runtime.start()
+ print("š¤ AutoGen runtime with 4 agents started")
+
+ # Trigger the pipeline
+ initial_message = DataMessage(
+ data=[],
+ stage="collection",
+ metadata={
+ "source": "customer_database",
+ "timestamp": "2024-01-15T10:30:00Z",
+ "pipeline_id": "data_proc_001"
+ }
+ )
+
+ print("šØ Triggering data processing pipeline...")
+ await runtime.send_message(
+ initial_message,
+ AgentId("collector", "default")
+ )
+
+ # Wait for completion
+ await runtime.stop_when_idle()
+
+ print("=" * 60)
+ print("⨠Pipeline completed! Check AgentOps dashboard for detailed agent traces.")
+
+# Run the pipeline
+if __name__ == "__main__":
+ asyncio.run(run_data_processing_pipeline())
```
-
-## Environment Variables
+## What AgentOps Tracks
-
-
- ```python .env
- AGENTOPS_API_KEY=
- OPENAI_API_KEY=
- ```
-
+AgentOps leverages AutoGen's built-in OpenTelemetry integration to automatically capture comprehensive telemetry data:
+
+### Automatic Span Collection
+- **Agent Operations**: All agent message handling and processing operations
+- **Message Flow**: Complete trace of message routing between agents
+- **Runtime Events**: Agent registration, startup, and shutdown events
+- **Error Handling**: Automatic capture of exceptions and error states
+
+### Built-in Telemetry Integration
+AutoGen Core includes native OpenTelemetry support, and AgentOps seamlessly integrates with this telemetry system to provide:
+
+- **Zero-Configuration Tracking**: No manual instrumentation required
+- **Complete Workflow Visibility**: End-to-end trace of multi-agent interactions
+- **Performance Metrics**: Timing data for each agent operation and message exchange
+- **Distributed Tracing**: Support for agents running across different processes or machines
+
+## Best Practices
+
+1. **Initialize Early**: Call `agentops.init()` before creating any AutoGen agents
+2. **Use Descriptive Names**: Give your agents meaningful names for better dashboard visibility
+3. **Structure Messages**: Use well-defined message classes for better tracking
+4. **Handle Errors**: Implement proper error handling in your message handlers
+5. **Monitor Performance**: Use the AgentOps dashboard to identify bottlenecks in your agent workflows
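+
+A minimal sketch that applies these practices together, assuming the `CountdownMessage`, `ModifierAgent`, and `CheckerAgent` classes from the countdown example above are already defined:
+
+```python
+import asyncio
+import agentops
+from autogen_core import AgentId, SingleThreadedAgentRuntime
+
+# 1. Initialize AgentOps before any agents or runtimes are created
+agentops.init()
+
+async def main():
+    runtime = SingleThreadedAgentRuntime()
+
+    # 2. Register agents under descriptive names for dashboard visibility
+    await ModifierAgent.register(runtime, "modifier", lambda: ModifierAgent(modify_val=lambda x: x - 1))
+    await CheckerAgent.register(runtime, "checker", lambda: CheckerAgent(stop_condition=lambda x: x <= 1))
+
+    runtime.start()
+
+    # 3. Use the well-defined CountdownMessage class so message flow is easy to follow
+    await runtime.send_message(CountdownMessage(10), AgentId("checker", "default"))
+    await runtime.stop_when_idle()
+
+asyncio.run(main())
+```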
+
+## Dashboard Features
+
+The AgentOps dashboard provides:
+
+- **Agent Network Visualization**: See how your agents communicate
+- **Message Trace Timeline**: Follow the complete flow of messages
+- **Performance Analytics**: Identify slow agents or bottlenecks
+- **Error Tracking**: Monitor and debug agent failures
+- **Cost Analysis**: Track computational costs across your agent network
+
+## Example Output
+
+When you run the examples above, you'll see output like:
+
+```
+Starting AutoGen Countdown Workflow
+==================================================
+AutoGen runtime started
+Sending initial message with value: 10
+CheckerAgent: 10 passed validation, continuing workflow
+ModifierAgent: Transformed 10 -> 9
+CheckerAgent: 9 passed validation, continuing workflow
+ModifierAgent: Transformed 9 -> 8
+...
+CheckerAgent: 1 failed validation, stopping workflow
+Countdown completed successfully!
+==================================================
+Workflow completed! Check your AgentOps dashboard for detailed traces.
+```
-Read more about environment variables in [Advanced Configuration](/v2/usage/advanced-configuration)
+Visit your [AgentOps Dashboard](https://app.agentops.ai) to see detailed traces of your AutoGen agent interactions, performance metrics, and workflow analytics.
-
+
\ No newline at end of file
diff --git a/docs/v2/integrations/google_adk.mdx b/docs/v2/integrations/google_adk.mdx
index 9978d78dc..36cac1852 100644
--- a/docs/v2/integrations/google_adk.mdx
+++ b/docs/v2/integrations/google_adk.mdx
@@ -21,73 +21,162 @@ AgentOps provides seamless integration with [Google Agent Development Kit (ADK)]
```
-## Basic Usage
+## Usage
Initialize AgentOps at the beginning of your application to automatically track all Google ADK agent interactions:
-```python Basic Usage
-import asyncio
-import uuid
-import os
-from google.genai import types
+```python Usage
+# --- Full example code demonstrating LlmAgent with Tools vs. Output Schema ---
+import json # Needed for pretty printing dicts
+from google.adk.agents import LlmAgent
+from google.adk.runners import Runner
+from google.adk.sessions import InMemorySessionService
+from google.genai import types
+from pydantic import BaseModel, Field
+import asyncio
import agentops
-from google.adk.agents import Agent
-from google.adk.runners import InMemoryRunner
-from google.adk.agents.run_config import RunConfig, StreamingMode
-
-# Initialize AgentOps
-agentops.init(api_key="")
-
-# Create a simple agent with no tools
-agent = Agent(
- name="simple_agent",
- model="gemini-1.5-flash",
- instruction="You are a helpful assistant that provides clear and concise answers.",
+
+agentops.init("your-api-key")
+
+# --- 1. Define Constants ---
+APP_NAME = "agent_comparison_app"
+USER_ID = "test_user_456"
+SESSION_ID_TOOL_AGENT = "session_tool_agent_xyz"
+SESSION_ID_SCHEMA_AGENT = "session_schema_agent_xyz"
+MODEL_NAME = "gemini-2.0-flash"
+
+# --- 2. Define Schemas ---
+
+# Input schema used by both agents
+class CountryInput(BaseModel):
+ country: str = Field(description="The country to get information about.")
+
+# Output schema ONLY for the second agent
+class CapitalInfoOutput(BaseModel):
+ capital: str = Field(description="The capital city of the country.")
+ # Note: Population is illustrative; the LLM will infer or estimate this
+ # as it cannot use tools when output_schema is set.
+ population_estimate: str = Field(description="An estimated population of the capital city.")
+
+# --- 3. Define the Tool (Only for the first agent) ---
+def get_capital_city(country: str) -> str:
+ """Retrieves the capital city of a given country."""
+ print(f"\n-- Tool Call: get_capital_city(country='{country}') --")
+ country_capitals = {
+ "united states": "Washington, D.C.",
+ "canada": "Ottawa",
+ "france": "Paris",
+ "japan": "Tokyo",
+ }
+ result = country_capitals.get(country.lower(), f"Sorry, I couldn't find the capital for {country}.")
+ print(f"-- Tool Result: '{result}' --")
+ return result
+
+# --- 4. Configure Agents ---
+
+# Agent 1: Uses a tool and output_key
+capital_agent_with_tool = LlmAgent(
+ model=MODEL_NAME,
+ name="capital_agent_tool",
+ description="Retrieves the capital city using a specific tool.",
+ instruction="""You are a helpful agent that provides the capital city of a country using a tool.
+The user will provide the country name in a JSON format like {"country": "country_name"}.
+1. Extract the country name.
+2. Use the `get_capital_city` tool to find the capital.
+3. Respond clearly to the user, stating the capital city found by the tool.
+""",
+ tools=[get_capital_city],
+ input_schema=CountryInput,
+ output_key="capital_tool_result", # Store final text response
)
-# Create a runner
-runner = InMemoryRunner(
- agent=agent,
- app_name="simple-example",
+# Agent 2: Uses output_schema (NO tools possible)
+structured_info_agent_schema = LlmAgent(
+ model=MODEL_NAME,
+ name="structured_info_agent_schema",
+ description="Provides capital and estimated population in a specific JSON format.",
+ instruction=f"""You are an agent that provides country information.
+The user will provide the country name in a JSON format like {{"country": "country_name"}}.
+Respond ONLY with a JSON object matching this exact schema:
+{json.dumps(CapitalInfoOutput.model_json_schema(), indent=2)}
+Use your knowledge to determine the capital and estimate the population. Do not use any tools.
+""",
+ # *** NO tools parameter here - using output_schema prevents tool use ***
+ input_schema=CountryInput,
+ output_schema=CapitalInfoOutput, # Enforce JSON output structure
+ output_key="structured_info_result", # Store final JSON response
)
-# Setup session
-user_id = f"user-{uuid.uuid4().hex[:8]}"
-session_id = f"session-{uuid.uuid4().hex[:8]}"
-runner.session_service.create_session(
- app_name="simple-example",
- user_id=user_id,
- session_id=session_id,
+# --- 5. Set up Session Management and Runners ---
+session_service = InMemorySessionService()
+
+# Create a runner for EACH agent
+capital_runner = Runner(
+ agent=capital_agent_with_tool,
+ app_name=APP_NAME,
+ session_service=session_service
+)
+structured_runner = Runner(
+ agent=structured_info_agent_schema,
+ app_name=APP_NAME,
+ session_service=session_service
)
-# Run the agent with a user message
-async def run_agent():
- message = "What are three benefits of artificial intelligence?"
-
- content = types.Content(
- role="user",
- parts=[types.Part(text=message)],
- )
-
- run_config = RunConfig(
- streaming_mode=StreamingMode.NONE,
- )
+# --- 6. Define Agent Interaction Logic ---
+async def call_agent_and_print(
+ runner_instance: Runner,
+ agent_instance: LlmAgent,
+ session_id: str,
+ query_json: str
+):
+ """Sends a query to the specified agent/runner and prints results."""
+ print(f"\n>>> Calling Agent: '{agent_instance.name}' | Query: {query_json}")
+
+ user_content = types.Content(role='user', parts=[types.Part(text=query_json)])
+
+ final_response_content = "No final response received."
+ async for event in runner_instance.run_async(user_id=USER_ID, session_id=session_id, new_message=user_content):
+ # print(f"Event: {event.type}, Author: {event.author}") # Uncomment for detailed logging
+ if event.is_final_response() and event.content and event.content.parts:
+ # For output_schema, the content is the JSON string itself
+ final_response_content = event.content.parts[0].text
+
+ print(f"<<< Agent '{agent_instance.name}' Response: {final_response_content}")
+
+ current_session = await session_service.get_session(app_name=APP_NAME,
+ user_id=USER_ID,
+ session_id=session_id)
+ stored_output = current_session.state.get(agent_instance.output_key)
+
+ # Pretty print if the stored output looks like JSON (likely from output_schema)
+ print(f"--- Session State ['{agent_instance.output_key}']: ", end="")
+ try:
+ # Attempt to parse and pretty print if it's JSON
+ parsed_output = json.loads(stored_output)
+ print(json.dumps(parsed_output, indent=2))
+ except (json.JSONDecodeError, TypeError):
+ # Otherwise, print as string
+ print(stored_output)
+ print("-" * 30)
+
+
+# --- 7. Run Interactions ---
+async def main():
+ # Create sessions
+ await session_service.create_session(app_name=APP_NAME, user_id=USER_ID, session_id=SESSION_ID_TOOL_AGENT)
+ await session_service.create_session(app_name=APP_NAME, user_id=USER_ID, session_id=SESSION_ID_SCHEMA_AGENT)
- async for event in runner.run_async(
- user_id=user_id,
- session_id=session_id,
- new_message=content,
- run_config=run_config,
- ):
- if hasattr(event, 'content') and event.content and event.content.parts:
- for part in event.content.parts:
- if hasattr(part, 'text') and part.text:
- print(part.text)
-
-# Run the agent
-asyncio.run(run_agent())
+ print("--- Testing Agent with Tool ---")
+ await call_agent_and_print(capital_runner, capital_agent_with_tool, SESSION_ID_TOOL_AGENT, '{"country": "France"}')
+ await call_agent_and_print(capital_runner, capital_agent_with_tool, SESSION_ID_TOOL_AGENT, '{"country": "Canada"}')
+
+ print("\n\n--- Testing Agent with Output Schema (No Tool Use) ---")
+ await call_agent_and_print(structured_runner, structured_info_agent_schema, SESSION_ID_SCHEMA_AGENT, '{"country": "France"}')
+ await call_agent_and_print(structured_runner, structured_info_agent_schema, SESSION_ID_SCHEMA_AGENT, '{"country": "Japan"}')
+
+asyncio.run(main())
```
diff --git a/docs/v2/introduction.mdx b/docs/v2/introduction.mdx
index aa33e9188..9e0d1a900 100644
--- a/docs/v2/introduction.mdx
+++ b/docs/v2/introduction.mdx
@@ -19,7 +19,8 @@ mode: "wide"
} iconType="image" href="/v2/integrations/agentssdk" />
} iconType="image" href="/v2/integrations/crewai" />
- } iconType="image" href="/v2/integrations/autogen" />
+ } iconType="image" href="/v2/integrations/ag2" />
+ } iconType="image" href="/v1/integrations/autogen" />
} iconType="image" href="/v2/integrations/openai" />
} iconType="image" href="/v2/integrations/anthropic" />
} iconType="image" href="/v2/integrations/langchain" />
@@ -35,22 +36,31 @@ Observability and monitoring for your AI agents and LLM apps. And we do it all i
... that logs everything back to your AgentOps Dashboard.
-That's it! AgentOps will automatically instrument your code and start tracking sessions.
+That's it! AgentOps will automatically instrument your code and start tracking traces.
-Need more control? You can disable automatic session creation and manage sessions explicitly:
+Need more control? You can create custom traces using the `@trace` decorator (recommended) or manage traces manually for advanced use cases:
```python python
import agentops
- agentops.init(, auto_start_session=False)
+ from agentops.sdk.decorators import trace
- # Later, when you're ready to start a session:
- agentops.start_session("my-workflow-session")
+ agentops.init(, auto_start_session=False)
- # Your code here
- # ...
+ @trace(name="my-workflow", tags=["production"])
+ def my_workflow():
+ # Your code here
+ return "Workflow completed"
- # Sessions automatically end when your program exits
+ ```
+
+
+You can also set a custom trace name during initialization:
+
+
+ ```python python
+ import agentops
+ agentops.init(, trace_name="custom-trace-name")
```
diff --git a/docs/v2/quickstart.mdx b/docs/v2/quickstart.mdx
index ef912d4e4..86ef56c0c 100644
--- a/docs/v2/quickstart.mdx
+++ b/docs/v2/quickstart.mdx
@@ -25,7 +25,7 @@ import EnvTooltip from '/snippets/add-env-tooltip.mdx'
agentops.init()
```
- That's it! These two lines automatically instrument your code and start tracking sessions. Sessions automatically end when your program exits.
+ That's it! These two lines automatically instrument your code and start tracking traces. Traces automatically end when your program exits.
@@ -84,21 +84,43 @@ import EnvTooltip from '/snippets/add-env-tooltip.mdx'
```
-
- Create a session to group all your agent operations by using the `@session` decorator. Sessions serve as the root span for all operations.
+
+ Track tool usage and costs with the `@tool` decorator. You can specify costs to get total cost tracking directly in your dashboard summary.
```python python
- # Create a session
- from agentops.sdk.decorators import session
+ # Track tool usage with cost
+ from agentops.sdk.decorators import tool
- @session
+ @tool(cost=0.05)
+ def web_search(query):
+ # Tool logic here
+ return f"Search results for: {query}"
+
+ @tool
+ def calculate(expression):
+ # Tool without cost tracking
+ return eval(expression)
+ ```
+
+
+
+ Create custom traces to group operations using the `@trace` decorator, or manage traces manually for more control.
+ ```python python
+ # Create a trace with decorator
+ from agentops.sdk.decorators import trace
+
+ @trace
def my_workflow():
- # Your session code here
+ # Your workflow code here
agent = MyAgent("research-agent")
result = agent.perform_task("data analysis")
return result
- # Run the session
- my_workflow()
+ # Or manage traces manually
+ import agentops
+
+ trace = agentops.start_trace("custom-trace")
+ # Your code here
+ agentops.end_trace(trace, "Success")
```
@@ -110,10 +132,15 @@ Here is the complete code from the sections above
```python python
import agentops
-from agentops.sdk.decorators import agent, operation
+from agentops.sdk.decorators import agent, operation, tool, trace
+
+# Initialize AgentOps without auto-starting session since we use @trace
+agentops.init(, auto_start_session=False)
-# Initialize AgentOps
-agentops.init()
+# Create a tool with cost tracking
+@tool(cost=0.05)
+def web_search(query):
+ return f"Search results for: {query}"
# Create an agent class
@agent
@@ -123,9 +150,11 @@ class MyAgent:
@operation
def perform_task(self, task):
- # Agent task logic here
- return f"Completed {task}"
+ # Use a tool within the agent
+ search_results = web_search(f"research {task}")
+ return f"Completed {task} with results: {search_results}"
+@trace(name="research-workflow", tags=["research", "analysis"])
def run_agent_task(task_name):
agent = MyAgent("research-agent")
result = agent.perform_task(task_name)
diff --git a/docs/v2/usage/manual-trace-control.mdx b/docs/v2/usage/manual-trace-control.mdx
new file mode 100644
index 000000000..7086904bb
--- /dev/null
+++ b/docs/v2/usage/manual-trace-control.mdx
@@ -0,0 +1,184 @@
+---
+title: "Manual Trace Control"
+description: "Advanced trace management with start_trace and end_trace methods"
+---
+
+## Basic Manual Trace Control
+
+### Starting and Ending Traces
+
+The most basic form of manual trace control involves starting a trace, executing your code, and then ending the trace with a specific state:
+
+```python
+import agentops
+
+# Initialize without automatic session creation
+agentops.init("your-api-key", auto_start_session=False)
+
+# Start a trace manually
+trace = agentops.start_trace("my-workflow")
+
+try:
+ # Your application logic here
+ result = perform_some_operation()
+
+ # End the trace successfully
+ agentops.end_trace(trace, "Success")
+except Exception as e:
+ # End the trace with failure state
+ agentops.end_trace(trace, "Failure")
+```
+
+### Trace Names and Tags
+
+You can provide meaningful names and tags when starting traces:
+
+```python
+# Start a trace with custom name and tags
+trace = agentops.start_trace(
+ trace_name="customer-service-workflow",
+ tags=["customer-123", "priority-high", "support"]
+)
+```
+
+### Batch Processing with Selective Trace Ending
+
+For batch processing scenarios, you can selectively end traces based on processing results:
+
+```python
+import agentops
+
+# Initialize AgentOps
+agentops.init("your-api-key", auto_start_session=False)
+
+# Sample batch items to process
+batch_items = [
+ {"id": 1, "data": "item_1_data", "valid": True},
+ {"id": 2, "data": "item_2_data", "valid": False},
+ {"id": 3, "data": "item_3_data", "valid": True},
+]
+@agentops.operation(name="process_item")
+def process_item(item):
+ """Simulate processing an item"""
+ if not item.get("valid", False):
+ raise ValueError(f"Invalid item: {item['id']}")
+ return {"processed": True, "result": f"Processed {item['data']}"}
+
+# Start traces for batch items
+for i, item in enumerate(batch_items):
+ trace = agentops.start_trace(f"batch_item_{i+1}")
+ try:
+ result = process_item(item)
+ if result.get("processed"):
+ agentops.end_trace(trace, "Success")
+ else:
+ agentops.end_trace(trace, "Failure")
+ except Exception as e:
+ agentops.end_trace(trace, "Error")
+```
+
+## Integration with Decorators
+
+Manual trace control works seamlessly with AgentOps decorators:
+
+```python
+import agentops
+from agentops.sdk.decorators import agent, operation, tool
+
+agentops.init("your-api-key", auto_start_session=False)
+
+@agent
+class CustomerServiceAgent:
+ @operation
+ def analyze_request(self, request):
+ return f"Analyzed: {request}"
+
+ @tool(cost=0.02)
+ def lookup_customer(self, customer_id):
+ return f"Customer data for {customer_id}"
+
+# Manual trace with decorated components
+trace = agentops.start_trace("customer-service")
+
+try:
+ agent = CustomerServiceAgent()
+ customer_data = agent.lookup_customer("CUST_123")
+ analysis = agent.analyze_request("billing issue")
+
+ agentops.end_trace(trace, "Success")
+except Exception as e:
+ agentops.end_trace(trace, "Error")
+```
+
+## Real-World Example
+
+Here's a comprehensive example showing manual trace control in a customer service application:
+
+```python
+import agentops
+from agentops.sdk.decorators import agent, operation, tool
+from openai import OpenAI
+
+agentops.init(auto_start_session=False)
+client = OpenAI()
+
+@operation
+def analyze_sentiment(text):
+ response = client.chat.completions.create(
+ model="gpt-3.5-turbo",
+ messages=[{"role": "user", "content": f"Analyze sentiment: {text}"}]
+ )
+ return response.choices[0].message.content.strip()
+
+@tool(cost=0.01)
+def lookup_order(order_id):
+ return f"Order {order_id} details"
+
+def process_customer_requests(requests):
+ """Process multiple customer requests with individual trace tracking"""
+ results = []
+ for i, request in enumerate(requests):
+ trace = agentops.start_trace(
+ f"customer_request_{i+1}",
+ tags=["customer-service", request.get("priority", "normal")]
+ )
+ try:
+ sentiment = analyze_sentiment(request["message"])
+
+ if "order" in request:
+ order_info = lookup_order(request["order"])
+
+ if "positive" in sentiment.lower() or "neutral" in sentiment.lower():
+ agentops.end_trace(trace, "Success")
+ results.append({"status": "resolved", "sentiment": sentiment})
+ else:
+ agentops.end_trace(trace, "Escalation_Required")
+ results.append({"status": "escalated", "sentiment": sentiment})
+
+ except Exception as e:
+ agentops.end_trace(trace, "Error")
+ results.append({"status": "error", "error": str(e)})
+
+ return results
+
+customer_requests = [
+ {"message": "I love this product!", "priority": "low"},
+ {"message": "My order is completely wrong!", "order": "12345", "priority": "high"},
+ {"message": "When will my package arrive?", "order": "67890", "priority": "normal"}
+]
+
+results = process_customer_requests(customer_requests)
+print(f"Processed {len(results)} customer requests")
+```
+
+This example demonstrates:
+- Individual trace management for each customer request
+- Integration with decorated agents and tools
+- Different end states based on business logic
+- Proper error handling with appropriate trace states
+- Use of tags for categorization
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/v2/usage/recording-operations.mdx b/docs/v2/usage/recording-operations.mdx
index 93ebed9e9..23075baec 100644
--- a/docs/v2/usage/recording-operations.mdx
+++ b/docs/v2/usage/recording-operations.mdx
@@ -21,6 +21,15 @@ That's it! This single line of code will:
- Intercept and track all LLM calls to supported providers (OpenAI, Anthropic, etc.)
- Record relevant metrics such as token counts, costs, and response times
+You can also set a custom trace name during initialization:
+
+```python
+import agentops
+
+# Initialize with custom trace name
+agentops.init("your-api-key", trace_name="my-custom-workflow")
+```
+
## Automatic Instrumentation
AgentOps automatically instruments calls to popular LLM providers without requiring any additional code:
@@ -86,33 +95,53 @@ def research_workflow(topic):
results = research_workflow("quantum computing")
```
-### Advanced Session Management
+### `@tool` Decorator
-If you need more control over session lifecycle, you can disable automatic session creation:
+Track tool usage and costs with the `@tool` decorator. You can specify costs to get total cost tracking directly in your dashboard summary:
+
+```python
+from agentops.sdk.decorators import tool
+
+@tool(cost=0.05)
+def web_search(query):
+ # Tool implementation
+ return f"Search results for: {query}"
+
+@tool
+def calculator(expression):
+ # Tool without cost tracking
+ return eval(expression)
+```
+
+### `@trace` Decorator
+
+Create custom traces to group related operations using the `@trace` decorator. This is the recommended approach for most applications:
```python
import agentops
-from agentops.sdk.decorators import session
+from agentops.sdk.decorators import trace, agent, operation
-# Disable automatic session creation
+# Initialize AgentOps without auto-starting session since we use @trace
agentops.init("your-api-key", auto_start_session=False)
-@session
-def my_workflow():
- # Your code here
- pass
-
-# Run the workflow to create a session
-my_workflow()
+@trace(name="customer-service-workflow", tags=["customer-support"])
+def customer_service_workflow(customer_id):
+ agent = ResearchAgent()
+ results = agent.search(f"customer {customer_id}")
+ return results
```
+
## Best Practices
1. **Keep it Simple**: For most applications, just initializing AgentOps with `agentops.init()` is sufficient.
-2. **Use Decorators Sparingly**: Only add decorators when you need more detailed tracking of specific operations.
+2. **Use @trace for Custom Workflows**: When you need to group operations, use the `@trace` decorator instead of manual trace management.
+
+3. **Meaningful Names and Tags**: When using decorators, choose descriptive names and relevant tags to make them easier to identify in the dashboard.
+
+4. **Cost Tracking**: Use the `@tool` decorator with cost parameters to track tool usage costs in your dashboard.
-3. **Meaningful Operation Names**: When using decorators, choose descriptive names to make them easier to identify in the dashboard.
diff --git a/docs/v2/usage/sdk-reference.mdx b/docs/v2/usage/sdk-reference.mdx
index 6852bc7e9..de29cd30c 100644
--- a/docs/v2/usage/sdk-reference.mdx
+++ b/docs/v2/usage/sdk-reference.mdx
@@ -33,6 +33,7 @@ Initializes the AgentOps SDK and automatically starts tracking your application.
- `fail_safe` (bool, optional): Whether to suppress errors and continue execution when possible. Defaults to False.
- `exporter_endpoint` (str, optional): Endpoint for the exporter. If not provided, will be read from the `AGENTOPS_EXPORTER_ENDPOINT` environment variable. Defaults to 'https://otlp.agentops.ai/v1/traces'.
- `export_flush_interval` (int, optional): Time interval in milliseconds between automatic exports of telemetry data. Defaults to 1000.
+- `trace_name` (str, optional): Custom name for the automatically created trace. If not provided, a default name will be used.
**Returns**:
@@ -45,6 +46,9 @@ import agentops
# Basic initialization with automatic session creation
agentops.init("your-api-key")
+
+# Initialize with custom trace name
+agentops.init("your-api-key", trace_name="my-workflow")
```
### `configure()`
@@ -70,6 +74,7 @@ Updates client configuration after initialization. Supports the same parameters
- `processor` (object, optional): Custom span processor for OpenTelemetry trace data.
- `exporter_endpoint` (str, optional): Endpoint for the exporter.
- `export_flush_interval` (int, optional): Time interval in milliseconds between automatic exports of telemetry data.
+- `trace_name` (str, optional): Custom name for traces.
**Example**:
@@ -83,7 +88,8 @@ agentops.init()
agentops.configure(
max_wait_time=10000,
max_queue_size=200,
- default_tags=["production", "gpt-4"]
+ default_tags=["production", "gpt-4"],
+ trace_name="production-workflow"
)
```
@@ -95,17 +101,22 @@ Gets the singleton client instance. Most users won't need to use this function d
- The AgentOps client instance.
-## Session Management
+## Trace Management
-These functions help you manage the lifecycle of tracking sessions.
+These functions help you manage the lifecycle of tracking traces.
-### `start_session()`
+### `start_trace()`
-Starts a new AgentOps session manually. This is useful when you've disabled automatic session creation or need multiple separate sessions.
+Starts a new AgentOps trace manually. This is useful when you've disabled automatic session creation or need multiple separate traces.
**Parameters**:
-- `tags` (Union[Dict[str, Any], List[str]], optional): Optional tags to attach to the session, useful for filtering in the dashboard. Can be a list of strings or a dict of key-value pairs.
+- `trace_name` (str, optional): Name for the trace. If not provided, a default name will be used.
+- `tags` (Union[Dict[str, Any], List[str]], optional): Optional tags to attach to the trace, useful for filtering in the dashboard. Can be a list of strings or a dict of key-value pairs.
+
+**Returns**:
+
+- A `TraceContext` object representing the started trace, which can later be passed to `end_trace()`.
**Example**:
@@ -115,44 +126,85 @@ import agentops
# Initialize without auto-starting a session
agentops.init("your-api-key", auto_start_session=False)
-# Later, manually start a session
-session = agentops.start_session(tags=["customer-query"])
+# Start a trace manually
+trace = agentops.start_trace("customer-service-workflow", tags=["customer-query"])
+```
+
+### `end_trace()`
+
+Ends a specific trace or all active traces.
+
+**Parameters**:
+
+- `trace` (TraceContext, optional): The specific trace to end. If not provided, all active traces will be ended.
+- `end_state` (str, optional): The end state for the trace(s). You can use any descriptive string that makes sense for your application (e.g., "Success", "Failure", "Error", "Timeout", etc.).
+
+**Example**:
+
+```python
+import agentops
+
+# End a specific trace
+trace = agentops.start_trace("my-workflow")
+# ... your code ...
+agentops.end_trace(trace, "Success")
+
+# End all active traces
+agentops.end_trace(end_state="Emergency_Shutdown")
```
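+
+If you manage traces manually over longer workflows, a simple pattern is to pair `start_trace()` and `end_trace()` in a `try`/`except` block so the trace always ends with a meaningful state. The sketch below uses only the API described above; the trace name, tag, and end states are placeholders:
+
+```python
+import agentops
+
+agentops.init("your-api-key", auto_start_session=False)
+
+# Placeholder trace name and tag for illustration
+trace = agentops.start_trace("batch-import", tags=["nightly-job"])
+try:
+    # ... your workflow code ...
+    agentops.end_trace(trace, "Success")
+except Exception:
+    # Mark the trace as failed, then re-raise for normal error handling
+    agentops.end_trace(trace, "Error")
+    raise
+```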
+
## Decorators for Detailed Instrumentation
-For more granular control, AgentOps provides decorators that explicitly track different components of your application. These decorators are imported from `agentops.sdk.decorators`.
+For more granular control, AgentOps provides decorators that explicitly track different components of your application. **The `@trace` decorator is the recommended approach for creating custom traces**, especially in multi-threaded environments. These decorators are imported from `agentops.sdk.decorators`.
```python
import agentops
-from agentops.sdk.decorators import session, agent, operation, workflow, task
+from agentops.sdk.decorators import trace, agent, operation, tool
# Initialize without automatic session creation
agentops.init("your-api-key", auto_start_session=False)
-# Create and run a session using the decorator
-@session
+# Create and run a trace using the decorator
+@trace
def my_workflow():
# Your workflow code here
pass
-# Run the workflow, which creates and manages the session
+# Run the workflow, which creates and manages the trace
my_workflow()
```
### Available Decorators
-- `@session`: Creates a session span, which serves as the root for all other spans
+- `@trace`: Creates a trace span for grouping related operations
- `@agent`: Creates an agent span for tracking agent operations
- `@operation` / `@task`: Creates operation/task spans for tracking specific operations (these are aliases)
- `@workflow`: Creates workflow spans for organizing related operations
- `@tool`: Creates tool spans for tracking tool usage and cost in agent operations. Supports cost parameter for tracking tool usage costs.
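+
+**Operation and Workflow Example**:
+
+A minimal sketch of the `@operation`/`@task` and `@workflow` decorators listed above (the function names here are illustrative):
+
+```python
+from agentops.sdk.decorators import workflow, operation, task
+
+@operation
+def fetch_records(source):
+    # A single tracked unit of work
+    return f"records from {source}"
+
+@task
+def clean_records(records):
+    # @task is an alias of @operation
+    return f"cleaned {records}"
+
+@workflow
+def nightly_sync():
+    # Groups the related operations above
+    records = fetch_records("crm")
+    return clean_records(records)
+```
+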
+**Tool Decorator Example**:
+
+```python
+from agentops.sdk.decorators import tool
+
+@tool(cost=0.05)
+def web_search(query):
+ # Tool implementation with cost tracking
+ return f"Search results for: {query}"
+
+@tool
+def calculator(expression):
+ # Tool without cost tracking
+ return eval(expression)
+```
+
See [Decorators](/v2/concepts/decorators) for more detailed documentation on using these decorators.
## Legacy Functions
The following functions are maintained for backward compatibility with older versions of the SDK and integrations. New code should use the functions and decorators described above instead.
+- `start_session()`: Legacy function for starting sessions. Use `@trace` decorator or `start_trace()` instead.
- `record(event)`: Legacy function to record an event. Replaced by decorator-based tracing.
- `track_agent()`: Legacy decorator for marking agents. Replaced by the `@agent` decorator.
- `track_tool()`: Legacy decorator for marking tools. Replaced by the `@tool` decorator.
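+
+For example, code that used `start_session()` can typically move to `start_trace()` with minimal changes (a sketch; the trace name and tag are placeholders):
+
+```python
+import agentops
+
+agentops.init("your-api-key", auto_start_session=False)
+
+# Before (legacy):
+# session = agentops.start_session(tags=["customer-query"])
+
+# After (recommended):
+trace = agentops.start_trace("customer-query-workflow", tags=["customer-query"])
+# ... your code ...
+agentops.end_trace(trace, "Success")
+```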
diff --git a/docs/v2/usage/trace-decorator.mdx b/docs/v2/usage/trace-decorator.mdx
new file mode 100644
index 000000000..c4523e039
--- /dev/null
+++ b/docs/v2/usage/trace-decorator.mdx
@@ -0,0 +1,644 @@
+---
+title: "Trace Decorator"
+description: "Create custom traces with the @trace decorator"
+---
+
+## Basic Usage
+
+### Simple Trace Creation
+
+The `@trace` decorator automatically creates a trace span that encompasses the entire function execution. You can optionally specify custom names and tags to better organize and categorize your traces:
+
+```python
+from agentops.sdk.decorators import trace
+import agentops
+
+# Initialize AgentOps
+agentops.init("your-api-key", auto_start_session=False)
+
+@trace(name="customer-workflow", tags=["production", "customer-service"])
+def my_workflow():
+ """A simple workflow wrapped in a trace"""
+ print("š Starting customer workflow...")
+ print("š Processing customer request...")
+ # Your application logic here
+ print("ā
Customer workflow completed successfully!")
+ return "Workflow completed"
+
+# Run the function - this creates and manages the trace automatically
+print("š¬ Running traced workflow...")
+result = my_workflow()
+print(f"š Result: {result}")
+```
+
+Both the `name` and `tags` parameters are optional. If no name is provided, the function name is used as the trace name.
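+
+For example, both of the following are valid; the first trace is named after the function, while the second adds tags only (a minimal sketch):
+
+```python
+@trace
+def simple_workflow():
+    # Trace name defaults to the function name, "simple_workflow"
+    return "done"
+
+@trace(tags=["experimental"])
+def tagged_workflow():
+    # Tags only; the function name is still used as the trace name
+    return "done"
+
+simple_workflow()
+tagged_workflow()
+```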
+
+### Custom Trace Names
+
+You can specify custom names for your traces:
+
+```python
+@trace(name="customer-onboarding-flow")
+def onboard_customer(customer_data):
+ """Customer onboarding process"""
+ print(f"š Onboarding customer: {customer_data['name']}")
+ print("š Creating customer profile...")
+ print("š§ Sending welcome email...")
+ print("ā
Customer onboarding complete!")
+ return f"Onboarded customer: {customer_data['name']}"
+
+@trace(name="data-processing-pipeline")
+def process_data(input_data):
+ """Data processing workflow"""
+ print(f"š Processing {len(input_data)} data items...")
+ print("š Applying transformations...")
+ print("ā
Data processing complete!")
+ return f"Processed {len(input_data)} items"
+
+# Usage examples
+customer = {"name": "Alice Johnson", "email": "alice@example.com"}
+result1 = onboard_customer(customer)
+print(f"š Onboarding result: {result1}")
+
+data_items = ["item1", "item2", "item3", "item4", "item5"]
+result2 = process_data(data_items)
+print(f"š Processing result: {result2}")
+```
+
+### Adding Tags to Traces
+
+Tags help categorize and filter traces in your dashboard:
+
+```python
+@trace(tags=["production", "high-priority"])
+def critical_workflow():
+ """Critical production workflow"""
+ print("šØ Executing critical production workflow...")
+ print("ā” High priority processing...")
+ print("ā
Critical task completed successfully!")
+ return "Critical task completed"
+
+@trace(name="user-analysis", tags=["analytics", "user-behavior"])
+def analyze_user_behavior(user_id):
+ """Analyze user behavior patterns"""
+ print(f"š Analyzing behavior for user: {user_id}")
+ print("š Gathering user interaction data...")
+ print("š§ Running behavior analysis algorithms...")
+ print("ā
User behavior analysis complete!")
+ return f"Analysis complete for user {user_id}"
+
+# Usage examples
+print("š¬ Running critical workflow...")
+result1 = critical_workflow()
+print(f"š Critical workflow result: {result1}")
+
+print("\nš¬ Running user analysis...")
+result2 = analyze_user_behavior("user_12345")
+print(f"š Analysis result: {result2}")
+```
+
+## Integration with Other Decorators
+
+### Combining with Agent and Operation Decorators
+
+The `@trace` decorator works seamlessly with other AgentOps decorators:
+
+```python
+import agentops
+from agentops.sdk.decorators import trace, agent, operation, tool
+
+# Initialize AgentOps without auto-starting session since we use @trace
+agentops.init("your-api-key", auto_start_session=False)
+
+@agent
+class DataAnalysisAgent:
+ def __init__(self):
+ print("š¤ DataAnalysisAgent initialized")
+
+ @operation
+ def collect_data(self, source):
+ print(f"š Collecting data from {source}...")
+ data = f"Data collected from {source}"
+ print(f"ā
Data collection complete: {data}")
+ return data
+
+ @tool(cost=0.05)
+ def analyze_data(self, data):
+ print(f"š§ Analyzing data: {data}")
+ analysis = f"Analysis of {data}"
+ print(f"ā
Analysis complete: {analysis}")
+ return analysis
+
+ @operation
+ def generate_report(self, analysis):
+ print(f"š Generating report from: {analysis}")
+ report = f"Report: {analysis}"
+ print(f"ā
Report generated: {report}")
+ return report
+
+@trace(name="complete-analysis-workflow")
+def run_analysis_workflow(data_source):
+ """Complete data analysis workflow"""
+ print(f"š Starting analysis workflow for: {data_source}")
+ print("=" * 50)
+
+ agent = DataAnalysisAgent()
+
+ # Collect data
+ print("\nš Step 1: Data Collection")
+ data = agent.collect_data(data_source)
+
+ # Analyze data
+ print("\nš Step 2: Data Analysis")
+ analysis = agent.analyze_data(data)
+
+ # Generate report
+ print("\nš Step 3: Report Generation")
+ report = agent.generate_report(analysis)
+
+ print("\nš Workflow completed successfully!")
+ print("=" * 50)
+
+ return {
+ "source": data_source,
+ "report": report
+ }
+
+# Usage
+print("š¬ Running complete analysis workflow...")
+result = run_analysis_workflow("customer_database")
+print(f"\nš Final Result:")
+print(f" Source: {result['source']}")
+print(f" Report: {result['report']}")
+```
+
+## Async Function Support
+
+The `@trace` decorator fully supports async functions:
+
+```python
+import asyncio
+import agentops
+from agentops.sdk.decorators import trace, operation
+
+# Initialize AgentOps without auto-starting session since we use @trace
+agentops.init("your-api-key", auto_start_session=False)
+
+@operation
+async def fetch_user_data(user_id):
+ """Simulate async data fetching"""
+ print(f"š Fetching data for user: {user_id}")
+ await asyncio.sleep(1) # Simulate API call
+ data = f"User data for {user_id}"
+ print(f"ā
Data fetched: {data}")
+ return data
+
+@operation
+async def process_user_data(user_data):
+ """Simulate async data processing"""
+ print(f"āļø Processing user data: {user_data}")
+ await asyncio.sleep(0.5) # Simulate processing
+ processed = f"Processed: {user_data}"
+ print(f"ā
Processing complete: {processed}")
+ return processed
+
+@trace(name="async-user-workflow")
+async def async_user_workflow(user_id):
+ """Async workflow for user processing"""
+ print(f"š Starting async workflow for user: {user_id}")
+ print("=" * 45)
+
+ print("\nš Step 1: Fetching user data")
+ user_data = await fetch_user_data(user_id)
+
+ print("\nš Step 2: Processing user data")
+ processed_data = await process_user_data(user_data)
+
+ print("\nš Async workflow completed!")
+ print("=" * 45)
+
+ return processed_data
+
+# Usage
+async def main():
+ print("š¬ Running async user workflow...")
+ result = await async_user_workflow("user_123")
+ print(f"\nš Final Result: {result}")
+ print("⨠Check your AgentOps dashboard to see the traced async workflow!")
+
+# Run the async workflow
+print("š Starting async demo...")
+asyncio.run(main())
+```
+
+## Error Handling and Trace States
+
+### Automatic Error Handling
+
+The `@trace` decorator automatically handles exceptions and sets appropriate trace states:
+
+```python
+import agentops
+from agentops.sdk.decorators import trace
+
+# Initialize AgentOps without auto-starting session since we use @trace
+agentops.init("your-api-key", auto_start_session=False)
+
+@trace(name="error-prone-workflow")
+def risky_operation():
+ """Operation that might fail"""
+ import random
+
+ print("š² Running risky operation...")
+ print("ā ļø This operation has a 50% chance of failure")
+
+ if random.random() < 0.5:
+ print("ā Operation failed!")
+ raise ValueError("Random failure occurred")
+
+ print("ā
Operation succeeded!")
+ return "Operation succeeded"
+
+# The trace will automatically be marked with failure state if an exception occurs
+print("š¬ Testing automatic error handling...")
+for i in range(3):
+ print(f"\nš Attempt {i+1}:")
+ try:
+ result = risky_operation()
+ print(f"š Success: {result}")
+ break
+ except ValueError as e:
+ print(f"š Operation failed: {e}")
+ print("š Trace automatically ended with error state")
+```
+
+### Custom Error Handling
+
+You can implement custom error handling within traced functions:
+
+```python
+@trace(name="robust-workflow")
+def robust_operation(data):
+ """Operation with custom error handling"""
+ print(f"š Starting robust operation with data: {data}")
+
+ try:
+ # Risky operation
+ if not data:
+ print("ā ļø No data provided!")
+ raise ValueError("No data provided")
+
+ # Process data
+ print("āļø Processing data...")
+ result = f"Processed: {data}"
+ print(f"ā
Processing successful: {result}")
+ return {"success": True, "result": result}
+
+ except ValueError as e:
+ # Handle specific errors
+ print(f"ā Validation error: {e}")
+ return {"success": False, "error": str(e)}
+ except Exception as e:
+ # Handle unexpected errors
+ print(f"š„ Unexpected error: {e}")
+ return {"success": False, "error": f"Unexpected error: {str(e)}"}
+
+# Usage examples
+print("\nš¬ Testing custom error handling...")
+
+print("\nš Test 1: Valid data")
+result1 = robust_operation("valid_data")
+print(f"š Result: {result1}")
+
+print("\nš Test 2: Empty data")
+result2 = robust_operation("")
+print(f"š Result: {result2}")
+
+print("\nš Test 3: None data")
+result3 = robust_operation(None)
+print(f"š Result: {result3}")
+```
+
+## Real-World Examples
+
+### E-commerce Order Processing
+
+```python
+from agentops.sdk.decorators import trace, agent, operation, tool
+import agentops
+
+agentops.init("your-api-key", auto_start_session=False)
+
+@agent
+class OrderProcessor:
+ def __init__(self):
+ print("š OrderProcessor initialized")
+
+ @tool(cost=0.01)
+ def validate_payment(self, payment_info):
+ """Payment validation service"""
+ print(f"š³ Validating payment: {payment_info['card']}")
+ result = {"valid": True, "transaction_id": "txn_123"}
+ print(f"ā
Payment validation successful: {result['transaction_id']}")
+ return result
+
+ @tool(cost=0.02)
+ def check_inventory(self, product_id, quantity):
+ """Inventory check service"""
+ print(f"š¦ Checking inventory for {product_id} (qty: {quantity})")
+ result = {"available": True, "reserved": quantity}
+ print(f"ā
Inventory check complete: {quantity} units available")
+ return result
+
+ @operation
+ def calculate_shipping(self, address, items):
+ """Calculate shipping costs"""
+ print(f"š Calculating shipping to {address['city']}, {address['state']}")
+ result = {"cost": 9.99, "method": "standard"}
+ print(f"ā
Shipping calculated: ${result['cost']} ({result['method']})")
+ return result
+
+ @tool(cost=0.005)
+ def send_confirmation_email(self, email, order_details):
+ """Email service"""
+ print(f"š§ Sending confirmation email to {email}")
+ result = f"Confirmation sent to {email}"
+ print(f"ā
Email sent successfully")
+ return result
+
+@trace(name="order-processing", tags=["ecommerce", "orders"])
+def process_order(order_data):
+ """Complete order processing workflow"""
+ print(f"š Starting order processing for {order_data['customer_email']}")
+ print("=" * 60)
+
+ processor = OrderProcessor()
+
+ try:
+ # Validate payment
+ print("\nš Step 1: Payment Validation")
+ payment_result = processor.validate_payment(order_data["payment"])
+ if not payment_result["valid"]:
+ print("ā Payment validation failed!")
+ return {"success": False, "error": "Payment validation failed"}
+
+ # Check inventory for all items
+ print("\nš Step 2: Inventory Check")
+ for item in order_data["items"]:
+ inventory_result = processor.check_inventory(
+ item["product_id"],
+ item["quantity"]
+ )
+ if not inventory_result["available"]:
+ print(f"ā Item {item['product_id']} not available!")
+ return {"success": False, "error": f"Item {item['product_id']} not available"}
+
+ # Calculate shipping
+ print("\nš Step 3: Shipping Calculation")
+ shipping = processor.calculate_shipping(
+ order_data["shipping_address"],
+ order_data["items"]
+ )
+
+ # Send confirmation
+ print("\nš Step 4: Confirmation Email")
+ confirmation = processor.send_confirmation_email(
+ order_data["customer_email"],
+ {
+ "items": order_data["items"],
+ "shipping": shipping,
+ "payment": payment_result
+ }
+ )
+
+ print("\nš Order processing completed successfully!")
+ print("=" * 60)
+
+ return {
+ "success": True,
+ "order_id": "ORD_12345",
+ "payment": payment_result,
+ "shipping": shipping,
+ "confirmation": confirmation
+ }
+
+ except Exception as e:
+ print(f"š„ Order processing failed: {e}")
+ return {"success": False, "error": str(e)}
+
+# Usage
+print("š¬ Running e-commerce order processing demo...")
+
+order = {
+ "customer_email": "customer@example.com",
+ "payment": {"card": "****1234", "amount": 99.99},
+ "items": [{"product_id": "PROD_001", "quantity": 2}],
+ "shipping_address": {"city": "New York", "state": "NY"}
+}
+
+result = process_order(order)
+
+print(f"\nš ORDER PROCESSING RESULT:")
+print(f" Success: {result['success']}")
+if result['success']:
+ print(f" Order ID: {result['order_id']}")
+ print(f" Transaction: {result['payment']['transaction_id']}")
+ print(f" Shipping: ${result['shipping']['cost']}")
+else:
+ print(f" Error: {result['error']}")
+```
+
+### Data Analysis Workflow
+
+```python
+from agentops.sdk.decorators import trace, agent, operation, tool
+from openai import OpenAI
+import agentops
+
+agentops.init("your-api-key", auto_start_session=False)
+
+@agent
+class DataAnalysisAgent:
+ def __init__(self):
+ self.client = OpenAI()
+ print("š¤ DataAnalysisAgent initialized")
+
+ @operation
+ def collect_data(self, source):
+ """Simulate data collection"""
+ print(f"š Collecting data from {source}...")
+ data = f"Raw data collected from {source}: [sample_data_1, sample_data_2, sample_data_3]"
+ print(f"ā
Data collection complete: {len(data)} characters collected")
+ return data
+
+ @operation
+ def analyze_data_with_llm(self, data):
+ """Use LLM to analyze the collected data"""
+ print("š§ Analyzing data with LLM...")
+ response = self.client.chat.completions.create(
+ model="gpt-4o",
+ messages=[
+ {"role": "system", "content": "You are a data analyst. Analyze the provided data and give insights."},
+ {"role": "user", "content": f"Please analyze this data: {data}"}
+ ]
+ )
+ analysis = response.choices[0].message.content
+ print(f"ā
LLM analysis complete: {len(analysis)} characters generated")
+ return analysis
+
+ @tool(cost=0.05)
+ def generate_visualization(self, analysis):
+ """Generate data visualization"""
+ print("š Generating visualization...")
+ visualization = f"Chart generated for: {analysis[:50]}..."
+ print(f"ā
Visualization generated: {visualization}")
+ return visualization
+
+ @operation
+ def generate_report(self, analysis, visualization):
+ """Generate final report using LLM"""
+ print("š Generating final report with LLM...")
+ response = self.client.chat.completions.create(
+ model="gpt-4o",
+ messages=[
+ {"role": "system", "content": "You are a report writer. Create a professional data analysis report."},
+ {"role": "user", "content": f"Create a report based on this analysis: {analysis} and visualization: {visualization}"}
+ ]
+ )
+ report = response.choices[0].message.content
+ print(f"ā
Final report generated: {len(report)} characters")
+ return report
+
+@trace(name="data-analysis-workflow", tags=["analytics", "reporting"])
+def run_data_analysis(data_source):
+ """Complete data analysis workflow with LLM integration"""
+ print(f"š Starting data analysis workflow for: {data_source}")
+ print("=" * 60)
+
+ agent = DataAnalysisAgent()
+
+ # Collect data
+ print("\nš Step 1: Data Collection")
+ raw_data = agent.collect_data(data_source)
+
+ # Analyze data using LLM
+ print("\nš Step 2: LLM Analysis")
+ analysis = agent.analyze_data_with_llm(raw_data)
+
+ # Generate visualization
+ print("\nš Step 3: Visualization Generation")
+ visualization = agent.generate_visualization(analysis)
+
+ # Generate final report using LLM
+ print("\nš Step 4: Report Generation")
+ report = agent.generate_report(analysis, visualization)
+
+ print("\nš Workflow completed successfully!")
+ print("=" * 60)
+
+ return {
+ "source": data_source,
+ "raw_data": raw_data,
+ "analysis": analysis,
+ "visualization": visualization,
+ "final_report": report
+ }
+
+# Usage
+print("š¬ Running data analysis workflow demo...")
+
+result = run_data_analysis("customer_database")
+
+print(f"\nš ANALYSIS RESULTS:")
+print(f" Data Source: {result['source']}")
+print(f" Raw Data: {result['raw_data'][:80]}...")
+print(f" Analysis Preview: {result['analysis'][:100]}...")
+print(f" Visualization: {result['visualization']}")
+print(f" Final Report Preview: {result['final_report'][:150]}...")
+
+print(f"\n⨠Analysis complete! Check your AgentOps dashboard to see the traced workflow.")
+```
+
+## Best Practices
+
+### 1. Use Meaningful Names
+
+Choose descriptive names that clearly indicate what the trace represents:
+
+```python
+# Good
+@trace(name="user-authentication-flow")
+def authenticate_user(credentials):
+ pass
+
+@trace(name="payment-processing-pipeline")
+def process_payment(payment_data):
+ pass
+
+# Less descriptive
+@trace(name="trace1")
+def some_function():
+ pass
+```
+
+### 2. Add Relevant Tags
+
+Use tags to categorize traces for easier filtering and analysis:
+
+```python
+@trace(name="order-fulfillment", tags=["ecommerce", "fulfillment", "high-priority"])
+def fulfill_order(order_id):
+ pass
+
+@trace(name="data-sync", tags=["background-job", "data-processing"])
+def sync_data():
+ pass
+```
+
+### 3. Keep Traces Focused
+
+Each trace should represent a logical unit of work:
+
+```python
+# Good - focused on a single workflow
+@trace(name="customer-onboarding")
+def onboard_customer(customer_data):
+ validate_customer(customer_data)
+ create_account(customer_data)
+ send_welcome_email(customer_data)
+
+# Less focused - mixing different concerns
+@trace(name="mixed-operations")
+def do_everything():
+ onboard_customer(data1)
+ process_orders(data2)
+ generate_reports(data3)
+```
+
+### 4. Handle Errors Appropriately
+
+Implement proper error handling within traced functions:
+
+```python
+@trace(name="data-processing")
+def process_data(data):
+ try:
+ # Main processing logic
+ result = complex_processing(data)
+ return {"success": True, "result": result}
+ except ValidationError as e:
+ # Expected errors
+ return {"success": False, "error": "validation_failed", "details": str(e)}
+ except Exception as e:
+ # Unexpected errors
+ logger.error(f"Unexpected error in data processing: {e}")
+ return {"success": False, "error": "processing_failed"}
+```
+
+The `@trace` decorator provides a powerful and flexible way to organize your application's telemetry data. By creating logical groupings of operations, you can better understand your application's behavior and performance characteristics in the AgentOps dashboard.
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/v2/usage/tracking-agents.mdx b/docs/v2/usage/tracking-agents.mdx
index 7ee48f3fa..b1331cc22 100644
--- a/docs/v2/usage/tracking-agents.mdx
+++ b/docs/v2/usage/tracking-agents.mdx
@@ -3,43 +3,19 @@ title: "Tracking Agents"
description: "Associate operations with specific named agents"
---
-AgentOps automatically tracks LLM interactions in your application. For more detailed tracking, especially in multi-agent systems, you can use additional features to associate operations with specific agents.
+AgentOps automatically tracks LLM interactions in your application. For more detailed tracking, especially in multi-agent systems, you can use the `@agent` decorator to associate operations with specific agents.
-## Basic Agent Tracking
+## Using the Agent Decorator
-For simple applications, AgentOps will automatically track your LLM calls without additional configuration:
+For structured tracking in complex applications, you can use the `@agent` decorator to explicitly identify different agents in your system:
```python
import agentops
+from agentops.sdk.decorators import agent, operation, trace
from openai import OpenAI
-# Initialize AgentOps
-agentops.init("your-api-key")
-
-# Create a simple agent function
-def research_agent(query):
- client = OpenAI()
- response = client.chat.completions.create(
- model="gpt-4o",
- messages=[{"role": "user", "content": f"Research about: {query}"}]
- )
- return response.choices[0].message.content
-
-# Use your agent - all LLM calls will be tracked automatically
-result = research_agent("quantum computing")
-```
-
-## Advanced: Using the Agent Decorator
-
-For more structured tracking in complex applications, you can use the `@agent` decorator to explicitly identify different agents in your system:
-
-```python
-import agentops
-from agentops.sdk.decorators import agent, operation
-from openai import OpenAI
-
-# Initialize AgentOps
-agentops.init("your-api-key")
+# Initialize AgentOps without auto-starting session since we use @trace
+agentops.init("your-api-key", auto_start_session=False)
# Create a decorated agent class
@agent(name='ResearchAgent')
@@ -55,7 +31,8 @@ class MyAgent:
)
return response.choices[0].message.content
-# Create and use the agent within a function
+# Create a trace to group the agent operations
+@trace(name="research-workflow")
def research_workflow(topic):
agent = MyAgent()
result = agent.search(topic)
@@ -74,6 +51,153 @@ class ResearchAgent:
pass
```
+## Basic Agent Tracking (Simple Applications)
+
+For simple applications, AgentOps will automatically track your LLM calls without additional configuration:
+
+```python
+import agentops
+from openai import OpenAI
+
+# Initialize AgentOps
+agentops.init("your-api-key")
+
+# Create a simple agent function
+def research_agent(query):
+ client = OpenAI()
+ response = client.chat.completions.create(
+ model="gpt-4o",
+ messages=[{"role": "user", "content": f"Research about: {query}"}]
+ )
+ return response.choices[0].message.content
+
+# Use your agent - all LLM calls will be tracked automatically
+result = research_agent("quantum computing")
+```
+
+## Multi-Agent Systems
+
+For complex multi-agent systems, you can organize multiple agents within a single trace:
+
+```python
+import agentops
+from agentops.sdk.decorators import agent, operation, tool, trace
+
+# Initialize AgentOps without auto-starting session since we use @trace
+agentops.init("your-api-key", auto_start_session=False)
+
+@agent
+class DataCollectionAgent:
+ @tool(cost=0.02)
+ def fetch_data(self, source):
+ return f"Data from {source}"
+
+@agent
+class AnalysisAgent:
+ @operation
+ def analyze_data(self, data):
+ return f"Analysis of {data}"
+
+@agent
+class ReportingAgent:
+ @tool(cost=0.01)
+ def generate_report(self, analysis):
+ return f"Report: {analysis}"
+
+@trace(name="multi-agent-workflow")
+def collaborative_workflow(data_source):
+ """Workflow using multiple specialized agents"""
+
+ # Data collection
+ collector = DataCollectionAgent()
+ raw_data = collector.fetch_data(data_source)
+
+ # Analysis
+ analyzer = AnalysisAgent()
+ analysis = analyzer.analyze_data(raw_data)
+
+ # Reporting
+ reporter = ReportingAgent()
+ report = reporter.generate_report(analysis)
+
+ return {
+ "source": data_source,
+ "analysis": analysis,
+ "report": report
+ }
+
+# Run the collaborative workflow
+result = collaborative_workflow("customer_database")
+```
+
+## Agent Communication and Coordination
+
+You can track complex agent interactions and communication patterns:
+
+```python
+import agentops
+from agentops.sdk.decorators import agent, operation, tool, trace
+
+# Initialize AgentOps without auto-starting session since we use @trace
+agentops.init("your-api-key", auto_start_session=False)
+
+@agent
+class CoordinatorAgent:
+ def __init__(self):
+ self.task_queue = []
+
+ @operation
+ def assign_task(self, task, agent_type):
+ self.task_queue.append({"task": task, "agent": agent_type})
+ return f"Task assigned to {agent_type}: {task}"
+
+ @operation
+ def collect_results(self, results):
+ return f"Collected {len(results)} results"
+
+@agent
+class WorkerAgent:
+ def __init__(self, agent_id):
+ self.agent_id = agent_id
+
+ @tool(cost=0.05)
+ def process_task(self, task):
+ return f"Agent {self.agent_id} processed: {task}"
+
+@trace(name="coordinated-processing")
+def coordinated_processing_workflow(tasks):
+ """Workflow with agent coordination"""
+ coordinator = CoordinatorAgent()
+ workers = [WorkerAgent(f"worker_{i}") for i in range(3)]
+
+ # Assign tasks
+ assignments = []
+ for i, task in enumerate(tasks):
+ worker_type = f"worker_{i % len(workers)}"
+ assignment = coordinator.assign_task(task, worker_type)
+ assignments.append(assignment)
+
+ # Process tasks
+ results = []
+ for i, task in enumerate(tasks):
+ worker = workers[i % len(workers)]
+ result = worker.process_task(task)
+ results.append(result)
+
+ # Collect results
+ summary = coordinator.collect_results(results)
+
+ return {
+ "assignments": assignments,
+ "results": results,
+ "summary": summary
+ }
+
+# Run coordinated workflow
+tasks = ["analyze_data", "generate_report", "send_notification"]
+result = coordinated_processing_workflow(tasks)
+```
+
## Dashboard Visualization
All operations are automatically associated with the agent that originated them. Agents are given a name which is what you will see in the dashboard.
@@ -88,4 +212,34 @@ All operations are automatically associated with the agent that originated them.
2. **Use Decorators When Needed**: Add the `@agent` decorator when you need to clearly distinguish between multiple agents in your system.
-3. **Meaningful Names**: Choose descriptive names for your agents to make them easier to identify in the dashboard.
\ No newline at end of file
+3. **Meaningful Names**: Choose descriptive names for your agents to make them easier to identify in the dashboard.
+
+4. **Organize with Traces**: Use the `@trace` decorator to group related agent operations into logical workflows.
+
+5. **Track Costs**: Use the `@tool` decorator with cost parameters to track the expenses associated with agent operations.
+
+6. **Agent Specialization**: Create specialized agents for different types of tasks to improve observability and maintainability.
+
+## Migration from Session Decorator
+
+If you're migrating from the legacy `@session` decorator, replace it with the `@trace` decorator:
+
+```python
+# New approach (recommended)
+from agentops.sdk.decorators import trace, agent
+
+@trace(name="my-workflow")
+def my_workflow():
+ # workflow code
+ pass
+
+# Old approach (deprecated)
+from agentops.sdk.decorators import session, agent
+
+@session
+def my_workflow():
+ # workflow code
+ pass
+```
+
+The `@trace` decorator provides the same functionality as the legacy `@session` decorator but with more flexibility and better integration with the new trace management features.
\ No newline at end of file