From 93cb9b01740eee42f2cc3d056ab4e97d90e4b2ac Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Wed, 28 May 2025 04:52:26 +0530 Subject: [PATCH 1/4] Enhance documentation and functionality for trace management - Updated `mint.json` to include new usage paths for trace decorators and manual trace control. - Revised `introduction.mdx` to reflect changes in session tracking terminology, replacing "sessions" with "traces" for clarity. - Expanded `quickstart.mdx` to introduce the `@trace` decorator and manual trace management examples. - Added new documentation files for `manual-trace-control.mdx` and `trace-decorator.mdx`, detailing advanced trace management techniques and usage patterns. - Updated `sdk-reference.mdx` to include new parameters for trace management and improved examples for initializing the AgentOps SDK. - Enhanced `tracking-agents.mdx` to demonstrate multi-agent workflows and coordination using the `@trace` decorator. These changes improve the clarity and usability of the AgentOps SDK, particularly around trace management and agent tracking. 
--- docs/mint.json | 4 +- docs/v2/integrations/google_adk.mdx | 197 ++++++++--- docs/v2/introduction.mdx | 20 +- docs/v2/quickstart.mdx | 55 ++- docs/v2/usage/manual-trace-control.mdx | 288 ++++++++++++++++ docs/v2/usage/recording-operations.mdx | 70 +++- docs/v2/usage/sdk-reference.mdx | 78 ++++- docs/v2/usage/trace-decorator.mdx | 453 +++++++++++++++++++++++++ docs/v2/usage/tracking-agents.mdx | 154 ++++++++- 9 files changed, 1218 insertions(+), 101 deletions(-) create mode 100644 docs/v2/usage/manual-trace-control.mdx create mode 100644 docs/v2/usage/trace-decorator.mdx diff --git a/docs/mint.json b/docs/mint.json index 3c40b208e..2d106dab7 100644 --- a/docs/mint.json +++ b/docs/mint.json @@ -188,7 +188,9 @@ "v2/usage/advanced-configuration", "v2/usage/tracking-llm-calls", "v2/usage/tracking-agents", - "v2/usage/recording-operations" + "v2/usage/recording-operations", + "v2/usage/trace-decorator", + "v2/usage/manual-trace-control" ], "version": "v2" }, diff --git a/docs/v2/integrations/google_adk.mdx b/docs/v2/integrations/google_adk.mdx index 9978d78dc..36cac1852 100644 --- a/docs/v2/integrations/google_adk.mdx +++ b/docs/v2/integrations/google_adk.mdx @@ -21,73 +21,162 @@ AgentOps provides seamless integration with [Google Agent Development Kit (ADK)] ``` -## Basic Usage +## Usage Initialize AgentOps at the beginning of your application to automatically track all Google ADK agent interactions: -```python Basic Usage -import asyncio -import uuid -import os -from google.genai import types +```python Usage +# --- Full example code demonstrating LlmAgent with Tools vs. 
Output Schema --- +import json # Needed for pretty printing dicts +from google.adk.agents import LlmAgent +from google.adk.runners import Runner +from google.adk.sessions import InMemorySessionService +from google.genai import types +from pydantic import BaseModel, Field +import asyncio import agentops -from google.adk.agents import Agent -from google.adk.runners import InMemoryRunner -from google.adk.agents.run_config import RunConfig, StreamingMode - -# Initialize AgentOps -agentops.init(api_key="") - -# Create a simple agent with no tools -agent = Agent( - name="simple_agent", - model="gemini-1.5-flash", - instruction="You are a helpful assistant that provides clear and concise answers.", + +agentops.init("your-api-key") + +# --- 1. Define Constants --- +APP_NAME = "agent_comparison_app" +USER_ID = "test_user_456" +SESSION_ID_TOOL_AGENT = "session_tool_agent_xyz" +SESSION_ID_SCHEMA_AGENT = "session_schema_agent_xyz" +MODEL_NAME = "gemini-2.0-flash" + +# --- 2. Define Schemas --- + +# Input schema used by both agents +class CountryInput(BaseModel): + country: str = Field(description="The country to get information about.") + +# Output schema ONLY for the second agent +class CapitalInfoOutput(BaseModel): + capital: str = Field(description="The capital city of the country.") + # Note: Population is illustrative; the LLM will infer or estimate this + # as it cannot use tools when output_schema is set. + population_estimate: str = Field(description="An estimated population of the capital city.") + +# --- 3. 
Define the Tool (Only for the first agent) --- +def get_capital_city(country: str) -> str: + """Retrieves the capital city of a given country.""" + print(f"\n-- Tool Call: get_capital_city(country='{country}') --") + country_capitals = { + "united states": "Washington, D.C.", + "canada": "Ottawa", + "france": "Paris", + "japan": "Tokyo", + } + result = country_capitals.get(country.lower(), f"Sorry, I couldn't find the capital for {country}.") + print(f"-- Tool Result: '{result}' --") + return result + +# --- 4. Configure Agents --- + +# Agent 1: Uses a tool and output_key +capital_agent_with_tool = LlmAgent( + model=MODEL_NAME, + name="capital_agent_tool", + description="Retrieves the capital city using a specific tool.", + instruction="""You are a helpful agent that provides the capital city of a country using a tool. +The user will provide the country name in a JSON format like {"country": "country_name"}. +1. Extract the country name. +2. Use the `get_capital_city` tool to find the capital. +3. Respond clearly to the user, stating the capital city found by the tool. +""", + tools=[get_capital_city], + input_schema=CountryInput, + output_key="capital_tool_result", # Store final text response ) -# Create a runner -runner = InMemoryRunner( - agent=agent, - app_name="simple-example", +# Agent 2: Uses output_schema (NO tools possible) +structured_info_agent_schema = LlmAgent( + model=MODEL_NAME, + name="structured_info_agent_schema", + description="Provides capital and estimated population in a specific JSON format.", + instruction=f"""You are an agent that provides country information. +The user will provide the country name in a JSON format like {{"country": "country_name"}}. +Respond ONLY with a JSON object matching this exact schema: +{json.dumps(CapitalInfoOutput.model_json_schema(), indent=2)} +Use your knowledge to determine the capital and estimate the population. Do not use any tools. 
+""", + # *** NO tools parameter here - using output_schema prevents tool use *** + input_schema=CountryInput, + output_schema=CapitalInfoOutput, # Enforce JSON output structure + output_key="structured_info_result", # Store final JSON response ) -# Setup session -user_id = f"user-{uuid.uuid4().hex[:8]}" -session_id = f"session-{uuid.uuid4().hex[:8]}" -runner.session_service.create_session( - app_name="simple-example", - user_id=user_id, - session_id=session_id, +# --- 5. Set up Session Management and Runners --- +session_service = InMemorySessionService() + +# Create a runner for EACH agent +capital_runner = Runner( + agent=capital_agent_with_tool, + app_name=APP_NAME, + session_service=session_service +) +structured_runner = Runner( + agent=structured_info_agent_schema, + app_name=APP_NAME, + session_service=session_service ) -# Run the agent with a user message -async def run_agent(): - message = "What are three benefits of artificial intelligence?" - - content = types.Content( - role="user", - parts=[types.Part(text=message)], - ) - - run_config = RunConfig( - streaming_mode=StreamingMode.NONE, - ) +# --- 6. Define Agent Interaction Logic --- +async def call_agent_and_print( + runner_instance: Runner, + agent_instance: LlmAgent, + session_id: str, + query_json: str +): + """Sends a query to the specified agent/runner and prints results.""" + print(f"\n>>> Calling Agent: '{agent_instance.name}' | Query: {query_json}") + + user_content = types.Content(role='user', parts=[types.Part(text=query_json)]) + + final_response_content = "No final response received." 
+ async for event in runner_instance.run_async(user_id=USER_ID, session_id=session_id, new_message=user_content): + # print(f"Event: {event.type}, Author: {event.author}") # Uncomment for detailed logging + if event.is_final_response() and event.content and event.content.parts: + # For output_schema, the content is the JSON string itself + final_response_content = event.content.parts[0].text + + print(f"<<< Agent '{agent_instance.name}' Response: {final_response_content}") + + current_session = await session_service.get_session(app_name=APP_NAME, + user_id=USER_ID, + session_id=session_id) + stored_output = current_session.state.get(agent_instance.output_key) + + # Pretty print if the stored output looks like JSON (likely from output_schema) + print(f"--- Session State ['{agent_instance.output_key}']: ", end="") + try: + # Attempt to parse and pretty print if it's JSON + parsed_output = json.loads(stored_output) + print(json.dumps(parsed_output, indent=2)) + except (json.JSONDecodeError, TypeError): + # Otherwise, print as string + print(stored_output) + print("-" * 30) + + +# --- 7. 
Run Interactions --- +async def main(): + # Create sessions + await session_service.create_session(app_name=APP_NAME, user_id=USER_ID, session_id=SESSION_ID_TOOL_AGENT) + await session_service.create_session(app_name=APP_NAME, user_id=USER_ID, session_id=SESSION_ID_SCHEMA_AGENT) - async for event in runner.run_async( - user_id=user_id, - session_id=session_id, - new_message=content, - run_config=run_config, - ): - if hasattr(event, 'content') and event.content and event.content.parts: - for part in event.content.parts: - if hasattr(part, 'text') and part.text: - print(part.text) - -# Run the agent -asyncio.run(run_agent()) + print("--- Testing Agent with Tool ---") + await call_agent_and_print(capital_runner, capital_agent_with_tool, SESSION_ID_TOOL_AGENT, '{"country": "France"}') + await call_agent_and_print(capital_runner, capital_agent_with_tool, SESSION_ID_TOOL_AGENT, '{"country": "Canada"}') + + print("\n\n--- Testing Agent with Output Schema (No Tool Use) ---") + await call_agent_and_print(structured_runner, structured_info_agent_schema, SESSION_ID_SCHEMA_AGENT, '{"country": "France"}') + await call_agent_and_print(structured_runner, structured_info_agent_schema, SESSION_ID_SCHEMA_AGENT, '{"country": "Japan"}') + +asyncio.run(main()) ``` diff --git a/docs/v2/introduction.mdx b/docs/v2/introduction.mdx index aa33e9188..796e03b21 100644 --- a/docs/v2/introduction.mdx +++ b/docs/v2/introduction.mdx @@ -35,22 +35,32 @@ Observability and monitoring for your AI agents and LLM apps. And we do it all i ... that logs everything back to your AgentOps Dashboard. -That's it! AgentOps will automatically instrument your code and start tracking sessions. +That's it! AgentOps will automatically instrument your code and start tracking traces. -Need more control? You can disable automatic session creation and manage sessions explicitly: +Need more control? 
You can disable automatic session creation and manage traces manually: ```python python import agentops agentops.init(, auto_start_session=False) - # Later, when you're ready to start a session: - agentops.start_session("my-workflow-session") + # Later, when you're ready to start a trace: + trace = agentops.start_trace("my-workflow-trace") # Your code here # ... - # Sessions automatically end when your program exits + # End the trace when done + agentops.end_trace(trace, "Success") + ``` + + +You can also set a custom trace name during initialization: + + + ```python python + import agentops + agentops.init(, trace_name="custom-trace-name") ``` diff --git a/docs/v2/quickstart.mdx b/docs/v2/quickstart.mdx index ef912d4e4..3339b0cb8 100644 --- a/docs/v2/quickstart.mdx +++ b/docs/v2/quickstart.mdx @@ -25,7 +25,7 @@ import EnvTooltip from '/snippets/add-env-tooltip.mdx' agentops.init() ``` - That's it! These two lines automatically instrument your code and start tracking sessions. Sessions automatically end when your program exits. + That's it! These two lines automatically instrument your code and start tracking traces. Traces automatically end when your program exits. @@ -84,21 +84,43 @@ import EnvTooltip from '/snippets/add-env-tooltip.mdx' ``` - - Create a session to group all your agent operations by using the `@session` decorator. Sessions serve as the root span for all operations. + + Track tool usage and costs with the `@tool` decorator. You can specify costs to get total cost tracking directly in your dashboard summary. 
```python python - # Create a session - from agentops.sdk.decorators import session + # Track tool usage with cost + from agentops.sdk.decorators import tool - @session + @tool(cost=0.05) + def web_search(query): + # Tool logic here + return f"Search results for: {query}" + + @tool + def calculate(expression): + # Tool without cost tracking + return eval(expression) + ``` + + + + Create custom traces to group operations using the `@trace` decorator, or manage traces manually for more control. + ```python python + # Create a trace with decorator + from agentops.sdk.decorators import trace + + @trace def my_workflow(): - # Your session code here + # Your workflow code here agent = MyAgent("research-agent") result = agent.perform_task("data analysis") return result - # Run the session - my_workflow() + # Or manage traces manually + import agentops + + trace = agentops.start_trace("custom-trace") + # Your code here + agentops.end_trace(trace, "Success") ``` @@ -110,10 +132,15 @@ Here is the complete code from the sections above ```python python import agentops -from agentops.sdk.decorators import agent, operation +from agentops.sdk.decorators import agent, operation, tool, trace # Initialize AgentOps -agentops.init() +agentops.init(, auto_start_session=False) + +# Create a tool with cost tracking +@tool(cost=0.05) +def web_search(query): + return f"Search results for: {query}" # Create an agent class @agent @@ -123,9 +150,11 @@ class MyAgent: @operation def perform_task(self, task): - # Agent task logic here - return f"Completed {task}" + # Use a tool within the agent + search_results = web_search(f"research {task}") + return f"Completed {task} with results: {search_results}" +@trace def run_agent_task(task_name): agent = MyAgent("research-agent") result = agent.perform_task(task_name) diff --git a/docs/v2/usage/manual-trace-control.mdx b/docs/v2/usage/manual-trace-control.mdx new file mode 100644 index 000000000..bc2524c11 --- /dev/null +++ 
b/docs/v2/usage/manual-trace-control.mdx @@ -0,0 +1,288 @@ +--- +title: "Manual Trace Control" +description: "Advanced trace management with start_trace and end_trace methods" +--- + +AgentOps 0.4.13 introduces powerful manual trace control capabilities, allowing you to precisely manage trace lifecycles and states. This is particularly useful for complex workflows, batch processing, and scenarios requiring fine-grained control over trace boundaries. + +## Basic Manual Trace Control + +### Starting and Ending Traces + +The most basic form of manual trace control involves starting a trace, executing your code, and then ending the trace with a specific state: + +```python +import agentops + +# Initialize without automatic session creation +agentops.init("your-api-key", auto_start_session=False) + +# Start a trace manually +trace = agentops.start_trace("my-workflow") + +try: + # Your application logic here + result = perform_some_operation() + + # End the trace successfully + agentops.end_trace(trace, "Success") +except Exception as e: + # End the trace with failure state + agentops.end_trace(trace, "Failure") +``` + +### Trace Names and Tags + +You can provide meaningful names and tags when starting traces: + +```python +# Start a trace with custom name and tags +trace = agentops.start_trace( + trace_name="customer-service-workflow", + tags=["customer-123", "priority-high", "support"] +) +``` + +### Batch Processing with Selective Trace Ending + +For batch processing scenarios, you can selectively end traces based on processing results: + +```python +import agentops + +agentops.init("your-api-key", auto_start_session=False) + +# Start traces for batch items +batch_traces = [] +for i, item in enumerate(batch_items): + trace = agentops.start_trace(f"batch_item_{i+1}") + batch_traces.append((trace, item)) + +# Process each item and end traces individually +for trace, item in batch_traces: + try: + result = process_item(item) + if result.success: + 
agentops.end_trace(trace, "Success") + else: + agentops.end_trace(trace, "Failure") + except Exception as e: + agentops.end_trace(trace, "Error") +``` + +## Emergency Shutdown + +### Ending All Active Traces + +In emergency situations or when you need to quickly shut down all active traces, you can end all traces at once: + +```python +# End all active traces with emergency state +agentops.end_trace(end_state="Emergency_Shutdown") + +# Or with a different state +agentops.end_trace(end_state="Timeout") +``` + +This is particularly useful for: +- Timeout scenarios +- Resource exhaustion +- Critical errors requiring immediate shutdown +- Graceful application termination + +## Common End States + +AgentOps supports various end states to categorize how traces completed: + +```python +# Success states +agentops.end_trace(trace, "Success") +agentops.end_trace(trace, "Completed") + +# Failure states +agentops.end_trace(trace, "Failure") +agentops.end_trace(trace, "Error") +agentops.end_trace(trace, "Timeout") + +# Business logic states +agentops.end_trace(trace, "Invalid_Input") +agentops.end_trace(trace, "Rate_Limited") +agentops.end_trace(trace, "Unauthorized") + +# Custom states +agentops.end_trace(trace, "Custom_Business_State") +``` + +## Integration with Decorators + +Manual trace control works seamlessly with AgentOps decorators: + +```python +import agentops +from agentops.sdk.decorators import agent, operation, tool + +agentops.init("your-api-key", auto_start_session=False) + +@agent +class CustomerServiceAgent: + @operation + def analyze_request(self, request): + return f"Analyzed: {request}" + + @tool(cost=0.02) + def lookup_customer(self, customer_id): + return f"Customer data for {customer_id}" + +# Manual trace with decorated components +trace = agentops.start_trace("customer-service") + +try: + agent = CustomerServiceAgent() + customer_data = agent.lookup_customer("CUST_123") + analysis = agent.analyze_request("billing issue") + + agentops.end_trace(trace, 
"Success") +except Exception as e: + agentops.end_trace(trace, "Error") +``` + +## Best Practices + +### 1. Always End Traces + +Ensure every started trace is properly ended to avoid resource leaks: + +```python +trace = agentops.start_trace("my-workflow") +try: + # Your code here + agentops.end_trace(trace, "Success") +except Exception as e: + agentops.end_trace(trace, "Error") +finally: + # Ensure trace is ended even if something unexpected happens + # (This is redundant if you properly handle exceptions above) + pass +``` + +### 2. Use Meaningful Names and States + +Choose descriptive names and appropriate end states: + +```python +# Good +trace = agentops.start_trace("user-authentication-flow") +agentops.end_trace(trace, "Authentication_Failed") + +# Less descriptive +trace = agentops.start_trace("trace1") +agentops.end_trace(trace, "Failed") +``` + +### 3. Use Emergency Shutdown Sparingly + +Reserve emergency shutdown for truly exceptional circumstances: + +```python +try: + # Normal processing + for item in large_batch: + trace = agentops.start_trace(f"item-{item.id}") + process_item(item) + agentops.end_trace(trace, "Success") +except KeyboardInterrupt: + # User interrupted - emergency shutdown + agentops.end_trace(end_state="User_Interrupted") +except MemoryError: + # System resource exhaustion - emergency shutdown + agentops.end_trace(end_state="Resource_Exhausted") +``` + +## Real-World Example + +Here's a comprehensive example showing manual trace control in a customer service application: + +```python +import agentops +from agentops.sdk.decorators import agent, operation, tool +from openai import OpenAI + +agentops.init("your-api-key", auto_start_session=False) +client = OpenAI() + +@agent +class CustomerServiceAgent: + @operation + def analyze_sentiment(self, text): + response = client.chat.completions.create( + model="gpt-3.5-turbo", + messages=[{"role": "user", "content": f"Analyze sentiment: {text}"}] + ) + return 
response.choices[0].message.content.strip() + + @tool(cost=0.01) + def lookup_order(self, order_id): + # Simulate order lookup + return f"Order {order_id} details" + +def process_customer_requests(requests): + """Process multiple customer requests with individual trace tracking""" + + # Start traces for each request + request_traces = [] + for i, request in enumerate(requests): + trace = agentops.start_trace( + f"customer_request_{i+1}", + tags=["customer-service", request.get("priority", "normal")] + ) + request_traces.append((trace, request)) + + agent = CustomerServiceAgent() + results = [] + + # Process each request + for trace, request in request_traces: + try: + # Analyze the request + sentiment = agent.analyze_sentiment(request["message"]) + + # Look up order if needed + if "order" in request: + order_info = agent.lookup_order(request["order"]) + + # Determine success based on sentiment + if "positive" in sentiment.lower() or "neutral" in sentiment.lower(): + agentops.end_trace(trace, "Success") + results.append({"status": "resolved", "sentiment": sentiment}) + else: + agentops.end_trace(trace, "Escalation_Required") + results.append({"status": "escalated", "sentiment": sentiment}) + + except Exception as e: + agentops.end_trace(trace, "Error") + results.append({"status": "error", "error": str(e)}) + + return results + +# Example usage +customer_requests = [ + {"message": "I love this product!", "priority": "low"}, + {"message": "My order is completely wrong!", "order": "12345", "priority": "high"}, + {"message": "When will my package arrive?", "order": "67890", "priority": "normal"} +] + +results = process_customer_requests(customer_requests) +print(f"Processed {len(results)} customer requests") +``` + +This example demonstrates: +- Individual trace management for each customer request +- Integration with decorated agents and tools +- Different end states based on business logic +- Proper error handling with appropriate trace states +- Use of tags for 
categorization + + + + + \ No newline at end of file diff --git a/docs/v2/usage/recording-operations.mdx b/docs/v2/usage/recording-operations.mdx index 93ebed9e9..df6510c9b 100644 --- a/docs/v2/usage/recording-operations.mdx +++ b/docs/v2/usage/recording-operations.mdx @@ -21,6 +21,15 @@ That's it! This single line of code will: - Intercept and track all LLM calls to supported providers (OpenAI, Anthropic, etc.) - Record relevant metrics such as token counts, costs, and response times +You can also set a custom trace name during initialization: + +```python +import agentops + +# Initialize with custom trace name +agentops.init("your-api-key", trace_name="my-custom-workflow") +``` + ## Automatic Instrumentation AgentOps automatically instruments calls to popular LLM providers without requiring any additional code: @@ -86,24 +95,59 @@ def research_workflow(topic): results = research_workflow("quantum computing") ``` -### Advanced Session Management +### `@tool` Decorator + +Track tool usage and costs with the `@tool` decorator. 
You can specify costs to get total cost tracking directly in your dashboard summary: + +```python +from agentops.sdk.decorators import tool + +@tool(cost=0.05) +def web_search(query): + # Tool implementation + return f"Search results for: {query}" + +@tool +def calculator(expression): + # Tool without cost tracking + return eval(expression) +``` + +### `@trace` Decorator -If you need more control over session lifecycle, you can disable automatic session creation: +Create custom traces to group related operations: + +```python +from agentops.sdk.decorators import trace, agent, operation + +@trace +def customer_service_workflow(customer_id): + agent = ResearchAgent() + results = agent.search(f"customer {customer_id}") + return results +``` + +### Manual Trace Management + +For more control over trace lifecycle, you can manage traces manually: ```python import agentops -from agentops.sdk.decorators import session -# Disable automatic session creation +# Initialize without automatic session creation agentops.init("your-api-key", auto_start_session=False) -@session -def my_workflow(): - # Your code here - pass - -# Run the workflow to create a session -my_workflow() +# Start a trace manually +trace = agentops.start_trace("my-custom-trace") + +# Your code here +# ... + +# End the trace with a specific state +agentops.end_trace(trace, "Success") + +# Or end all active traces +agentops.end_trace(end_state="Emergency_Shutdown") ``` ## Best Practices @@ -114,6 +158,10 @@ my_workflow() 3. **Meaningful Operation Names**: When using decorators, choose descriptive names to make them easier to identify in the dashboard. +4. **Cost Tracking**: Use the `@tool` decorator with cost parameters to track tool usage costs in your dashboard. + +5. **Trace Management**: Use manual trace management when you need precise control over trace boundaries and states. 
+ diff --git a/docs/v2/usage/sdk-reference.mdx b/docs/v2/usage/sdk-reference.mdx index 6852bc7e9..4eedec73a 100644 --- a/docs/v2/usage/sdk-reference.mdx +++ b/docs/v2/usage/sdk-reference.mdx @@ -33,6 +33,7 @@ Initializes the AgentOps SDK and automatically starts tracking your application. - `fail_safe` (bool, optional): Whether to suppress errors and continue execution when possible. Defaults to False. - `exporter_endpoint` (str, optional): Endpoint for the exporter. If not provided, will be read from the `AGENTOPS_EXPORTER_ENDPOINT` environment variable. Defaults to 'https://otlp.agentops.ai/v1/traces'. - `export_flush_interval` (int, optional): Time interval in milliseconds between automatic exports of telemetry data. Defaults to 1000. +- `trace_name` (str, optional): Custom name for the automatically created trace. If not provided, a default name will be used. **Returns**: @@ -45,6 +46,9 @@ import agentops # Basic initialization with automatic session creation agentops.init("your-api-key") + +# Initialize with custom trace name +agentops.init("your-api-key", trace_name="my-workflow") ``` ### `configure()` @@ -70,6 +74,7 @@ Updates client configuration after initialization. Supports the same parameters - `processor` (object, optional): Custom span processor for OpenTelemetry trace data. - `exporter_endpoint` (str, optional): Endpoint for the exporter. - `export_flush_interval` (int, optional): Time interval in milliseconds between automatic exports of telemetry data. +- `trace_name` (str, optional): Custom name for traces. **Example**: @@ -83,7 +88,8 @@ agentops.init() agentops.configure( max_wait_time=10000, max_queue_size=200, - default_tags=["production", "gpt-4"] + default_tags=["production", "gpt-4"], + trace_name="production-workflow" ) ``` @@ -95,17 +101,22 @@ Gets the singleton client instance. Most users won't need to use this function d - The AgentOps client instance. 
-## Session Management +## Trace Management -These functions help you manage the lifecycle of tracking sessions. +These functions help you manage the lifecycle of tracking traces. -### `start_session()` +### `start_trace()` -Starts a new AgentOps session manually. This is useful when you've disabled automatic session creation or need multiple separate sessions. +Starts a new AgentOps trace manually. This is useful when you've disabled automatic session creation or need multiple separate traces. **Parameters**: -- `tags` (Union[Dict[str, Any], List[str]], optional): Optional tags to attach to the session, useful for filtering in the dashboard. Can be a list of strings or a dict of key-value pairs. +- `trace_name` (str, optional): Name for the trace. If not provided, a default name will be used. +- `tags` (Union[Dict[str, Any], List[str]], optional): Optional tags to attach to the trace, useful for filtering in the dashboard. Can be a list of strings or a dict of key-value pairs. + +**Returns**: + +- TraceContext object representing the started trace. **Example**: @@ -115,44 +126,85 @@ import agentops # Initialize without auto-starting a session agentops.init("your-api-key", auto_start_session=False) -# Later, manually start a session -session = agentops.start_session(tags=["customer-query"]) +# Start a trace manually +trace = agentops.start_trace("customer-service-workflow", tags=["customer-query"]) +``` + +### `end_trace()` + +Ends a specific trace or all active traces. + +**Parameters**: + +- `trace` (TraceContext, optional): The specific trace to end. If not provided, all active traces will be ended. +- `end_state` (str, optional): The end state for the trace(s). Common values include "Success", "Failure", "Error", "Timeout", etc. + +**Example**: + +```python +import agentops + +# End a specific trace +trace = agentops.start_trace("my-workflow") +# ... your code ... 
+agentops.end_trace(trace, "Success") + +# End all active traces +agentops.end_trace(end_state="Emergency_Shutdown") ``` + ## Decorators for Detailed Instrumentation For more granular control, AgentOps provides decorators that explicitly track different components of your application. These decorators are imported from `agentops.sdk.decorators`. ```python import agentops -from agentops.sdk.decorators import session, agent, operation, workflow, task +from agentops.sdk.decorators import trace, agent, operation, tool # Initialize without automatic session creation agentops.init("your-api-key", auto_start_session=False) -# Create and run a session using the decorator -@session +# Create and run a trace using the decorator +@trace def my_workflow(): # Your workflow code here pass -# Run the workflow, which creates and manages the session +# Run the workflow, which creates and manages the trace my_workflow() ``` ### Available Decorators -- `@session`: Creates a session span, which serves as the root for all other spans +- `@trace`: Creates a trace span for grouping related operations - `@agent`: Creates an agent span for tracking agent operations - `@operation` / `@task`: Creates operation/task spans for tracking specific operations (these are aliases) - `@workflow`: Creates workflow spans for organizing related operations - `@tool`: Creates tool spans for tracking tool usage and cost in agent operations. Supports cost parameter for tracking tool usage costs. +**Tool Decorator Example**: + +```python +from agentops.sdk.decorators import tool + +@tool(cost=0.05) +def web_search(query): + # Tool implementation with cost tracking + return f"Search results for: {query}" + +@tool +def calculator(expression): + # Tool without cost tracking + return eval(expression) +``` + See [Decorators](/v2/concepts/decorators) for more detailed documentation on using these decorators. 
## Legacy Functions The following functions are maintained for backward compatibility with older versions of the SDK and integrations. New code should use the functions and decorators described above instead. +- `start_session()`: Legacy function for starting sessions. Use `start_trace()` instead. - `record(event)`: Legacy function to record an event. Replaced by decorator-based tracing. - `track_agent()`: Legacy decorator for marking agents. Replaced by the `@agent` decorator. - `track_tool()`: Legacy decorator for marking tools. Replaced by the `@tool` decorator. diff --git a/docs/v2/usage/trace-decorator.mdx b/docs/v2/usage/trace-decorator.mdx new file mode 100644 index 000000000..b12e54580 --- /dev/null +++ b/docs/v2/usage/trace-decorator.mdx @@ -0,0 +1,453 @@ +--- +title: "Trace Decorator" +description: "Create custom traces with the @trace decorator" +--- + +AgentOps 0.4.13 introduces the `@trace` decorator, which provides a clean and intuitive way to create custom traces for grouping related operations. This decorator serves as a replacement for the legacy `@session` decorator and offers more flexibility for organizing your application's telemetry data. 
+ +## Basic Usage + +### Simple Trace Creation + +The `@trace` decorator automatically creates a trace span that encompasses the entire function execution: + +```python +from agentops.sdk.decorators import trace +import agentops + +# Initialize AgentOps +agentops.init("your-api-key", auto_start_session=False) + +@trace +def my_workflow(): + """A simple workflow wrapped in a trace""" + print("Executing workflow...") + # Your application logic here + return "Workflow completed" + +# Run the function - this creates and manages the trace automatically +result = my_workflow() +``` + +### Custom Trace Names + +You can specify custom names for your traces: + +```python +@trace(name="customer-onboarding-flow") +def onboard_customer(customer_data): + """Customer onboarding process""" + # Process customer data + return f"Onboarded customer: {customer_data['name']}" + +@trace(name="data-processing-pipeline") +def process_data(input_data): + """Data processing workflow""" + # Process the data + return f"Processed {len(input_data)} items" +``` + +### Adding Tags to Traces + +Tags help categorize and filter traces in your dashboard: + +```python +@trace(tags=["production", "high-priority"]) +def critical_workflow(): + """Critical production workflow""" + return "Critical task completed" + +@trace(name="user-analysis", tags=["analytics", "user-behavior"]) +def analyze_user_behavior(user_id): + """Analyze user behavior patterns""" + return f"Analysis for user {user_id}" +``` + +## Integration with Other Decorators + +### Combining with Agent and Operation Decorators + +The `@trace` decorator works seamlessly with other AgentOps decorators: + +```python +from agentops.sdk.decorators import trace, agent, operation, tool + +@agent +class DataAnalysisAgent: + @operation + def collect_data(self, source): + return f"Data collected from {source}" + + @tool(cost=0.05) + def analyze_data(self, data): + return f"Analysis of {data}" + + @operation + def generate_report(self, analysis): + 
return f"Report: {analysis}" + +@trace(name="complete-analysis-workflow") +def run_analysis_workflow(data_source): + """Complete data analysis workflow""" + agent = DataAnalysisAgent() + + # Collect data + data = agent.collect_data(data_source) + + # Analyze data + analysis = agent.analyze_data(data) + + # Generate report + report = agent.generate_report(analysis) + + return { + "source": data_source, + "report": report + } + +# Usage +result = run_analysis_workflow("customer_database") +``` + +## Async Function Support + +The `@trace` decorator fully supports async functions: + +```python +import asyncio +from agentops.sdk.decorators import trace, operation + +@operation +async def fetch_user_data(user_id): + """Simulate async data fetching""" + await asyncio.sleep(1) # Simulate API call + return f"User data for {user_id}" + +@operation +async def process_user_data(user_data): + """Simulate async data processing""" + await asyncio.sleep(0.5) # Simulate processing + return f"Processed: {user_data}" + +@trace(name="async-user-workflow") +async def async_user_workflow(user_id): + """Async workflow for user processing""" + user_data = await fetch_user_data(user_id) + processed_data = await process_user_data(user_data) + return processed_data + +# Usage +async def main(): + result = await async_user_workflow("user_123") + print(result) + +# Run the async workflow +asyncio.run(main()) +``` + +## Error Handling and Trace States + +### Automatic Error Handling + +The `@trace` decorator automatically handles exceptions and sets appropriate trace states: + +```python +@trace(name="error-prone-workflow") +def risky_operation(): + """Operation that might fail""" + import random + + if random.random() < 0.5: + raise ValueError("Random failure occurred") + + return "Operation succeeded" + +# The trace will automatically be marked with failure state if an exception occurs +try: + result = risky_operation() + print(f"Success: {result}") +except ValueError as e: + 
print(f"Operation failed: {e}") + # Trace is automatically ended with error state +``` + +### Custom Error Handling + +You can implement custom error handling within traced functions: + +```python +@trace(name="robust-workflow") +def robust_operation(data): + """Operation with custom error handling""" + try: + # Risky operation + if not data: + raise ValueError("No data provided") + + # Process data + result = f"Processed: {data}" + return {"success": True, "result": result} + + except ValueError as e: + # Handle specific errors + return {"success": False, "error": str(e)} + except Exception as e: + # Handle unexpected errors + return {"success": False, "error": f"Unexpected error: {str(e)}"} + +# Usage +result1 = robust_operation("valid_data") # Success trace +result2 = robust_operation("") # Failure trace with custom handling +``` + +## Real-World Examples + +### E-commerce Order Processing + +```python +from agentops.sdk.decorators import trace, agent, operation, tool +import agentops + +agentops.init("your-api-key", auto_start_session=False) + +@agent +class OrderProcessor: + @tool(cost=0.01) + def validate_payment(self, payment_info): + """Payment validation service""" + return {"valid": True, "transaction_id": "txn_123"} + + @tool(cost=0.02) + def check_inventory(self, product_id, quantity): + """Inventory check service""" + return {"available": True, "reserved": quantity} + + @operation + def calculate_shipping(self, address, items): + """Calculate shipping costs""" + return {"cost": 9.99, "method": "standard"} + + @tool(cost=0.005) + def send_confirmation_email(self, email, order_details): + """Email service""" + return f"Confirmation sent to {email}" + +@trace(name="order-processing", tags=["ecommerce", "orders"]) +def process_order(order_data): + """Complete order processing workflow""" + processor = OrderProcessor() + + try: + # Validate payment + payment_result = processor.validate_payment(order_data["payment"]) + if not payment_result["valid"]: + 
return {"success": False, "error": "Payment validation failed"} + + # Check inventory for all items + for item in order_data["items"]: + inventory_result = processor.check_inventory( + item["product_id"], + item["quantity"] + ) + if not inventory_result["available"]: + return {"success": False, "error": f"Item {item['product_id']} not available"} + + # Calculate shipping + shipping = processor.calculate_shipping( + order_data["shipping_address"], + order_data["items"] + ) + + # Send confirmation + confirmation = processor.send_confirmation_email( + order_data["customer_email"], + { + "items": order_data["items"], + "shipping": shipping, + "payment": payment_result + } + ) + + return { + "success": True, + "order_id": "ORD_12345", + "payment": payment_result, + "shipping": shipping, + "confirmation": confirmation + } + + except Exception as e: + return {"success": False, "error": str(e)} + +# Usage +order = { + "customer_email": "customer@example.com", + "payment": {"card": "****1234", "amount": 99.99}, + "items": [{"product_id": "PROD_001", "quantity": 2}], + "shipping_address": {"city": "New York", "state": "NY"} +} + +result = process_order(order) +``` + +### Machine Learning Pipeline + +```python +from agentops.sdk.decorators import trace, operation, tool + +@tool(cost=0.10) +def load_dataset(dataset_path): + """Data loading service""" + return f"Dataset loaded from {dataset_path}" + +@operation +def preprocess_data(raw_data): + """Data preprocessing""" + return f"Preprocessed: {raw_data}" + +@tool(cost=0.50) +def train_model(processed_data, model_config): + """Model training service""" + return { + "model_id": "model_v1.0", + "accuracy": 0.95, + "training_time": "2h 30m" + } + +@operation +def evaluate_model(model, test_data): + """Model evaluation""" + return { + "accuracy": 0.94, + "precision": 0.93, + "recall": 0.95 + } + +@tool(cost=0.05) +def deploy_model(model, deployment_config): + """Model deployment service""" + return { + "endpoint": 
"https://api.example.com/model/v1", + "status": "deployed" + } + +@trace(name="ml-pipeline", tags=["machine-learning", "training"]) +def ml_training_pipeline(dataset_path, model_config, deployment_config): + """Complete ML training and deployment pipeline""" + + # Load and preprocess data + raw_data = load_dataset(dataset_path) + processed_data = preprocess_data(raw_data) + + # Train model + model = train_model(processed_data, model_config) + + # Evaluate model + evaluation = evaluate_model(model, processed_data) + + # Deploy if evaluation meets criteria + if evaluation["accuracy"] > 0.90: + deployment = deploy_model(model, deployment_config) + return { + "success": True, + "model": model, + "evaluation": evaluation, + "deployment": deployment + } + else: + return { + "success": False, + "reason": "Model accuracy below threshold", + "evaluation": evaluation + } + +# Usage +pipeline_result = ml_training_pipeline( + dataset_path="/data/training_set.csv", + model_config={"algorithm": "random_forest", "max_depth": 10}, + deployment_config={"environment": "production", "replicas": 3} +) +``` + +## Best Practices + +### 1. Use Meaningful Names + +Choose descriptive names that clearly indicate what the trace represents: + +```python +# Good +@trace(name="user-authentication-flow") +def authenticate_user(credentials): + pass + +@trace(name="payment-processing-pipeline") +def process_payment(payment_data): + pass + +# Less descriptive +@trace(name="trace1") +def some_function(): + pass +``` + +### 2. Add Relevant Tags + +Use tags to categorize traces for easier filtering and analysis: + +```python +@trace(name="order-fulfillment", tags=["ecommerce", "fulfillment", "high-priority"]) +def fulfill_order(order_id): + pass + +@trace(name="data-sync", tags=["background-job", "data-processing"]) +def sync_data(): + pass +``` + +### 3. 
Keep Traces Focused + +Each trace should represent a logical unit of work: + +```python +# Good - focused on a single workflow +@trace(name="customer-onboarding") +def onboard_customer(customer_data): + validate_customer(customer_data) + create_account(customer_data) + send_welcome_email(customer_data) + +# Less focused - mixing different concerns +@trace(name="mixed-operations") +def do_everything(): + onboard_customer(data1) + process_orders(data2) + generate_reports(data3) +``` + +### 4. Handle Errors Appropriately + +Implement proper error handling within traced functions: + +```python +@trace(name="data-processing") +def process_data(data): + try: + # Main processing logic + result = complex_processing(data) + return {"success": True, "result": result} + except ValidationError as e: + # Expected errors + return {"success": False, "error": "validation_failed", "details": str(e)} + except Exception as e: + # Unexpected errors + logger.error(f"Unexpected error in data processing: {e}") + return {"success": False, "error": "processing_failed"} +``` + +The `@trace` decorator provides a powerful and flexible way to organize your application's telemetry data. By creating logical groupings of operations, you can better understand your application's behavior and performance characteristics in the AgentOps dashboard. 
+ + + + + \ No newline at end of file diff --git a/docs/v2/usage/tracking-agents.mdx b/docs/v2/usage/tracking-agents.mdx index 7ee48f3fa..096ecf54c 100644 --- a/docs/v2/usage/tracking-agents.mdx +++ b/docs/v2/usage/tracking-agents.mdx @@ -35,11 +35,11 @@ For more structured tracking in complex applications, you can use the `@agent` d ```python import agentops -from agentops.sdk.decorators import agent, operation +from agentops.sdk.decorators import agent, operation, trace from openai import OpenAI # Initialize AgentOps -agentops.init("your-api-key") +agentops.init("your-api-key", auto_start_session=False) # Create a decorated agent class @agent(name='ResearchAgent') @@ -55,7 +55,8 @@ class MyAgent: ) return response.choices[0].message.content -# Create and use the agent within a function +# Create a trace to group the agent operations +@trace(name="research-workflow") def research_workflow(topic): agent = MyAgent() result = agent.search(topic) @@ -74,6 +75,121 @@ class ResearchAgent: pass ``` +## Multi-Agent Systems + +For complex multi-agent systems, you can organize multiple agents within a single trace: + +```python +from agentops.sdk.decorators import agent, operation, tool, trace + +@agent +class DataCollectionAgent: + @tool(cost=0.02) + def fetch_data(self, source): + return f"Data from {source}" + +@agent +class AnalysisAgent: + @operation + def analyze_data(self, data): + return f"Analysis of {data}" + +@agent +class ReportingAgent: + @tool(cost=0.01) + def generate_report(self, analysis): + return f"Report: {analysis}" + +@trace(name="multi-agent-workflow") +def collaborative_workflow(data_source): + """Workflow using multiple specialized agents""" + + # Data collection + collector = DataCollectionAgent() + raw_data = collector.fetch_data(data_source) + + # Analysis + analyzer = AnalysisAgent() + analysis = analyzer.analyze_data(raw_data) + + # Reporting + reporter = ReportingAgent() + report = reporter.generate_report(analysis) + + return { + "source": 
data_source, + "analysis": analysis, + "report": report + } + +# Run the collaborative workflow +result = collaborative_workflow("customer_database") +``` + +## Agent Communication and Coordination + +You can track complex agent interactions and communication patterns: + +```python +from agentops.sdk.decorators import agent, operation, tool, trace + +@agent +class CoordinatorAgent: + def __init__(self): + self.task_queue = [] + + @operation + def assign_task(self, task, agent_type): + self.task_queue.append({"task": task, "agent": agent_type}) + return f"Task assigned to {agent_type}: {task}" + + @operation + def collect_results(self, results): + return f"Collected {len(results)} results" + +@agent +class WorkerAgent: + def __init__(self, agent_id): + self.agent_id = agent_id + + @tool(cost=0.05) + def process_task(self, task): + return f"Agent {self.agent_id} processed: {task}" + +@trace(name="coordinated-processing") +def coordinated_processing_workflow(tasks): + """Workflow with agent coordination""" + coordinator = CoordinatorAgent() + workers = [WorkerAgent(f"worker_{i}") for i in range(3)] + + # Assign tasks + assignments = [] + for i, task in enumerate(tasks): + worker_type = f"worker_{i % len(workers)}" + assignment = coordinator.assign_task(task, worker_type) + assignments.append(assignment) + + # Process tasks + results = [] + for i, task in enumerate(tasks): + worker = workers[i % len(workers)] + result = worker.process_task(task) + results.append(result) + + # Collect results + summary = coordinator.collect_results(results) + + return { + "assignments": assignments, + "results": results, + "summary": summary + } + +# Run coordinated workflow +tasks = ["analyze_data", "generate_report", "send_notification"] +result = coordinated_processing_workflow(tasks) +``` + ## Dashboard Visualization All operations are automatically associated with the agent that originated them. Agents are given a name which is what you will see in the dashboard. 
@@ -88,4 +204,34 @@ All operations are automatically associated with the agent that originated them. 2. **Use Decorators When Needed**: Add the `@agent` decorator when you need to clearly distinguish between multiple agents in your system. -3. **Meaningful Names**: Choose descriptive names for your agents to make them easier to identify in the dashboard. \ No newline at end of file +3. **Meaningful Names**: Choose descriptive names for your agents to make them easier to identify in the dashboard. + +4. **Organize with Traces**: Use the `@trace` decorator to group related agent operations into logical workflows. + +5. **Track Costs**: Use the `@tool` decorator with cost parameters to track the expenses associated with agent operations. + +6. **Agent Specialization**: Create specialized agents for different types of tasks to improve observability and maintainability. + +## Migration from Session Decorator + +If you're migrating from the legacy `@session` decorator, replace it with the `@trace` decorator: + +```python +# Old approach (deprecated) +from agentops.sdk.decorators import session, agent + +@session +def my_workflow(): + # workflow code + pass + +# New approach (recommended) +from agentops.sdk.decorators import trace, agent + +@trace(name="my-workflow") +def my_workflow(): + # workflow code + pass +``` + +The `@trace` decorator provides the same functionality as the legacy `@session` decorator but with more flexibility and better integration with the new trace management features. \ No newline at end of file From e7e1d34f9dfa3ed1cdc8e235d1303a0e5203060c Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Thu, 29 May 2025 02:09:30 +0530 Subject: [PATCH 2/4] Enhance documentation for trace management and agent tracking - Updated `introduction.mdx` to clarify the use of the `@trace` decorator for custom trace creation. - Revised `quickstart.mdx` to include examples of initializing AgentOps with manual trace control. 
- Improved `examples.mdx` by updating image links to use raw GitHub URLs for better accessibility. - Enhanced `manual-trace-control.mdx` to provide detailed examples of managing traces manually. - Updated `recording-operations.mdx` to emphasize the use of the `@trace` decorator for grouping operations. - Refined `sdk-reference.mdx` to clarify parameters for ending traces and the use of decorators. - Improved `tracking-agents.mdx` to demonstrate structured tracking with the `@agent` decorator in multi-agent systems. These changes improve the clarity and usability of the AgentOps SDK, particularly around trace management and agent tracking. --- docs/v2/examples/examples.mdx | 20 +- docs/v2/introduction.mdx | 15 +- docs/v2/quickstart.mdx | 4 +- docs/v2/usage/manual-trace-control.mdx | 152 ++-------- docs/v2/usage/recording-operations.mdx | 35 +-- docs/v2/usage/sdk-reference.mdx | 6 +- docs/v2/usage/trace-decorator.mdx | 379 +++++++++++++++++++------ docs/v2/usage/tracking-agents.mdx | 76 ++--- 8 files changed, 381 insertions(+), 306 deletions(-) diff --git a/docs/v2/examples/examples.mdx b/docs/v2/examples/examples.mdx index 7fca0428d..b36f990d5 100644 --- a/docs/v2/examples/examples.mdx +++ b/docs/v2/examples/examples.mdx @@ -13,7 +13,7 @@ mode: "wide" Tracking operations from multiple different agents - } iconType="image" href="/v2/examples/openai_assistants"> + } iconType="image" href="/v2/examples/openai_assistants"> Observe OpenAI Assistants with AgentOps @@ -21,7 +21,7 @@ mode: "wide" Basic usage with OpenAI API, perfect for getting started - } iconType="image" href="/v2/integrations/agentssdk"> + } iconType="image" href="/v2/integrations/agentssdk"> Monitor multi-agent workflows with handoffs and tool usage @@ -29,11 +29,11 @@ mode: "wide" ### Other Integrations Documentation - } iconType="image" href="/v2/integrations/anthropic"> + } iconType="image" href="/v2/integrations/anthropic"> Track observations from Claude, Haiku and Sonnet series of models - } 
iconType="image" href="/v2/integrations/autogen"> + } iconType="image" href="/v2/integrations/autogen"> Autogen/AG2 multi-agent conversable workflow with tool usage @@ -41,19 +41,19 @@ mode: "wide" CrewAI multi-agent framework with AgentOps support - } iconType="image" href="/v2/integrations/gemini"> + } iconType="image" href="/v2/integrations/gemini"> Explore Google DeepMind's Gemini with observation via AgentOps - } iconType="image" href="/v2/integrations/google_adk"> + } iconType="image" href="/v2/integrations/google_adk"> Track and analyze your Google Agent Development Kit (ADK) AI agents - } iconType="image" href="/v2/integrations/ibm_watsonx_ai"> + } iconType="image" href="/v2/integrations/ibm_watsonx_ai"> Track and analyze your IBM Watsonx.ai model interactions - } iconType="image" href="/v2/integrations/langchain"> + } iconType="image" href="/v2/integrations/langchain"> First-class support for LangChain agents and chains @@ -61,11 +61,11 @@ mode: "wide" Unified interface for multiple LLM providers - } iconType="image" href="/v2/integrations/openai"> + } iconType="image" href="/v2/integrations/openai"> First-class support for GPT family of models - } iconType="image" href="/v2/integrations/agentssdk"> + } iconType="image" href="/v2/integrations/agentssdk"> Monitor OpenAI Agents SDK multi-agent workflows with handoffs diff --git a/docs/v2/introduction.mdx b/docs/v2/introduction.mdx index 796e03b21..dd9a2bd71 100644 --- a/docs/v2/introduction.mdx +++ b/docs/v2/introduction.mdx @@ -37,21 +37,20 @@ Observability and monitoring for your AI agents and LLM apps. And we do it all i That's it! AgentOps will automatically instrument your code and start tracking traces. -Need more control? You can disable automatic session creation and manage traces manually: +Need more control?
You can create custom traces using the `@trace` decorator (recommended) or manage traces manually for advanced use cases: ```python python import agentops - agentops.init(, auto_start_session=False) + from agentops.sdk.decorators import trace - # Later, when you're ready to start a trace: - trace = agentops.start_trace("my-workflow-trace") + agentops.init(, auto_start_session=False) - # Your code here - # ... + @trace(name="my-workflow", tags=["production"]) + def my_workflow(): + # Your code here + return "Workflow completed" - # End the trace when done - agentops.end_trace(trace, "Success") ``` diff --git a/docs/v2/quickstart.mdx b/docs/v2/quickstart.mdx index 3339b0cb8..86ef56c0c 100644 --- a/docs/v2/quickstart.mdx +++ b/docs/v2/quickstart.mdx @@ -134,7 +134,7 @@ Here is the complete code from the sections above import agentops from agentops.sdk.decorators import agent, operation, tool, trace -# Initialize AgentOps +# Initialize AgentOps without auto-starting session since we use @trace agentops.init(, auto_start_session=False) # Create a tool with cost tracking @@ -154,7 +154,7 @@ class MyAgent: search_results = web_search(f"research {task}") return f"Completed {task} with results: {search_results}" -@trace +@trace(name="research-workflow", tags=["research", "analysis"]) def run_agent_task(task_name): agent = MyAgent("research-agent") result = agent.perform_task(task_name) diff --git a/docs/v2/usage/manual-trace-control.mdx b/docs/v2/usage/manual-trace-control.mdx index bc2524c11..7086904bb 100644 --- a/docs/v2/usage/manual-trace-control.mdx +++ b/docs/v2/usage/manual-trace-control.mdx @@ -3,8 +3,6 @@ title: "Manual Trace Control" description: "Advanced trace management with start_trace and end_trace methods" --- -AgentOps 0.4.13 introduces powerful manual trace control capabilities, allowing you to precisely manage trace lifecycles and states. 
This is particularly useful for complex workflows, batch processing, and scenarios requiring fine-grained control over trace boundaries. - ## Basic Manual Trace Control ### Starting and Ending Traces @@ -50,19 +48,28 @@ For batch processing scenarios, you can selectively end traces based on processi ```python import agentops +# Initialize AgentOps agentops.init("your-api-key", auto_start_session=False) +# Sample batch items to process +batch_items = [ + {"id": 1, "data": "item_1_data", "valid": True}, + {"id": 2, "data": "item_2_data", "valid": False}, + {"id": 3, "data": "item_3_data", "valid": True}, +] +@agentops.operation(name="process_item") +def process_item(item): + """Simulate processing an item""" + if not item.get("valid", False): + raise ValueError(f"Invalid item: {item['id']}") + return {"processed": True, "result": f"Processed {item['data']}"} + # Start traces for batch items -batch_traces = [] for i, item in enumerate(batch_items): trace = agentops.start_trace(f"batch_item_{i+1}") - batch_traces.append((trace, item)) - -# Process each item and end traces individually -for trace, item in batch_traces: try: result = process_item(item) - if result.success: + if result.get("processed"): agentops.end_trace(trace, "Success") else: agentops.end_trace(trace, "Failure") @@ -70,49 +77,6 @@ for trace, item in batch_traces: agentops.end_trace(trace, "Error") ``` -## Emergency Shutdown - -### Ending All Active Traces - -In emergency situations or when you need to quickly shut down all active traces, you can end all traces at once: - -```python -# End all active traces with emergency state -agentops.end_trace(end_state="Emergency_Shutdown") - -# Or with a different state -agentops.end_trace(end_state="Timeout") -``` - -This is particularly useful for: -- Timeout scenarios -- Resource exhaustion -- Critical errors requiring immediate shutdown -- Graceful application termination - -## Common End States - -AgentOps supports various end states to categorize how traces 
completed: - -```python -# Success states -agentops.end_trace(trace, "Success") -agentops.end_trace(trace, "Completed") - -# Failure states -agentops.end_trace(trace, "Failure") -agentops.end_trace(trace, "Error") -agentops.end_trace(trace, "Timeout") - -# Business logic states -agentops.end_trace(trace, "Invalid_Input") -agentops.end_trace(trace, "Rate_Limited") -agentops.end_trace(trace, "Unauthorized") - -# Custom states -agentops.end_trace(trace, "Custom_Business_State") -``` - ## Integration with Decorators Manual trace control works seamlessly with AgentOps decorators: @@ -146,58 +110,6 @@ except Exception as e: agentops.end_trace(trace, "Error") ``` -## Best Practices - -### 1. Always End Traces - -Ensure every started trace is properly ended to avoid resource leaks: - -```python -trace = agentops.start_trace("my-workflow") -try: - # Your code here - agentops.end_trace(trace, "Success") -except Exception as e: - agentops.end_trace(trace, "Error") -finally: - # Ensure trace is ended even if something unexpected happens - # (This is redundant if you properly handle exceptions above) - pass -``` - -### 2. Use Meaningful Names and States - -Choose descriptive names and appropriate end states: - -```python -# Good -trace = agentops.start_trace("user-authentication-flow") -agentops.end_trace(trace, "Authentication_Failed") - -# Less descriptive -trace = agentops.start_trace("trace1") -agentops.end_trace(trace, "Failed") -``` - -### 3. 
Use Emergency Shutdown Sparingly - -Reserve emergency shutdown for truly exceptional circumstances: - -```python -try: - # Normal processing - for item in large_batch: - trace = agentops.start_trace(f"item-{item.id}") - process_item(item) - agentops.end_trace(trace, "Success") -except KeyboardInterrupt: - # User interrupted - emergency shutdown - agentops.end_trace(end_state="User_Interrupted") -except MemoryError: - # System resource exhaustion - emergency shutdown - agentops.end_trace(end_state="Resource_Exhausted") -``` - ## Real-World Example Here's a comprehensive example showing manual trace control in a customer service application: @@ -207,50 +119,35 @@ import agentops from agentops.sdk.decorators import agent, operation, tool from openai import OpenAI -agentops.init("your-api-key", auto_start_session=False) +agentops.init(auto_start_session=False) client = OpenAI() -@agent -class CustomerServiceAgent: - @operation - def analyze_sentiment(self, text): +@operation +def analyze_sentiment(text): response = client.chat.completions.create( model="gpt-3.5-turbo", messages=[{"role": "user", "content": f"Analyze sentiment: {text}"}] ) return response.choices[0].message.content.strip() - @tool(cost=0.01) - def lookup_order(self, order_id): - # Simulate order lookup - return f"Order {order_id} details" +@tool(cost=0.01) +def lookup_order(order_id): + return f"Order {order_id} details" def process_customer_requests(requests): """Process multiple customer requests with individual trace tracking""" - - # Start traces for each request - request_traces = [] + results = [] for i, request in enumerate(requests): trace = agentops.start_trace( f"customer_request_{i+1}", tags=["customer-service", request.get("priority", "normal")] ) - request_traces.append((trace, request)) - - agent = CustomerServiceAgent() - results = [] - - # Process each request - for trace, request in request_traces: try: - # Analyze the request - sentiment = agent.analyze_sentiment(request["message"]) + 
sentiment = analyze_sentiment(request["message"]) - # Look up order if needed if "order" in request: - order_info = agent.lookup_order(request["order"]) + order_info = lookup_order(request["order"]) - # Determine success based on sentiment if "positive" in sentiment.lower() or "neutral" in sentiment.lower(): agentops.end_trace(trace, "Success") results.append({"status": "resolved", "sentiment": sentiment}) @@ -264,7 +161,6 @@ def process_customer_requests(requests): return results -# Example usage customer_requests = [ {"message": "I love this product!", "priority": "low"}, {"message": "My order is completely wrong!", "order": "12345", "priority": "high"}, diff --git a/docs/v2/usage/recording-operations.mdx b/docs/v2/usage/recording-operations.mdx index df6510c9b..23075baec 100644 --- a/docs/v2/usage/recording-operations.mdx +++ b/docs/v2/usage/recording-operations.mdx @@ -115,52 +115,33 @@ def calculator(expression): ### `@trace` Decorator -Create custom traces to group related operations: +Create custom traces to group related operations using the `@trace` decorator. This is the recommended approach for most applications: ```python +import agentops from agentops.sdk.decorators import trace, agent, operation -@trace +# Initialize AgentOps without auto-starting session since we use @trace +agentops.init("your-api-key", auto_start_session=False) + +@trace(name="customer-service-workflow", tags=["customer-support"]) def customer_service_workflow(customer_id): agent = ResearchAgent() results = agent.search(f"customer {customer_id}") return results ``` -### Manual Trace Management - -For more control over trace lifecycle, you can manage traces manually: - -```python -import agentops - -# Initialize without automatic session creation -agentops.init("your-api-key", auto_start_session=False) - -# Start a trace manually -trace = agentops.start_trace("my-custom-trace") - -# Your code here -# ... 
- -# End the trace with a specific state -agentops.end_trace(trace, "Success") - -# Or end all active traces -agentops.end_trace(end_state="Emergency_Shutdown") -``` ## Best Practices 1. **Keep it Simple**: For most applications, just initializing AgentOps with `agentops.init()` is sufficient. -2. **Use Decorators Sparingly**: Only add decorators when you need more detailed tracking of specific operations. +2. **Use @trace for Custom Workflows**: When you need to group operations, use the `@trace` decorator instead of manual trace management. -3. **Meaningful Operation Names**: When using decorators, choose descriptive names to make them easier to identify in the dashboard. +3. **Meaningful Names and Tags**: When using decorators, choose descriptive names and relevant tags to make them easier to identify in the dashboard. 4. **Cost Tracking**: Use the `@tool` decorator with cost parameters to track tool usage costs in your dashboard. -5. **Trace Management**: Use manual trace management when you need precise control over trace boundaries and states. diff --git a/docs/v2/usage/sdk-reference.mdx b/docs/v2/usage/sdk-reference.mdx index 4eedec73a..de29cd30c 100644 --- a/docs/v2/usage/sdk-reference.mdx +++ b/docs/v2/usage/sdk-reference.mdx @@ -137,7 +137,7 @@ Ends a specific trace or all active traces. **Parameters**: - `trace` (TraceContext, optional): The specific trace to end. If not provided, all active traces will be ended. -- `end_state` (str, optional): The end state for the trace(s). Common values include "Success", "Failure", "Error", "Timeout", etc. +- `end_state` (str, optional): The end state for the trace(s). You can use any descriptive string that makes sense for your application (e.g., "Success", "Failure", "Error", "Timeout", etc.). 
**Example**: @@ -155,7 +155,7 @@ agentops.end_trace(end_state="Emergency_Shutdown") ## Decorators for Detailed Instrumentation -For more granular control, AgentOps provides decorators that explicitly track different components of your application. These decorators are imported from `agentops.sdk.decorators`. +For more granular control, AgentOps provides decorators that explicitly track different components of your application. **The `@trace` decorator is the recommended approach for creating custom traces**, especially in multi-threaded environments. These decorators are imported from `agentops.sdk.decorators`. ```python import agentops @@ -204,7 +204,7 @@ See [Decorators](/v2/concepts/decorators) for more detailed documentation on usi The following functions are maintained for backward compatibility with older versions of the SDK and integrations. New code should use the functions and decorators described above instead. -- `start_session()`: Legacy function for starting sessions. Use `start_trace()` instead. +- `start_session()`: Legacy function for starting sessions. Use `@trace` decorator or `start_trace()` instead. - `record(event)`: Legacy function to record an event. Replaced by decorator-based tracing. - `track_agent()`: Legacy decorator for marking agents. Replaced by the `@agent` decorator. - `track_tool()`: Legacy decorator for marking tools. Replaced by the `@tool` decorator. diff --git a/docs/v2/usage/trace-decorator.mdx b/docs/v2/usage/trace-decorator.mdx index b12e54580..c4523e039 100644 --- a/docs/v2/usage/trace-decorator.mdx +++ b/docs/v2/usage/trace-decorator.mdx @@ -3,13 +3,11 @@ title: "Trace Decorator" description: "Create custom traces with the @trace decorator" --- -AgentOps 0.4.13 introduces the `@trace` decorator, which provides a clean and intuitive way to create custom traces for grouping related operations. 
This decorator serves as a replacement for the legacy `@session` decorator and offers more flexibility for organizing your application's telemetry data. - ## Basic Usage ### Simple Trace Creation -The `@trace` decorator automatically creates a trace span that encompasses the entire function execution: +The `@trace` decorator automatically creates a trace span that encompasses the entire function execution. You can optionally specify custom names and tags to better organize and categorize your traces: ```python from agentops.sdk.decorators import trace @@ -18,17 +16,23 @@ import agentops # Initialize AgentOps agentops.init("your-api-key", auto_start_session=False) -@trace +@trace(name="customer-workflow", tags=["production", "customer-service"]) def my_workflow(): """A simple workflow wrapped in a trace""" - print("Executing workflow...") + print("šŸš€ Starting customer workflow...") + print("šŸ“‹ Processing customer request...") # Your application logic here + print("āœ… Customer workflow completed successfully!") return "Workflow completed" # Run the function - this creates and manages the trace automatically +print("šŸŽ¬ Running traced workflow...") result = my_workflow() +print(f"šŸ“Š Result: {result}") ``` +Both `name` and `tags` parameters are optional. If no name is provided, the function name will be used as the trace name. 
+ ### Custom Trace Names You can specify custom names for your traces: @@ -37,14 +41,28 @@ You can specify custom names for your traces: @trace(name="customer-onboarding-flow") def onboard_customer(customer_data): """Customer onboarding process""" - # Process customer data + print(f"šŸ‘‹ Onboarding customer: {customer_data['name']}") + print("šŸ“ Creating customer profile...") + print("šŸ“§ Sending welcome email...") + print("āœ… Customer onboarding complete!") return f"Onboarded customer: {customer_data['name']}" @trace(name="data-processing-pipeline") def process_data(input_data): """Data processing workflow""" - # Process the data + print(f"šŸ“Š Processing {len(input_data)} data items...") + print("šŸ”„ Applying transformations...") + print("āœ… Data processing complete!") return f"Processed {len(input_data)} items" + +# Usage examples +customer = {"name": "Alice Johnson", "email": "alice@example.com"} +result1 = onboard_customer(customer) +print(f"šŸ“‹ Onboarding result: {result1}") + +data_items = ["item1", "item2", "item3", "item4", "item5"] +result2 = process_data(data_items) +print(f"šŸ“‹ Processing result: {result2}") ``` ### Adding Tags to Traces @@ -55,12 +73,28 @@ Tags help categorize and filter traces in your dashboard: @trace(tags=["production", "high-priority"]) def critical_workflow(): """Critical production workflow""" + print("🚨 Executing critical production workflow...") + print("⚔ High priority processing...") + print("āœ… Critical task completed successfully!") return "Critical task completed" @trace(name="user-analysis", tags=["analytics", "user-behavior"]) def analyze_user_behavior(user_id): """Analyze user behavior patterns""" - return f"Analysis for user {user_id}" + print(f"šŸ” Analyzing behavior for user: {user_id}") + print("šŸ“ˆ Gathering user interaction data...") + print("🧠 Running behavior analysis algorithms...") + print("āœ… User behavior analysis complete!") + return f"Analysis complete for user {user_id}" + +# Usage examples 
+print("šŸŽ¬ Running critical workflow...") +result1 = critical_workflow() +print(f"šŸ“Š Critical workflow result: {result1}") + +print("\nšŸŽ¬ Running user analysis...") +result2 = analyze_user_behavior("user_12345") +print(f"šŸ“Š Analysis result: {result2}") ``` ## Integration with Other Decorators @@ -70,43 +104,72 @@ def analyze_user_behavior(user_id): The `@trace` decorator works seamlessly with other AgentOps decorators: ```python +import agentops from agentops.sdk.decorators import trace, agent, operation, tool +# Initialize AgentOps without auto-starting session since we use @trace +agentops.init("your-api-key", auto_start_session=False) + @agent class DataAnalysisAgent: + def __init__(self): + print("šŸ¤– DataAnalysisAgent initialized") + @operation def collect_data(self, source): - return f"Data collected from {source}" + print(f"šŸ“Š Collecting data from {source}...") + data = f"Data collected from {source}" + print(f"āœ… Data collection complete: {data}") + return data @tool(cost=0.05) def analyze_data(self, data): - return f"Analysis of {data}" + print(f"🧠 Analyzing data: {data}") + analysis = f"Analysis of {data}" + print(f"āœ… Analysis complete: {analysis}") + return analysis @operation def generate_report(self, analysis): - return f"Report: {analysis}" + print(f"šŸ“ Generating report from: {analysis}") + report = f"Report: {analysis}" + print(f"āœ… Report generated: {report}") + return report @trace(name="complete-analysis-workflow") def run_analysis_workflow(data_source): """Complete data analysis workflow""" + print(f"šŸš€ Starting analysis workflow for: {data_source}") + print("=" * 50) + agent = DataAnalysisAgent() # Collect data + print("\nšŸ“‹ Step 1: Data Collection") data = agent.collect_data(data_source) # Analyze data + print("\nšŸ“‹ Step 2: Data Analysis") analysis = agent.analyze_data(data) # Generate report + print("\nšŸ“‹ Step 3: Report Generation") report = agent.generate_report(analysis) + print("\nšŸŽ‰ Workflow completed 
successfully!") + print("=" * 50) + return { "source": data_source, "report": report } # Usage +print("šŸŽ¬ Running complete analysis workflow...") result = run_analysis_workflow("customer_database") +print(f"\nšŸ“Š Final Result:") +print(f" Source: {result['source']}") +print(f" Report: {result['report']}") ``` ## Async Function Support @@ -115,33 +178,56 @@ The `@trace` decorator fully supports async functions: ```python import asyncio +import agentops from agentops.sdk.decorators import trace, operation +# Initialize AgentOps without auto-starting session since we use @trace +agentops.init("your-api-key", auto_start_session=False) + @operation async def fetch_user_data(user_id): """Simulate async data fetching""" + print(f"🌐 Fetching data for user: {user_id}") await asyncio.sleep(1) # Simulate API call - return f"User data for {user_id}" + data = f"User data for {user_id}" + print(f"āœ… Data fetched: {data}") + return data @operation async def process_user_data(user_data): """Simulate async data processing""" + print(f"āš™ļø Processing user data: {user_data}") await asyncio.sleep(0.5) # Simulate processing - return f"Processed: {user_data}" + processed = f"Processed: {user_data}" + print(f"āœ… Processing complete: {processed}") + return processed @trace(name="async-user-workflow") async def async_user_workflow(user_id): """Async workflow for user processing""" + print(f"šŸš€ Starting async workflow for user: {user_id}") + print("=" * 45) + + print("\nšŸ“‹ Step 1: Fetching user data") user_data = await fetch_user_data(user_id) + + print("\nšŸ“‹ Step 2: Processing user data") processed_data = await process_user_data(user_data) + + print("\nšŸŽ‰ Async workflow completed!") + print("=" * 45) + return processed_data # Usage async def main(): + print("šŸŽ¬ Running async user workflow...") result = await async_user_workflow("user_123") - print(result) + print(f"\nšŸ“Š Final Result: {result}") + print("✨ Check your AgentOps dashboard to see the traced async workflow!") 
# Run the async workflow +print("šŸ”„ Starting async demo...") asyncio.run(main()) ``` @@ -152,23 +238,38 @@ asyncio.run(main()) The `@trace` decorator automatically handles exceptions and sets appropriate trace states: ```python +import agentops +from agentops.sdk.decorators import trace + +# Initialize AgentOps without auto-starting session since we use @trace +agentops.init("your-api-key", auto_start_session=False) + @trace(name="error-prone-workflow") def risky_operation(): """Operation that might fail""" import random + print("šŸŽ² Running risky operation...") + print("āš ļø This operation has a 50% chance of failure") + if random.random() < 0.5: + print("āŒ Operation failed!") raise ValueError("Random failure occurred") + print("āœ… Operation succeeded!") return "Operation succeeded" # The trace will automatically be marked with failure state if an exception occurs -try: - result = risky_operation() - print(f"Success: {result}") -except ValueError as e: - print(f"Operation failed: {e}") - # Trace is automatically ended with error state +print("šŸŽ¬ Testing automatic error handling...") +for i in range(3): + print(f"\nšŸ”„ Attempt {i+1}:") + try: + result = risky_operation() + print(f"šŸ“Š Success: {result}") + break + except ValueError as e: + print(f"šŸ“Š Operation failed: {e}") + print("šŸ” Trace automatically ended with error state") ``` ### Custom Error Handling @@ -179,25 +280,43 @@ You can implement custom error handling within traced functions: @trace(name="robust-workflow") def robust_operation(data): """Operation with custom error handling""" + print(f"šŸš€ Starting robust operation with data: {data}") + try: # Risky operation if not data: + print("āš ļø No data provided!") raise ValueError("No data provided") # Process data + print("āš™ļø Processing data...") result = f"Processed: {data}" + print(f"āœ… Processing successful: {result}") return {"success": True, "result": result} except ValueError as e: # Handle specific errors + print(f"āŒ 
Validation error: {e}") return {"success": False, "error": str(e)} except Exception as e: # Handle unexpected errors + print(f"šŸ’„ Unexpected error: {e}") return {"success": False, "error": f"Unexpected error: {str(e)}"} -# Usage -result1 = robust_operation("valid_data") # Success trace -result2 = robust_operation("") # Failure trace with custom handling +# Usage examples +print("\nšŸŽ¬ Testing custom error handling...") + +print("\nšŸ“‹ Test 1: Valid data") +result1 = robust_operation("valid_data") +print(f"šŸ“Š Result: {result1}") + +print("\nšŸ“‹ Test 2: Empty data") +result2 = robust_operation("") +print(f"šŸ“Š Result: {result2}") + +print("\nšŸ“‹ Test 3: None data") +result3 = robust_operation(None) +print(f"šŸ“Š Result: {result3}") ``` ## Real-World Examples @@ -206,59 +325,84 @@ result2 = robust_operation("") # Failure trace with custom handling ```python from agentops.sdk.decorators import trace, agent, operation, tool +from openai import OpenAI import agentops agentops.init("your-api-key", auto_start_session=False) @agent class OrderProcessor: + def __init__(self): + print("šŸ›’ OrderProcessor initialized") + @tool(cost=0.01) def validate_payment(self, payment_info): """Payment validation service""" - return {"valid": True, "transaction_id": "txn_123"} + print(f"šŸ’³ Validating payment: {payment_info['card']}") + result = {"valid": True, "transaction_id": "txn_123"} + print(f"āœ… Payment validation successful: {result['transaction_id']}") + return result @tool(cost=0.02) def check_inventory(self, product_id, quantity): """Inventory check service""" - return {"available": True, "reserved": quantity} + print(f"šŸ“¦ Checking inventory for {product_id} (qty: {quantity})") + result = {"available": True, "reserved": quantity} + print(f"āœ… Inventory check complete: {quantity} units available") + return result @operation def calculate_shipping(self, address, items): """Calculate shipping costs""" - return {"cost": 9.99, "method": "standard"} + print(f"🚚 
Calculating shipping to {address['city']}, {address['state']}") + result = {"cost": 9.99, "method": "standard"} + print(f"āœ… Shipping calculated: ${result['cost']} ({result['method']})") + return result @tool(cost=0.005) def send_confirmation_email(self, email, order_details): """Email service""" - return f"Confirmation sent to {email}" + print(f"šŸ“§ Sending confirmation email to {email}") + result = f"Confirmation sent to {email}" + print(f"āœ… Email sent successfully") + return result @trace(name="order-processing", tags=["ecommerce", "orders"]) def process_order(order_data): """Complete order processing workflow""" + print(f"šŸš€ Starting order processing for {order_data['customer_email']}") + print("=" * 60) + processor = OrderProcessor() try: # Validate payment + print("\nšŸ“‹ Step 1: Payment Validation") payment_result = processor.validate_payment(order_data["payment"]) if not payment_result["valid"]: + print("āŒ Payment validation failed!") return {"success": False, "error": "Payment validation failed"} # Check inventory for all items + print("\nšŸ“‹ Step 2: Inventory Check") for item in order_data["items"]: inventory_result = processor.check_inventory( item["product_id"], item["quantity"] ) if not inventory_result["available"]: + print(f"āŒ Item {item['product_id']} not available!") return {"success": False, "error": f"Item {item['product_id']} not available"} # Calculate shipping + print("\nšŸ“‹ Step 3: Shipping Calculation") shipping = processor.calculate_shipping( order_data["shipping_address"], order_data["items"] ) # Send confirmation + print("\nšŸ“‹ Step 4: Confirmation Email") confirmation = processor.send_confirmation_email( order_data["customer_email"], { @@ -268,6 +412,9 @@ def process_order(order_data): } ) + print("\nšŸŽ‰ Order processing completed successfully!") + print("=" * 60) + return { "success": True, "order_id": "ORD_12345", @@ -277,9 +424,12 @@ def process_order(order_data): } except Exception as e: + print(f"šŸ’„ Order processing 
failed: {e}") return {"success": False, "error": str(e)} # Usage +print("šŸŽ¬ Running e-commerce order processing demo...") + order = { "customer_email": "customer@example.com", "payment": {"card": "****1234", "amount": 99.99}, @@ -288,85 +438,126 @@ order = { } result = process_order(order) + +print(f"\nšŸ“Š ORDER PROCESSING RESULT:") +print(f" Success: {result['success']}") +if result['success']: + print(f" Order ID: {result['order_id']}") + print(f" Transaction: {result['payment']['transaction_id']}") + print(f" Shipping: ${result['shipping']['cost']}") +else: + print(f" Error: {result['error']}") ``` -### Machine Learning Pipeline +### Data Analysis Workflow ```python -from agentops.sdk.decorators import trace, operation, tool - -@tool(cost=0.10) -def load_dataset(dataset_path): - """Data loading service""" - return f"Dataset loaded from {dataset_path}" - -@operation -def preprocess_data(raw_data): - """Data preprocessing""" - return f"Preprocessed: {raw_data}" - -@tool(cost=0.50) -def train_model(processed_data, model_config): - """Model training service""" - return { - "model_id": "model_v1.0", - "accuracy": 0.95, - "training_time": "2h 30m" - } - -@operation -def evaluate_model(model, test_data): - """Model evaluation""" - return { - "accuracy": 0.94, - "precision": 0.93, - "recall": 0.95 - } +from agentops.sdk.decorators import trace, agent, operation, tool +from openai import OpenAI +import agentops -@tool(cost=0.05) -def deploy_model(model, deployment_config): - """Model deployment service""" - return { - "endpoint": "https://api.example.com/model/v1", - "status": "deployed" - } +agentops.init("your-api-key", auto_start_session=False) -@trace(name="ml-pipeline", tags=["machine-learning", "training"]) -def ml_training_pipeline(dataset_path, model_config, deployment_config): - """Complete ML training and deployment pipeline""" +@agent +class DataAnalysisAgent: + def __init__(self): + self.client = OpenAI() + print("šŸ¤– DataAnalysisAgent initialized") - # 
Load and preprocess data - raw_data = load_dataset(dataset_path) - processed_data = preprocess_data(raw_data) + @operation + def collect_data(self, source): + """Simulate data collection""" + print(f"šŸ“Š Collecting data from {source}...") + data = f"Raw data collected from {source}: [sample_data_1, sample_data_2, sample_data_3]" + print(f"āœ… Data collection complete: {len(data)} characters collected") + return data - # Train model - model = train_model(processed_data, model_config) + @operation + def analyze_data_with_llm(self, data): + """Use LLM to analyze the collected data""" + print("🧠 Analyzing data with LLM...") + response = self.client.chat.completions.create( + model="gpt-4o", + messages=[ + {"role": "system", "content": "You are a data analyst. Analyze the provided data and give insights."}, + {"role": "user", "content": f"Please analyze this data: {data}"} + ] + ) + analysis = response.choices[0].message.content + print(f"āœ… LLM analysis complete: {len(analysis)} characters generated") + return analysis - # Evaluate model - evaluation = evaluate_model(model, processed_data) + @tool(cost=0.05) + def generate_visualization(self, analysis): + """Generate data visualization""" + print("šŸ“ˆ Generating visualization...") + visualization = f"Chart generated for: {analysis[:50]}..." 
+ print(f"āœ… Visualization generated: {visualization}") + return visualization - # Deploy if evaluation meets criteria - if evaluation["accuracy"] > 0.90: - deployment = deploy_model(model, deployment_config) - return { - "success": True, - "model": model, - "evaluation": evaluation, - "deployment": deployment - } - else: - return { - "success": False, - "reason": "Model accuracy below threshold", - "evaluation": evaluation - } + @operation + def generate_report(self, analysis, visualization): + """Generate final report using LLM""" + print("šŸ“ Generating final report with LLM...") + response = self.client.chat.completions.create( + model="gpt-4o", + messages=[ + {"role": "system", "content": "You are a report writer. Create a professional data analysis report."}, + {"role": "user", "content": f"Create a report based on this analysis: {analysis} and visualization: {visualization}"} + ] + ) + report = response.choices[0].message.content + print(f"āœ… Final report generated: {len(report)} characters") + return report + +@trace(name="data-analysis-workflow", tags=["analytics", "reporting"]) +def run_data_analysis(data_source): + """Complete data analysis workflow with LLM integration""" + print(f"šŸš€ Starting data analysis workflow for: {data_source}") + print("=" * 60) + + agent = DataAnalysisAgent() + + # Collect data + print("\nšŸ“‹ Step 1: Data Collection") + raw_data = agent.collect_data(data_source) + + # Analyze data using LLM + print("\nšŸ“‹ Step 2: LLM Analysis") + analysis = agent.analyze_data_with_llm(raw_data) + + # Generate visualization + print("\nšŸ“‹ Step 3: Visualization Generation") + visualization = agent.generate_visualization(analysis) + + # Generate final report using LLM + print("\nšŸ“‹ Step 4: Report Generation") + report = agent.generate_report(analysis, visualization) + + print("\nšŸŽ‰ Workflow completed successfully!") + print("=" * 60) + + return { + "source": data_source, + "raw_data": raw_data, + "analysis": analysis, + 
"visualization": visualization, + "final_report": report + } # Usage -pipeline_result = ml_training_pipeline( - dataset_path="/data/training_set.csv", - model_config={"algorithm": "random_forest", "max_depth": 10}, - deployment_config={"environment": "production", "replicas": 3} -) +print("šŸŽ¬ Running data analysis workflow demo...") + +result = run_data_analysis("customer_database") + +print(f"\nšŸ“Š ANALYSIS RESULTS:") +print(f" Data Source: {result['source']}") +print(f" Raw Data: {result['raw_data'][:80]}...") +print(f" Analysis Preview: {result['analysis'][:100]}...") +print(f" Visualization: {result['visualization']}") +print(f" Final Report Preview: {result['final_report'][:150]}...") + +print(f"\n✨ Analysis complete! Check your AgentOps dashboard to see the traced workflow.") ``` ## Best Practices diff --git a/docs/v2/usage/tracking-agents.mdx b/docs/v2/usage/tracking-agents.mdx index 096ecf54c..b1331cc22 100644 --- a/docs/v2/usage/tracking-agents.mdx +++ b/docs/v2/usage/tracking-agents.mdx @@ -3,42 +3,18 @@ title: "Tracking Agents" description: "Associate operations with specific named agents" --- -AgentOps automatically tracks LLM interactions in your application. For more detailed tracking, especially in multi-agent systems, you can use additional features to associate operations with specific agents. +AgentOps automatically tracks LLM interactions in your application. For more detailed tracking, especially in multi-agent systems, you can use the `@agent` decorator to associate operations with specific agents. 
-## Basic Agent Tracking +## Using the Agent Decorator -For simple applications, AgentOps will automatically track your LLM calls without additional configuration: - -```python -import agentops -from openai import OpenAI - -# Initialize AgentOps -agentops.init("your-api-key") - -# Create a simple agent function -def research_agent(query): - client = OpenAI() - response = client.chat.completions.create( - model="gpt-4o", - messages=[{"role": "user", "content": f"Research about: {query}"}] - ) - return response.choices[0].message.content - -# Use your agent - all LLM calls will be tracked automatically -result = research_agent("quantum computing") -``` - -## Advanced: Using the Agent Decorator - -For more structured tracking in complex applications, you can use the `@agent` decorator to explicitly identify different agents in your system: +For structured tracking in complex applications, you can use the `@agent` decorator to explicitly identify different agents in your system: ```python import agentops from agentops.sdk.decorators import agent, operation, trace from openai import OpenAI -# Initialize AgentOps +# Initialize AgentOps without auto-starting session since we use @trace agentops.init("your-api-key", auto_start_session=False) # Create a decorated agent class @@ -75,13 +51,41 @@ class ResearchAgent: pass ``` +## Basic Agent Tracking (Simple Applications) + +For simple applications, AgentOps will automatically track your LLM calls without additional configuration: + +```python +import agentops +from openai import OpenAI + +# Initialize AgentOps +agentops.init("your-api-key") + +# Create a simple agent function +def research_agent(query): + client = OpenAI() + response = client.chat.completions.create( + model="gpt-4o", + messages=[{"role": "user", "content": f"Research about: {query}"}] + ) + return response.choices[0].message.content + +# Use your agent - all LLM calls will be tracked automatically +result = research_agent("quantum computing") +``` + ## 
Multi-Agent Systems For complex multi-agent systems, you can organize multiple agents within a single trace: ```python +import agentops from agentops.sdk.decorators import agent, operation, tool, trace +# Initialize AgentOps without auto-starting session since we use @trace +agentops.init("your-api-key", auto_start_session=False) + @agent class DataCollectionAgent: @tool(cost=0.02) @@ -131,8 +135,12 @@ result = collaborative_workflow("customer_database") You can track complex agent interactions and communication patterns: ```python +import agentops from agentops.sdk.decorators import agent, operation, tool, trace +# Initialize AgentOps without auto-starting session since we use @trace +agentops.init("your-api-key", auto_start_session=False) + @agent class CoordinatorAgent: def __init__(self): @@ -217,18 +225,18 @@ All operations are automatically associated with the agent that originated them. If you're migrating from the legacy `@session` decorator, replace it with the `@trace` decorator: ```python -# Old approach (deprecated) -from agentops.sdk.decorators import session, agent +# New approach (recommended) +from agentops.sdk.decorators import trace, agent -@session +@trace(name="my-workflow") def my_workflow(): # workflow code pass -# New approach (recommended) -from agentops.sdk.decorators import trace, agent +# Old approach (deprecated) +from agentops.sdk.decorators import session, agent -@trace(name="my-workflow") +@session def my_workflow(): # workflow code pass From 26fe0710810db03734377fd3dcef2454d78b9e54 Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Thu, 29 May 2025 02:33:11 +0530 Subject: [PATCH 3/4] Add Google Generative AI instrumentation module - Introduced a new module for Google Generative AI (Gemini) API instrumentation, including `GoogleGenAIInstrumentor` for capturing telemetry data. - Added attribute extraction functions for model and chat interactions, enhancing observability of API calls. 
- Implemented wrappers for synchronous and asynchronous streaming methods to track performance metrics. - Updated documentation to include usage examples and details on supported features for the new instrumentation. - Refactored existing code to accommodate the new module structure and improve clarity. These changes enhance the AgentOps SDK by providing comprehensive support for monitoring Google Generative AI interactions. --- agentops/instrumentation/__init__.py | 4 +- .../README.md | 0 .../__init__.py | 4 +- .../attributes/__init__.py | 6 +- .../attributes/chat.py | 4 +- .../attributes/common.py | 2 +- .../attributes/model.py | 2 +- .../instrumentor.py | 8 +- .../stream_wrapper.py | 4 +- docs/mint.json | 1 + docs/v1/examples/examples.mdx | 30 +- docs/v1/introduction.mdx | 22 +- docs/v2/integrations/ag2.mdx | 159 ++++++ docs/v2/integrations/autogen.mdx | 537 ++++++++++++++---- docs/v2/introduction.mdx | 3 +- 15 files changed, 621 insertions(+), 165 deletions(-) rename agentops/instrumentation/{google_generativeai => google_genai}/README.md (100%) rename agentops/instrumentation/{google_generativeai => google_genai}/__init__.py (87%) rename agentops/instrumentation/{google_generativeai => google_genai}/attributes/__init__.py (70%) rename agentops/instrumentation/{google_generativeai => google_genai}/attributes/chat.py (96%) rename agentops/instrumentation/{google_generativeai => google_genai}/attributes/common.py (97%) rename agentops/instrumentation/{google_generativeai => google_genai}/attributes/model.py (99%) rename agentops/instrumentation/{google_generativeai => google_genai}/instrumentor.py (96%) rename agentops/instrumentation/{google_generativeai => google_genai}/stream_wrapper.py (98%) create mode 100644 docs/v2/integrations/ag2.mdx diff --git a/agentops/instrumentation/__init__.py b/agentops/instrumentation/__init__.py index b7916e62a..d4e271f3d 100644 --- a/agentops/instrumentation/__init__.py +++ b/agentops/instrumentation/__init__.py @@ -183,8 
+183,8 @@ class InstrumentorConfig(TypedDict): "min_version": "0.1.0", }, "google.genai": { - "module_name": "agentops.instrumentation.google_generativeai", - "class_name": "GoogleGenerativeAIInstrumentor", + "module_name": "agentops.instrumentation.google_genai", + "class_name": "GoogleGenAIInstrumentor", "min_version": "0.1.0", "package_name": "google-genai", # Actual pip package name }, diff --git a/agentops/instrumentation/google_generativeai/README.md b/agentops/instrumentation/google_genai/README.md similarity index 100% rename from agentops/instrumentation/google_generativeai/README.md rename to agentops/instrumentation/google_genai/README.md diff --git a/agentops/instrumentation/google_generativeai/__init__.py b/agentops/instrumentation/google_genai/__init__.py similarity index 87% rename from agentops/instrumentation/google_generativeai/__init__.py rename to agentops/instrumentation/google_genai/__init__.py index f4faf7d65..6a7ee24fa 100644 --- a/agentops/instrumentation/google_generativeai/__init__.py +++ b/agentops/instrumentation/google_genai/__init__.py @@ -31,10 +31,10 @@ def get_version() -> str: logger = logging.getLogger(__name__) # Import after defining constants to avoid circular imports -from agentops.instrumentation.google_generativeai.instrumentor import GoogleGenerativeAIInstrumentor # noqa: E402 +from agentops.instrumentation.google_genai.instrumentor import GoogleGenAIInstrumentor # noqa: E402 __all__ = [ "LIBRARY_NAME", "LIBRARY_VERSION", - "GoogleGenerativeAIInstrumentor", + "GoogleGenAIInstrumentor", ] diff --git a/agentops/instrumentation/google_generativeai/attributes/__init__.py b/agentops/instrumentation/google_genai/attributes/__init__.py similarity index 70% rename from agentops/instrumentation/google_generativeai/attributes/__init__.py rename to agentops/instrumentation/google_genai/attributes/__init__.py index 243549c99..94407d6cb 100644 --- a/agentops/instrumentation/google_generativeai/attributes/__init__.py +++ 
b/agentops/instrumentation/google_genai/attributes/__init__.py @@ -1,16 +1,16 @@ """Attribute extractors for Google Generative AI instrumentation.""" -from agentops.instrumentation.google_generativeai.attributes.common import ( +from agentops.instrumentation.google_genai.attributes.common import ( get_common_instrumentation_attributes, extract_request_attributes, ) -from agentops.instrumentation.google_generativeai.attributes.model import ( +from agentops.instrumentation.google_genai.attributes.model import ( get_model_attributes, get_generate_content_attributes, get_stream_attributes, get_token_counting_attributes, ) -from agentops.instrumentation.google_generativeai.attributes.chat import ( +from agentops.instrumentation.google_genai.attributes.chat import ( get_chat_attributes, ) diff --git a/agentops/instrumentation/google_generativeai/attributes/chat.py b/agentops/instrumentation/google_genai/attributes/chat.py similarity index 96% rename from agentops/instrumentation/google_generativeai/attributes/chat.py rename to agentops/instrumentation/google_genai/attributes/chat.py index cc29856d9..7b9c3a8ac 100644 --- a/agentops/instrumentation/google_generativeai/attributes/chat.py +++ b/agentops/instrumentation/google_genai/attributes/chat.py @@ -5,11 +5,11 @@ from agentops.logging import logger from agentops.semconv import SpanAttributes, LLMRequestTypeValues, MessageAttributes from agentops.instrumentation.common.attributes import AttributeMap -from agentops.instrumentation.google_generativeai.attributes.common import ( +from agentops.instrumentation.google_genai.attributes.common import ( extract_request_attributes, get_common_instrumentation_attributes, ) -from agentops.instrumentation.google_generativeai.attributes.model import ( +from agentops.instrumentation.google_genai.attributes.model import ( _extract_content_from_prompt, _set_response_attributes, ) diff --git a/agentops/instrumentation/google_generativeai/attributes/common.py 
b/agentops/instrumentation/google_genai/attributes/common.py similarity index 97% rename from agentops/instrumentation/google_generativeai/attributes/common.py rename to agentops/instrumentation/google_genai/attributes/common.py index 4e2b67d5b..da158d291 100644 --- a/agentops/instrumentation/google_generativeai/attributes/common.py +++ b/agentops/instrumentation/google_genai/attributes/common.py @@ -9,7 +9,7 @@ get_common_attributes, _extract_attributes_from_mapping, ) -from agentops.instrumentation.google_generativeai import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.google_genai import LIBRARY_NAME, LIBRARY_VERSION # Common mapping for config parameters REQUEST_CONFIG_ATTRIBUTES: AttributeMap = { diff --git a/agentops/instrumentation/google_generativeai/attributes/model.py b/agentops/instrumentation/google_genai/attributes/model.py similarity index 99% rename from agentops/instrumentation/google_generativeai/attributes/model.py rename to agentops/instrumentation/google_genai/attributes/model.py index 8082d4263..022a4fbac 100644 --- a/agentops/instrumentation/google_generativeai/attributes/model.py +++ b/agentops/instrumentation/google_genai/attributes/model.py @@ -5,7 +5,7 @@ from agentops.logging import logger from agentops.semconv import SpanAttributes, LLMRequestTypeValues, MessageAttributes from agentops.instrumentation.common.attributes import AttributeMap -from agentops.instrumentation.google_generativeai.attributes.common import ( +from agentops.instrumentation.google_genai.attributes.common import ( extract_request_attributes, get_common_instrumentation_attributes, ) diff --git a/agentops/instrumentation/google_generativeai/instrumentor.py b/agentops/instrumentation/google_genai/instrumentor.py similarity index 96% rename from agentops/instrumentation/google_generativeai/instrumentor.py rename to agentops/instrumentation/google_genai/instrumentor.py index 85d93e972..023cd5add 100644 --- 
a/agentops/instrumentation/google_generativeai/instrumentor.py +++ b/agentops/instrumentation/google_genai/instrumentor.py @@ -16,12 +16,12 @@ from agentops.logging import logger from agentops.instrumentation.common.wrappers import WrapConfig, wrap, unwrap -from agentops.instrumentation.google_generativeai import LIBRARY_NAME, LIBRARY_VERSION -from agentops.instrumentation.google_generativeai.attributes.model import ( +from agentops.instrumentation.google_genai import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.google_genai.attributes.model import ( get_generate_content_attributes, get_token_counting_attributes, ) -from agentops.instrumentation.google_generativeai.stream_wrapper import ( +from agentops.instrumentation.google_genai.stream_wrapper import ( generate_content_stream_wrapper, generate_content_stream_async_wrapper, ) @@ -96,7 +96,7 @@ ] -class GoogleGenerativeAIInstrumentor(BaseInstrumentor): +class GoogleGenAIInstrumentor(BaseInstrumentor): """An instrumentor for Google Generative AI (Gemini) API. 
This class provides instrumentation for Google's Generative AI API by wrapping key methods diff --git a/agentops/instrumentation/google_generativeai/stream_wrapper.py b/agentops/instrumentation/google_genai/stream_wrapper.py similarity index 98% rename from agentops/instrumentation/google_generativeai/stream_wrapper.py rename to agentops/instrumentation/google_genai/stream_wrapper.py index 61868ecbc..9b61cee62 100644 --- a/agentops/instrumentation/google_generativeai/stream_wrapper.py +++ b/agentops/instrumentation/google_genai/stream_wrapper.py @@ -14,11 +14,11 @@ from agentops.semconv import SpanAttributes, LLMRequestTypeValues, CoreAttributes, MessageAttributes from agentops.instrumentation.common.wrappers import _with_tracer_wrapper -from agentops.instrumentation.google_generativeai.attributes.model import ( +from agentops.instrumentation.google_genai.attributes.model import ( get_generate_content_attributes, get_stream_attributes, ) -from agentops.instrumentation.google_generativeai.attributes.common import ( +from agentops.instrumentation.google_genai.attributes.common import ( extract_request_attributes, ) diff --git a/docs/mint.json b/docs/mint.json index 2d106dab7..f0dbe37cb 100644 --- a/docs/mint.json +++ b/docs/mint.json @@ -168,6 +168,7 @@ "group": "Integrations", "pages": [ "v2/integrations/anthropic", + "v2/integrations/ag2", "v2/integrations/autogen", "v2/integrations/crewai", "v2/integrations/google_adk", diff --git a/docs/v1/examples/examples.mdx b/docs/v1/examples/examples.mdx index 040c3369b..7466084b9 100644 --- a/docs/v1/examples/examples.mdx +++ b/docs/v1/examples/examples.mdx @@ -21,7 +21,7 @@ mode: "wide" Manage multiple sessions at the same time - } iconType="image" href="/v1/integrations/openai" href="/v1/examples/openai_assistants"> + } iconType="image" href="/v1/integrations/openai" href="/v1/examples/openai_assistants"> Observe OpenAI Assistants @@ -33,19 +33,19 @@ mode: "wide" Integration with AI21's language models - } 
iconType="image" href="/v1/integrations/anthropic"> + } iconType="image" href="/v1/integrations/anthropic"> Track observations from Claude, Haiku and Sonnet series of models - } iconType="image" href="/v1/integrations/autogen"> + } iconType="image" href="/v1/integrations/autogen"> AG2 (Formerly AutoGen) multi-agent conversible workflow with tool usage - } iconType="image" href="/v1/examples/camel"> + } iconType="image" href="/v1/examples/camel"> Track and analyze CAMEL agents - } iconType="image" href="/v1/integrations/cohere"> + } iconType="image" href="/v1/integrations/cohere"> First class support for Command-R-Plus and chat streaming @@ -57,15 +57,15 @@ mode: "wide" Ultra-fast LLM inference with Groq Cloud - } iconType="image" href="/v1/integrations/gemini"> + } iconType="image" href="/v1/integrations/gemini"> Explore Google DeepMind's Gemini with observation via AgentOps - } iconType="image" href="/v1/integrations/haystack"> + } iconType="image" href="/v1/integrations/haystack"> Monitor your Haystack agents with AgentOps - } iconType="image" href="/v1/examples/langchain"> + } iconType="image" href="/v1/examples/langchain"> Jupyter Notebook with a sample LangChain integration @@ -77,7 +77,7 @@ mode: "wide" Unified interface for multiple LLM providers - } iconType="image" href="/v1/integrations/mistral"> + } iconType="image" href="/v1/integrations/mistral"> Support for Mistral AI's open-weight models @@ -85,11 +85,11 @@ mode: "wide" Create an autonomous browser agent capable of navigating the web and extracting information - } iconType="image" href="/v1/examples/ollama"> + } iconType="image" href="/v1/examples/ollama"> Simple Ollama integration with AgentOps - } iconType="image" href="/v1/integrations/openai"> + } iconType="image" href="/v1/integrations/openai"> First class support for GPT family of models @@ -97,19 +97,19 @@ mode: "wide" Create a REST server that performs and observes agent tasks - } iconType="image" iconType="solid" 
href="/v1/integrations/smolagents"> + } iconType="image" iconType="solid" href="/v1/integrations/smolagents"> Track HuggingFace's smolagents with AgentOps seamlessly - } iconType="image" href="/v1/integrations/swarmzero"> + } iconType="image" href="/v1/integrations/swarmzero"> SwarmZero multi-agent framework for AI Agents and AI Swarms with AgentOps support - } iconType="image" href="/v1/integrations/taskweaver"> + } iconType="image" href="/v1/integrations/taskweaver"> First class support for Microsoft TaskWeaver - } iconType="image" href="/v1/integrations/xai"> + } iconType="image" href="/v1/integrations/xai"> Observe the power of Grok and Grok Vision with AgentOps diff --git a/docs/v1/introduction.mdx b/docs/v1/introduction.mdx index 470a6b405..38f563c01 100644 --- a/docs/v1/introduction.mdx +++ b/docs/v1/introduction.mdx @@ -7,13 +7,13 @@ mode: "wide" ## Integrate with developer favorite agent frameworks - } iconType="image" href="/v1/integrations/agentssdk" /> - } iconType="image" href="/v1/integrations/crewai" /> - } iconType="image" href="/v1/integrations/autogen" /> - } iconType="image" href="/v1/integrations/autogen" /> - } iconType="image" href="/v1/integrations/anthropic" /> - } iconType="image" href="/v1/integrations/ollama" /> - } iconType="image" href="/v1/integrations/cohere" /> + } iconType="image" href="/v1/integrations/agentssdk" /> + } iconType="image" href="/v1/integrations/crewai" /> + } iconType="image" href="/v1/integrations/autogen" /> + } iconType="image" href="/v1/integrations/autogen" /> + } iconType="image" href="/v1/integrations/anthropic" /> + } iconType="image" href="/v1/integrations/ollama" /> + } iconType="image" href="/v1/integrations/cohere" /> @@ -42,12 +42,12 @@ You also get helpful debugging info such as any SDK versions you were on if you' LLM calls are presented as a familiar chat history view, and charts give you a breakdown of the types of events that were called and how long they took. 
- + Find any past sessions from your Session Drawer. - + Most powerful of all is the Session Waterfall. On the left, a time visualization of all your LLM calls, Action events, Tool calls, and Errors. @@ -55,14 +55,14 @@ On the right, specific details about the event you've selected on the waterfall. Most of which has been automatically recorded for you. - + ### Session Overview View a meta-analysis of all of your sessions in a single view. - + diff --git a/docs/v2/integrations/ag2.mdx b/docs/v2/integrations/ag2.mdx new file mode 100644 index 000000000..33a243777 --- /dev/null +++ b/docs/v2/integrations/ag2.mdx @@ -0,0 +1,159 @@ +--- +title: AG2 +description: "Track and analyze your AG2 agents with AgentOps" +--- + +import CodeTooltip from '/snippets/add-code-tooltip.mdx' +import EnvTooltip from '/snippets/add-env-tooltip.mdx' + +## Installation + + + ```bash pip + pip install agentops pyautogen + ``` + ```bash poetry + poetry add agentops pyautogen + ``` + + +## Usage + +Initialize AgentOps at the beginning of your application to automatically track all AG2 agent interactions: + + +```python Python +import agentops +import autogen + +# Initialize AgentOps +agentops.init() + +# Configure your AG2 agents +config_list = [ + { + "model": "gpt-4", + "api_key": "" + } +] + +llm_config = { + "config_list": config_list, + "timeout": 60, +} + +# Create AG2 agents +assistant = autogen.AssistantAgent( + name="assistant", + llm_config=llm_config, + system_message="You are a helpful AI assistant." +) + +user_proxy = autogen.UserProxyAgent( + name="user_proxy", + human_input_mode="TERMINATE", + max_consecutive_auto_reply=10, + is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"), + code_execution_config={"last_n_messages": 3, "work_dir": "coding"}, +) + +# Initiate a conversation +user_proxy.initiate_chat( + assistant, + message="How can I implement a basic web scraper in Python?" 
+) + +# All agent interactions are automatically tracked by AgentOps +``` + + +## Multi-Agent Conversation Example + +AgentOps tracks interactions across multiple AG2 agents: + + +```python Python +import agentops +import autogen + +# Initialize AgentOps +agentops.init() + +# Configure LLM +config_list = [ + { + "model": "gpt-4", + "api_key": "" + } +] + +llm_config = { + "config_list": config_list, + "timeout": 60, +} + +# Create a team of agents +researcher = autogen.AssistantAgent( + name="researcher", + llm_config=llm_config, + system_message="You are a researcher who specializes in finding accurate information." +) + +coder = autogen.AssistantAgent( + name="coder", + llm_config=llm_config, + system_message="You are an expert programmer who writes clean, efficient code." +) + +critic = autogen.AssistantAgent( + name="critic", + llm_config=llm_config, + system_message="You review solutions and provide constructive feedback." +) + +user_proxy = autogen.UserProxyAgent( + name="user_proxy", + human_input_mode="TERMINATE", + max_consecutive_auto_reply=10, + is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"), + code_execution_config={"last_n_messages": 3, "work_dir": "coding"}, +) + +# Create a group chat +groupchat = autogen.GroupChat( + agents=[user_proxy, researcher, coder, critic], + messages=[], + max_round=12 +) + +manager = autogen.GroupChatManager( + groupchat=groupchat, + llm_config=llm_config +) + +# Initiate the group chat +user_proxy.initiate_chat( + manager, + message="Create a Python program to analyze sentiment from Twitter data."
+) + +# All agent interactions across the group chat are automatically tracked by AgentOps +``` + + +## Environment Variables + + + + ```python .env + AGENTOPS_API_KEY= + OPENAI_API_KEY= + ``` + + +Read more about environment variables in [Advanced Configuration](/v2/usage/advanced-configuration) + + + + + diff --git a/docs/v2/integrations/autogen.mdx b/docs/v2/integrations/autogen.mdx index a8bf137bc..ed5fabfcf 100644 --- a/docs/v2/integrations/autogen.mdx +++ b/docs/v2/integrations/autogen.mdx @@ -1,159 +1,454 @@ --- -title: AutoGen -description: "Track and analyze your AutoGen agents with AgentOps" +title: "AutoGen" +description: "Integrate AgentOps with Microsoft AutoGen for multi-agent workflow tracking" --- -import CodeTooltip from '/snippets/add-code-tooltip.mdx' -import EnvTooltip from '/snippets/add-env-tooltip.mdx' +[AutoGen](https://microsoft.github.io/autogen/stable/) is Microsoft's framework for building multi-agent conversational AI systems. AgentOps provides seamless integration with AutoGen to track and monitor your multi-agent workflows. -## Installation +## Quick Start - ```bash pip - pip install agentops pyautogen - ``` - ```bash poetry - poetry add agentops pyautogen - ``` +```bash pip +pip install agentops autogen-core python-dotenv +``` +```bash poetry +poetry add agentops autogen-core python-dotenv +``` -## Usage +## Basic Integration -Initialize AgentOps at the beginning of your application to automatically track all AutoGen agent interactions: +AgentOps automatically instruments AutoGen agents and tracks their interactions. Simply initialize AgentOps before creating your AutoGen agents: - -```python Python + +**šŸ”„ Automatic Telemetry Integration**: AgentOps automatically picks up spans from AutoGen's built-in OpenTelemetry integration. No additional instrumentation is required - just initialize AgentOps and your AutoGen agents will be automatically tracked. 
+ + +```python +import asyncio +from dataclasses import dataclass +from typing import Callable +from dotenv import load_dotenv import agentops -import autogen -# Initialize AgentOps -agentops.init() - -# Configure your AutoGen agents -config_list = [ - { - "model": "gpt-4", - "api_key": "" - } -] - -llm_config = { - "config_list": config_list, - "timeout": 60, -} - -# Create AutoGen agents -assistant = autogen.AssistantAgent( - name="assistant", - llm_config=llm_config, - system_message="You are a helpful AI assistant." +from autogen_core import ( + DefaultTopicId, + MessageContext, + RoutedAgent, + default_subscription, + message_handler, + AgentId, + SingleThreadedAgentRuntime ) -user_proxy = autogen.UserProxyAgent( - name="user_proxy", - human_input_mode="TERMINATE", - max_consecutive_auto_reply=10, - is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"), - code_execution_config={"last_n_messages": 3, "work_dir": "coding"}, -) +# Load environment variables +load_dotenv() -# Initiate a conversation -user_proxy.initiate_chat( - assistant, - message="How can I implement a basic web scraper in Python?" 
-) +# Initialize AgentOps - this will automatically track AutoGen agents +agentops.init() + +@dataclass +class CountdownMessage: + """Message containing a number for countdown operations""" + content: int + +@default_subscription +class ModifierAgent(RoutedAgent): + """Agent that modifies numbers by applying a transformation function""" + + def __init__(self, modify_val: Callable[[int], int]) -> None: + super().__init__("A modifier agent that transforms numbers.") + self._modify_val = modify_val + + @message_handler + async def handle_message(self, message: CountdownMessage, ctx: MessageContext) -> None: + """Handle incoming messages and apply modification""" + original_val = message.content + modified_val = self._modify_val(original_val) + + print(f"šŸ”§ ModifierAgent: Transformed {original_val} → {modified_val}") + + # Publish the modified value to continue the workflow + await self.publish_message( + CountdownMessage(content=modified_val), + DefaultTopicId() + ) + +@default_subscription +class CheckerAgent(RoutedAgent): + """Agent that checks if a condition is met and decides whether to continue""" + + def __init__(self, stop_condition: Callable[[int], bool]) -> None: + super().__init__("A checker agent that validates conditions.") + self._stop_condition = stop_condition + + @message_handler + async def handle_message(self, message: CountdownMessage, ctx: MessageContext) -> None: + """Handle incoming messages and check stopping condition""" + value = message.content + + if not self._stop_condition(value): + print(f"āœ… CheckerAgent: {value} passed validation, continuing workflow") + # Continue the workflow by publishing the message + await self.publish_message( + CountdownMessage(content=value), + DefaultTopicId() + ) + else: + print(f"šŸ›‘ CheckerAgent: {value} failed validation, stopping workflow") + print("šŸŽ‰ Countdown completed successfully!") -# All agent interactions are automatically tracked by AgentOps +async def run_countdown_workflow(): + """Run a 
countdown workflow from 10 to 1 using AutoGen agents""" + + print("šŸš€ Starting AutoGen Countdown Workflow") + print("=" * 50) + + # Create the AutoGen runtime + runtime = SingleThreadedAgentRuntime() + + # Register the modifier agent (subtracts 1 from each number) + await ModifierAgent.register( + runtime, + "modifier", + lambda: ModifierAgent(modify_val=lambda x: x - 1), + ) + + # Register the checker agent (stops when value <= 1) + await CheckerAgent.register( + runtime, + "checker", + lambda: CheckerAgent(stop_condition=lambda x: x <= 1), + ) + + # Start the runtime + runtime.start() + print("šŸ¤– AutoGen runtime started") + print("šŸ“Ø Sending initial message with value: 10") + + # Send initial message to start the countdown + await runtime.send_message( + CountdownMessage(10), + AgentId("checker", "default") + ) + + # Wait for the workflow to complete + await runtime.stop_when_idle() + + print("=" * 50) + print("✨ Workflow completed! Check your AgentOps dashboard for detailed traces.") + +# Run the workflow +if __name__ == "__main__": + asyncio.run(run_countdown_workflow()) ``` - -## Multi-Agent Conversation Example +## Advanced Multi-Agent Example -AgentOps tracks interactions across multiple AutoGen agents: +Here's a more complex example showing a data processing pipeline with multiple specialized agents: - -```python Python +```python +import asyncio +from dataclasses import dataclass +from typing import List, Dict, Any +from dotenv import load_dotenv import agentops -import autogen -# Initialize AgentOps -agentops.init() - -# Configure LLM -config_list = [ - { - "model": "gpt-4", - "api_key": "" - } -] - -llm_config = { - "config_list": config_list, - "timeout": A 60, -} - -# Create a team of agents -researcher = autogen.AssistantAgent( - name="researcher", - llm_config=llm_config, - system_message="You are a researcher who specializes in finding accurate information." 
+from autogen_core import ( + DefaultTopicId, + MessageContext, + RoutedAgent, + default_subscription, + message_handler, + AgentId, + SingleThreadedAgentRuntime ) -coder = autogen.AssistantAgent( - name="coder", - llm_config=llm_config, - system_message="You are an expert programmer who writes clean, efficient code." -) +# Load environment variables +load_dotenv() -critic = autogen.AssistantAgent( - name="critic", - llm_config=llm_config, - system_message="You review solutions and provide constructive feedback." -) +# Initialize AgentOps +agentops.init() -user_proxy = autogen.UserProxyAgent( - name="user_proxy", - human_input_mode="TERMINATE", - max_consecutive_auto_reply=10, - is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"), - code_execution_config={"last_n_messages": 3, "work_dir": "coding"}, -) +@dataclass +class DataMessage: + """Message containing data to be processed""" + data: List[Dict[str, Any]] + stage: str + metadata: Dict[str, Any] -# Create a group chat -groupchat = autogen.GroupChat( - agents=[user_proxy, researcher, coder, critic], - messages=[], - max_round=12 -) +@default_subscription +class DataCollectorAgent(RoutedAgent): + """Agent responsible for collecting and preparing initial data""" + + def __init__(self) -> None: + super().__init__("Data collector agent that gathers initial dataset.") -manager = autogen.GroupChatManager( - groupchat=groupchat, - llm_config=llm_config -) + @message_handler + async def handle_message(self, message: DataMessage, ctx: MessageContext) -> None: + print(f"šŸ“Š DataCollector: Collecting data for {message.metadata.get('source', 'unknown')}") + + # Simulate data collection + collected_data = [ + {"id": 1, "value": 100, "category": "A"}, + {"id": 2, "value": 200, "category": "B"}, + {"id": 3, "value": 150, "category": "A"}, + {"id": 4, "value": 300, "category": "C"}, + ] + + print(f"āœ… DataCollector: Collected {len(collected_data)} records") + + # Send to processor + await 
self.publish_message( + DataMessage( + data=collected_data, + stage="processing", + metadata={**message.metadata, "collected_count": len(collected_data)} + ), + DefaultTopicId() + ) -# Initiate the group chat -user_proxy.initiate_chat( - manager, - message="Create a Python program to analyze sentiment from Twitter data." -) +@default_subscription +class DataProcessorAgent(RoutedAgent): + """Agent that processes and transforms data""" + + def __init__(self) -> None: + super().__init__("Data processor agent that transforms collected data.") + + @message_handler + async def handle_message(self, message: DataMessage, ctx: MessageContext) -> None: + if message.stage != "processing": + return + + print(f"āš™ļø DataProcessor: Processing {len(message.data)} records") + + # Process data - add calculated fields + processed_data = [] + for item in message.data: + processed_item = { + **item, + "processed_value": item["value"] * 1.1, # 10% increase + "status": "processed" + } + processed_data.append(processed_item) + + print(f"āœ… DataProcessor: Processed {len(processed_data)} records") + + # Send to analyzer + await self.publish_message( + DataMessage( + data=processed_data, + stage="analysis", + metadata={**message.metadata, "processed_count": len(processed_data)} + ), + DefaultTopicId() + ) -# All agent interactions across the group chat are automatically tracked by AgentOps +@default_subscription +class DataAnalyzerAgent(RoutedAgent): + """Agent that analyzes processed data and generates insights""" + + def __init__(self) -> None: + super().__init__("Data analyzer agent that generates insights.") + + @message_handler + async def handle_message(self, message: DataMessage, ctx: MessageContext) -> None: + if message.stage != "analysis": + return + + print(f"🧠 DataAnalyzer: Analyzing {len(message.data)} records") + + # Perform analysis + total_value = sum(item["processed_value"] for item in message.data) + avg_value = total_value / len(message.data) + categories = 
set(item["category"] for item in message.data) + + analysis_results = { + "total_records": len(message.data), + "total_value": total_value, + "average_value": avg_value, + "unique_categories": len(categories), + "categories": list(categories) + } + + print(f"šŸ“ˆ DataAnalyzer: Analysis complete") + print(f" • Total records: {analysis_results['total_records']}") + print(f" • Average value: {analysis_results['average_value']:.2f}") + print(f" • Categories: {', '.join(analysis_results['categories'])}") + + # Send to reporter + await self.publish_message( + DataMessage( + data=message.data, + stage="reporting", + metadata={ + **message.metadata, + "analysis": analysis_results + } + ), + DefaultTopicId() + ) + +@default_subscription +class ReportGeneratorAgent(RoutedAgent): + """Agent that generates final reports""" + + def __init__(self) -> None: + super().__init__("Report generator agent that creates final output.") + + @message_handler + async def handle_message(self, message: DataMessage, ctx: MessageContext) -> None: + if message.stage != "reporting": + return + + print(f"šŸ“ ReportGenerator: Generating final report") + + analysis = message.metadata.get("analysis", {}) + + report = f""" +šŸŽÆ DATA PROCESSING REPORT +======================== +Source: {message.metadata.get('source', 'Unknown')} +Processing Date: {message.metadata.get('timestamp', 'Unknown')} + +šŸ“Š SUMMARY STATISTICS: +• Total Records Processed: {analysis.get('total_records', 0)} +• Total Value: ${analysis.get('total_value', 0):,.2f} +• Average Value: ${analysis.get('average_value', 0):,.2f} +• Unique Categories: {analysis.get('unique_categories', 0)} +• Categories Found: {', '.join(analysis.get('categories', []))} + +āœ… Processing pipeline completed successfully! 
+ """ + + print(report) + print("šŸŽ‰ Multi-agent data processing workflow completed!") + +async def run_data_processing_pipeline(): + """Run a complete data processing pipeline using multiple AutoGen agents""" + + print("šŸš€ Starting AutoGen Data Processing Pipeline") + print("=" * 60) + + # Create runtime + runtime = SingleThreadedAgentRuntime() + + # Register all agents + await DataCollectorAgent.register( + runtime, + "collector", + lambda: DataCollectorAgent(), + ) + + await DataProcessorAgent.register( + runtime, + "processor", + lambda: DataProcessorAgent(), + ) + + await DataAnalyzerAgent.register( + runtime, + "analyzer", + lambda: DataAnalyzerAgent(), + ) + + await ReportGeneratorAgent.register( + runtime, + "reporter", + lambda: ReportGeneratorAgent(), + ) + + # Start runtime + runtime.start() + print("šŸ¤– AutoGen runtime with 4 agents started") + + # Trigger the pipeline + initial_message = DataMessage( + data=[], + stage="collection", + metadata={ + "source": "customer_database", + "timestamp": "2024-01-15T10:30:00Z", + "pipeline_id": "data_proc_001" + } + ) + + print("šŸ“Ø Triggering data processing pipeline...") + await runtime.send_message( + initial_message, + AgentId("collector", "default") + ) + + # Wait for completion + await runtime.stop_when_idle() + + print("=" * 60) + print("✨ Pipeline completed! 
Check AgentOps dashboard for detailed agent traces.") + +# Run the pipeline +if __name__ == "__main__": + asyncio.run(run_data_processing_pipeline()) ``` - -## Environment Variables +## What AgentOps Tracks - - - ```python .env - AGENTOPS_API_KEY= - OPENAI_API_KEY= - ``` - +AgentOps leverages AutoGen's built-in OpenTelemetry integration to automatically capture comprehensive telemetry data: + +### Automatic Span Collection +- **Agent Operations**: All agent message handling and processing operations +- **Message Flow**: Complete trace of message routing between agents +- **Runtime Events**: Agent registration, startup, and shutdown events +- **Error Handling**: Automatic capture of exceptions and error states + +### Built-in Telemetry Integration +AutoGen Core includes native OpenTelemetry support, and AgentOps seamlessly integrates with this telemetry system to provide: + +- **Zero-Configuration Tracking**: No manual instrumentation required +- **Complete Workflow Visibility**: End-to-end trace of multi-agent interactions +- **Performance Metrics**: Timing data for each agent operation and message exchange +- **Distributed Tracing**: Support for agents running across different processes or machines + +## Best Practices + +1. **Initialize Early**: Call `agentops.init()` before creating any AutoGen agents +2. **Use Descriptive Names**: Give your agents meaningful names for better dashboard visibility +3. **Structure Messages**: Use well-defined message classes for better tracking +4. **Handle Errors**: Implement proper error handling in your message handlers +5. 
**Monitor Performance**: Use the AgentOps dashboard to identify bottlenecks in your agent workflows + +## Dashboard Features + +The AgentOps dashboard provides: + +- **Agent Network Visualization**: See how your agents communicate +- **Message Trace Timeline**: Follow the complete flow of messages +- **Performance Analytics**: Identify slow agents or bottlenecks +- **Error Tracking**: Monitor and debug agent failures +- **Cost Analysis**: Track computational costs across your agent network + +## Example Output + +When you run the examples above, you'll see output like: + +``` +šŸš€ Starting AutoGen Countdown Workflow +================================================== +šŸ¤– AutoGen runtime started +šŸ“Ø Sending initial message with value: 10 +āœ… CheckerAgent: 10 passed validation, continuing workflow +šŸ”§ ModifierAgent: Transformed 10 → 9 +āœ… CheckerAgent: 9 passed validation, continuing workflow +šŸ”§ ModifierAgent: Transformed 9 → 8 +... +šŸ›‘ CheckerAgent: 1 failed validation, stopping workflow +šŸŽ‰ Countdown completed successfully! +================================================== +✨ Workflow completed! Check your AgentOps dashboard for detailed traces. +``` -Read more about environment variables in [Advanced Configuration](/v2/usage/advanced-configuration) +Visit your [AgentOps Dashboard](https://app.agentops.ai) to see detailed traces of your AutoGen agent interactions, performance metrics, and workflow analytics. 
- + \ No newline at end of file diff --git a/docs/v2/introduction.mdx b/docs/v2/introduction.mdx index dd9a2bd71..9e0d1a900 100644 --- a/docs/v2/introduction.mdx +++ b/docs/v2/introduction.mdx @@ -19,7 +19,8 @@ mode: "wide" } iconType="image" href="/v2/integrations/agentssdk" /> } iconType="image" href="/v2/integrations/crewai" /> - } iconType="image" href="/v2/integrations/autogen" /> + } iconType="image" href="/v2/integrations/ag2" /> + } iconType="image" href="/v2/integrations/autogen" /> } iconType="image" href="/v2/integrations/openai" /> } iconType="image" href="/v2/integrations/anthropic" /> } iconType="image" href="/v2/integrations/langchain" /> From 1b2d566e08374a7deb7b6dab6ed17068492733c3 Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Thu, 29 May 2025 02:36:13 +0530 Subject: [PATCH 4/4] Update mint.json to reorder integration paths, moving 'anthropic' to the correct position in the integrations list. This change improves the organization and accessibility of integration documentation. --- docs/mint.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/mint.json b/docs/mint.json index f0dbe37cb..f2c0865f7 100644 --- a/docs/mint.json +++ b/docs/mint.json @@ -167,9 +167,9 @@ { "group": "Integrations", "pages": [ - "v2/integrations/anthropic", "v2/integrations/ag2", "v2/integrations/autogen", + "v2/integrations/anthropic", "v2/integrations/crewai", "v2/integrations/google_adk", "v2/integrations/gemini",