From 817d1a1f05b5d56351499f0f358156fc077af448 Mon Sep 17 00:00:00 2001 From: Teo Date: Thu, 13 Mar 2025 22:55:04 +0200 Subject: [PATCH 01/45] Cleanup __init__.py from legacy implementations Signed-off-by: Teo --- agentops/__init__.py | 114 +++---------------------------------------- 1 file changed, 7 insertions(+), 107 deletions(-) diff --git a/agentops/__init__.py b/agentops/__init__.py index 25a6dcf1b..9418232e0 100755 --- a/agentops/__init__.py +++ b/agentops/__init__.py @@ -1,11 +1,14 @@ -from typing import Dict, List, Optional, Union, Any +from typing import Any, Dict, List, Optional, Union -from .client import Client -from .sdk.commands import record as sdk_record, start_span as sdk_start_span, end_span as sdk_end_span -from .semconv.span_kinds import SpanKind import agentops.legacy as legacy from agentops.legacy import ErrorEvent, ToolEvent +from .client import Client +from .sdk.commands import end_span as sdk_end_span +from .sdk.commands import record as sdk_record +from .sdk.commands import start_span as sdk_start_span +from .semconv.span_kinds import SpanKind + # Client global instance; one per process runtime _client = Client() @@ -126,109 +129,6 @@ def configure(**kwargs): _client.configure(**kwargs) - -def start_session(**kwargs): - """Start a new session for recording events. - - Args: - tags (List[str], optional): Tags that can be used for grouping or sorting later. - e.g. ["test_run"] - - Returns: - Optional[Session]: Returns Session if successful, None otherwise. - """ - return _client.start_session(**kwargs) - - -def end_session(span, token): - """ - End a previously started AgentOps session. - - This function ends the session span and detaches the context token, - completing the session lifecycle. - - Args: - span: The span returned by start_session - token: The token returned by start_session - """ - legacy.end_session(span, token) - - -def start_span( - name: str = "manual_span", - span_kind: str = SpanKind.OPERATION, - attributes: Optional[Dict[str, Any]] = None, - version: Optional[int] = None, -): - """ - Start a new span manually. - - This function creates and starts a new span, which can be used to track - operations. The span will remain active until end_span is called with - the returned span and token. - - Args: - name: Name of the span - span_kind: Kind of span (defaults to SpanKind.OPERATION) - attributes: Optional attributes to set on the span - version: Optional version identifier for the span - - Returns: - A tuple of (span, token) that should be passed to end_span - """ - return sdk_start_span(name, span_kind, attributes, version) - - -def end_span(span, token): - """ - End a previously started span. - - This function ends the span and detaches the context token, - completing the span lifecycle. - - Args: - span: The span returned by start_span - token: The token returned by start_span - """ - sdk_end_span(span, token) - - -def record(message: str, attributes: Optional[Dict[str, Any]] = None): - """ - Record an event with a message within the current session. - - This function creates a simple operation span with the provided message - and attributes, which will be automatically associated with the current session. - - Args: - message: The message to record - attributes: Optional attributes to set on the span - """ - sdk_record(message, attributes) - - -def add_tags(tags: List[str]): - """ - Append to session tags at runtime. - - TODO: How do we retrieve the session context to add tags to? - - Args: - tags (List[str]): The list of tags to append. 
- """ - raise NotImplementedError - - -def set_tags(tags: List[str]): - """ - Replace session tags at runtime. - - Args: - tags (List[str]): The list of tags to set. - """ - raise NotImplementedError - - # For backwards compatibility and testing def get_client() -> Client: """Get the singleton client instance""" From b7cc190eb44d8b5898d4f353b75ea996e69cf7bb Mon Sep 17 00:00:00 2001 From: Teo Date: Thu, 13 Mar 2025 22:56:29 +0200 Subject: [PATCH 02/45] Format: core.py Signed-off-by: Teo --- agentops/sdk/core.py | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/agentops/sdk/core.py b/agentops/sdk/core.py index 66a322e60..26882ee4e 100644 --- a/agentops/sdk/core.py +++ b/agentops/sdk/core.py @@ -2,27 +2,22 @@ import atexit import threading -from typing import Any, Dict, List, Optional, Set, Type, Union, cast +from typing import List, Optional -from opentelemetry import context, metrics, trace +from opentelemetry import metrics, trace from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter -from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler -from opentelemetry.sdk._logs.export import SimpleLogRecordProcessor from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader from opentelemetry.sdk.resources import Resource -from opentelemetry.sdk.trace import ReadableSpan, SpanProcessor, TracerProvider -from opentelemetry.sdk.trace.export import BatchSpanProcessor, SimpleSpanProcessor, SpanExporter -from opentelemetry.trace import Span +from opentelemetry.sdk.trace import SpanProcessor, TracerProvider +from opentelemetry.sdk.trace.export import BatchSpanProcessor from agentops.exceptions import AgentOpsClientNotInitializedException from agentops.logging import logger -from agentops.sdk.exporters import AuthenticatedOTLPExporter from agentops.sdk.processors import InternalSpanProcessor from agentops.sdk.types import TracingConfig from agentops.semconv import ResourceAttributes -from agentops.semconv.core import CoreAttributes # No need to create shortcuts since we're using our own ResourceAttributes class now From 94229b2b4f22dbe25bf0fc48a8b83e43e19d6b9e Mon Sep 17 00:00:00 2001 From: Teo Date: Thu, 13 Mar 2025 23:00:14 +0200 Subject: [PATCH 03/45] end_session: cleanup attrs Signed-off-by: Teo --- agentops/legacy/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/agentops/legacy/__init__.py b/agentops/legacy/__init__.py index bfe1e4d36..58a505d01 100644 --- a/agentops/legacy/__init__.py +++ b/agentops/legacy/__init__.py @@ -6,7 +6,7 @@ This maintains compatibility with codebases that adhere to the previous API. """ -from typing import Dict, Any, Optional, Tuple +from typing import Any, Dict, Tuple from agentops.sdk.commands import start_span, end_span from agentops.semconv.span_kinds import SpanKind @@ -21,7 +21,7 @@ def start_session( - name: str = "manual_session", attributes: Optional[Dict[str, Any]] = None, version: Optional[int] = None + name: str = "manual_session", attributes: Dict[str, Any] = {} ) -> Tuple[Any, Any]: """ Start a new AgentOps session manually. 
@@ -40,7 +40,7 @@ def start_session( Returns: A tuple of (span, token) that should be passed to end_session """ - return start_span(name=name, span_kind=SpanKind.SESSION, attributes=attributes, version=version) + return start_span(name=name, span_kind=SpanKind.SESSION, attributes=attributes) def end_session(span, token) -> None: From a9a56a14409c2d99917cb75746b47f00a2cd1cac Mon Sep 17 00:00:00 2001 From: Teo Date: Thu, 13 Mar 2025 23:19:33 +0200 Subject: [PATCH 04/45] examples/opentelemetry/token_importance.py Signed-off-by: Teo --- examples/opentelemetry/token_importance.py | 211 +++++++++++++++++++++ 1 file changed, 211 insertions(+) create mode 100644 examples/opentelemetry/token_importance.py diff --git a/examples/opentelemetry/token_importance.py b/examples/opentelemetry/token_importance.py new file mode 100644 index 000000000..ffde6ec5d --- /dev/null +++ b/examples/opentelemetry/token_importance.py @@ -0,0 +1,211 @@ +from opentelemetry import trace, context +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import ConsoleSpanExporter, BatchSpanProcessor +import time +import sys + +# Set up basic tracing +trace.set_tracer_provider(TracerProvider()) +tracer_provider = trace.get_tracer_provider() +tracer_provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter())) +tracer = trace.get_tracer("token_demo") + +# ASCII art helpers for visualization +def print_header(title): + """Print a formatted header""" + print("\n" + "=" * 80) + print(f" {title}") + print("=" * 80) + +def print_step(step_num, description): + """Print a step in the process""" + print(f"\n[Step {step_num}] {description}") + +def print_span_tree(spans, indent=0): + """Print a visual representation of the span tree""" + for i, span in enumerate(spans): + is_last = i == len(spans) - 1 + prefix = "└── " if is_last else "├── " + print("│ " * indent + prefix + span) + +def print_context_state(active_span_name, context_stack=None): + """Print the current context state with visualization""" + print("\n Current Context State:") + print(" --------------------") + print(f" Active span: {active_span_name}") + + if context_stack: + print("\n Context Stack (top to bottom):") + for i, span in enumerate(context_stack): + if i == 0: + print(f" ┌─ {span} ← Current") + else: + print(f" │ {span}") + print(" └─────────────") + +def get_current_span_name(): + """Get the name of the current span or 'None' if no span is active""" + current = trace.get_current_span() + return getattr(current, "name", "None") + +# Scenario 1: Proper token management +print_header("Scenario 1: Proper Token Management") +print("This scenario demonstrates correct context management with proper token handling.") +print("We'll create a parent span, then a child span, and properly detach the context.") + +with tracer.start_as_current_span("parent") as parent: + print_step(1, "Created parent span and set as current") + parent_name = get_current_span_name() + print_context_state(parent_name, ["parent"]) + print_span_tree(["parent"]) + + print_step(2, "Creating child span and attaching to context") + # Manually create a child span and save the token + child = tracer.start_span("child") + ctx = trace.set_span_in_context(child) + token = context.attach(ctx) + + child_name = get_current_span_name() + print_context_state(child_name, ["child", "parent"]) + print_span_tree(["parent", "child"]) + + print_step(3, "Ending child span AND detaching token (proper cleanup)") + # End the child span and detach the token + child.end() + 
context.detach(token) + + restored_name = get_current_span_name() + print_context_state(restored_name, ["parent"]) + print_span_tree(["parent"]) + + print("\n✅ Result: Context properly restored to parent after child span ended") + +# Scenario 2: Missing token detachment +print_header("Scenario 2: Missing Token Detachment (Context Leak)") +print("This scenario demonstrates what happens when we don't detach the context token.") +print("We'll create a parent span, then a child span, but NOT detach the context.") + +with tracer.start_as_current_span("parent2") as parent: + print_step(1, "Created parent2 span and set as current") + parent_name = get_current_span_name() + print_context_state(parent_name, ["parent2"]) + print_span_tree(["parent2"]) + + print_step(2, "Creating child2 span and attaching to context") + # Manually create a child span but don't save the token + child = tracer.start_span("child2") + ctx = trace.set_span_in_context(child) + token = context.attach(ctx) # Token saved but not used later + + child_name = get_current_span_name() + print_context_state(child_name, ["child2", "parent2"]) + print_span_tree(["parent2", "child2"]) + + print_step(3, "Ending child2 span WITHOUT detaching token (improper cleanup)") + # End the child span but don't detach the token + child.end() + # No context.detach(token) call! + + leaked_name = get_current_span_name() + print_context_state(leaked_name, ["child2 (ended but context still active)", "parent2"]) + print_span_tree(["parent2", "child2 (ended)"]) + + print("\n⚠️ Result: Context LEAK! Still showing child2 as current context even though span ended") + print(" Any new spans created here would incorrectly use child2 as parent instead of parent2") + +# Scenario 3: Nested spans with context restoration +print_header("Scenario 3: Nested Spans with Context Restoration") +print("This scenario demonstrates proper context management with multiple nested spans.") +print("We'll create an outer → middle1 → middle2 span hierarchy and properly restore contexts.") + +with tracer.start_as_current_span("outer") as outer: + print_step(1, "Created outer span and set as current") + outer_name = get_current_span_name() + print_context_state(outer_name, ["outer"]) + print_span_tree(["outer"]) + + print_step(2, "Creating middle1 span and attaching to context") + # First middle span + middle1 = tracer.start_span("middle1") + ctx1 = trace.set_span_in_context(middle1) + token1 = context.attach(ctx1) + + middle1_name = get_current_span_name() + print_context_state(middle1_name, ["middle1", "outer"]) + print_span_tree(["outer", "middle1"]) + + print_step(3, "Creating middle2 span and attaching to context") + # Second middle span + middle2 = tracer.start_span("middle2") + ctx2 = trace.set_span_in_context(middle2) + token2 = context.attach(ctx2) + + middle2_name = get_current_span_name() + print_context_state(middle2_name, ["middle2", "middle1", "outer"]) + print_span_tree(["outer", "middle1", "middle2"]) + + print_step(4, "Ending middle2 span and detaching token2") + # End spans in reverse order with proper token management + middle2.end() + context.detach(token2) + + restored_middle1_name = get_current_span_name() + print_context_state(restored_middle1_name, ["middle1", "outer"]) + print_span_tree(["outer", "middle1", "middle2 (ended)"]) + + print_step(5, "Ending middle1 span and detaching token1") + middle1.end() + context.detach(token1) + + restored_outer_name = get_current_span_name() + print_context_state(restored_outer_name, ["outer"]) + print_span_tree(["outer", 
"middle1 (ended)", "middle2 (ended)"]) + + print("\n✅ Result: Context properly restored through multiple levels") + +# Scenario 4: What happens if we create new spans after a context leak +print_header("Scenario 4: Creating New Spans After Context Leak") +print("This scenario demonstrates the impact of context leaks on the span hierarchy.") +print("We'll create a parent span, leak a child context, then create another span.") + +with tracer.start_as_current_span("root") as root: + print_step(1, "Created root span and set as current") + root_name = get_current_span_name() + print_context_state(root_name, ["root"]) + print_span_tree(["root"]) + + print_step(2, "Creating leaky_child span and attaching to context") + # Create a child span but don't save the token + leaky = tracer.start_span("leaky_child") + ctx = trace.set_span_in_context(leaky) + context.attach(ctx) # Token not saved + + leaky_name = get_current_span_name() + print_context_state(leaky_name, ["leaky_child", "root"]) + print_span_tree(["root", "leaky_child"]) + + print_step(3, "Ending leaky_child span WITHOUT detaching token") + # End the child span but don't detach the token + leaky.end() + # No context.detach() call! + + print_step(4, "Creating new_child span after context leak") + # This span will be created with leaky_child as parent, not root! + with tracer.start_as_current_span("new_child") as new_child: + new_child_name = get_current_span_name() + print_context_state(new_child_name, ["new_child", "leaky_child (ended but context active)", "root"]) + print_span_tree(["root", "leaky_child (ended)", "new_child"]) + + print("\n⚠️ Problem: new_child is incorrectly parented to leaky_child instead of root") + print(" This creates an incorrect trace hierarchy that doesn't match execution flow") + +print_header("Conclusion") +print("The token returned by context.attach() is crucial for proper context management.") +print("Without proper token detachment:") +print("1. Context leaks occur - the active context doesn't revert to the parent") +print("2. New spans are created with incorrect parent relationships") +print("3. The trace hierarchy doesn't accurately represent the execution flow") +print("\nIn AgentOps, if end_session() doesn't detach the token:") +print("- Sessions might appear to be active even after they've ended") +print("- New operations might be incorrectly associated with ended sessions") +print("- The overall trace hierarchy would be inaccurate") From 14ad0d8fb92e4fe29a469848993738fe34ce659c Mon Sep 17 00:00:00 2001 From: Teo Date: Thu, 13 Mar 2025 23:54:29 +0200 Subject: [PATCH 05/45] utility record i/o: use agentops.semconv.SpanAttributes Signed-off-by: Teo --- agentops/sdk/decorators/utility.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/agentops/sdk/decorators/utility.py b/agentops/sdk/decorators/utility.py index 21a47d7b5..9daf570a0 100644 --- a/agentops/sdk/decorators/utility.py +++ b/agentops/sdk/decorators/utility.py @@ -13,9 +13,10 @@ from agentops.logging import logger from agentops.sdk.converters import dict_to_span_attributes from agentops.sdk.core import TracingCore -from agentops.semconv import SpanKind +from agentops.semconv import SpanKind, span_attributes from agentops.semconv.core import CoreAttributes from agentops.semconv.span_attributes import SpanAttributes +from agentops.semconv.span_kinds import AgentOpsSpanKindValues """ !! NOTE !! 
@@ -199,7 +200,7 @@ def _record_operation_input(span: trace.Span, args: tuple, kwargs: Dict[str, Any json_data = safe_serialize(input_data) if _check_content_size(json_data): - span.set_attribute("agentops.operation.input", json_data) + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_INPUT, json_data) else: logger.debug("Operation input exceeds size limit, not recording") except Exception as err: @@ -213,7 +214,7 @@ def _record_operation_output(span: trace.Span, result: Any) -> None: json_data = safe_serialize(result) if _check_content_size(json_data): - span.set_attribute("agentops.operation.output", json_data) + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_OUTPUT, json_data) else: logger.debug("Operation output exceeds size limit, not recording") except Exception as err: From ea429fe445453f7412e0067f64ff547b63dc0164 Mon Sep 17 00:00:00 2001 From: Teo Date: Thu, 13 Mar 2025 23:55:30 +0200 Subject: [PATCH 06/45] examples/opentelemetry/token_importance_2.py Signed-off-by: Teo --- examples/opentelemetry/token_importance_2.py | 51 ++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 examples/opentelemetry/token_importance_2.py diff --git a/examples/opentelemetry/token_importance_2.py b/examples/opentelemetry/token_importance_2.py new file mode 100644 index 000000000..aea2fe1f4 --- /dev/null +++ b/examples/opentelemetry/token_importance_2.py @@ -0,0 +1,51 @@ +from opentelemetry import trace, context +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import ConsoleSpanExporter, BatchSpanProcessor + +# Set up tracing +provider = TracerProvider() +processor = BatchSpanProcessor(ConsoleSpanExporter()) +provider.add_span_processor(processor) +trace.set_tracer_provider(provider) +tracer = trace.get_tracer("demo") + +def get_current_span_name(): + return getattr(trace.get_current_span(), "name", "None") + +print("\n=== Scenario: Multiple contexts with the same span ===") +print("This demonstrates why coupling spans and tokens can be problematic") + +# Create a span that we'll use in multiple contexts +shared_span = tracer.start_span("shared_span") +print(f"Created shared_span (not in any context yet): {shared_span.name}") + +# Create context A with the shared span +ctx_a = trace.set_span_in_context(shared_span) +token_a = context.attach(ctx_a) +print(f"Current span after attaching context A: {get_current_span_name()}") + +# Save context A state +first_context_span = trace.get_current_span() + +# Create context B with the same shared span +# If span and token were coupled, this would detach context A! +ctx_b = trace.set_span_in_context(shared_span) +token_b = context.attach(ctx_b) +print(f"Current span after attaching context B: {get_current_span_name()}") + +# Now detach context B +context.detach(token_b) +print(f"Current span after detaching context B: {get_current_span_name()}") + +# Detach context A +context.detach(token_a) +print(f"Current span after detaching context A: {get_current_span_name()}") + +# End the shared span once - if we did this twice with coupled tokens, it would error +shared_span.end() +print("Ended shared_span once") + +print("\nIf spans and tokens were coupled:") +print("1. Creating context B would have implicitly detached context A") +print("2. We couldn't use the same span in two different trace contexts") +print("3. 
Ending the span would have also detached all contexts using it") From 479f90654a6daf7dccd0f1287ffce41f7ba62315 Mon Sep 17 00:00:00 2001 From: Teo Date: Thu, 13 Mar 2025 23:55:51 +0200 Subject: [PATCH 07/45] sdk/tracing mod Signed-off-by: Teo --- agentops/sdk/commands.py | 2 +- agentops/sdk/decorators/agentops.py | 6 ++++-- agentops/sdk/{decorators => tracing}/utility.py | 10 +++------- 3 files changed, 8 insertions(+), 10 deletions(-) rename agentops/sdk/{decorators => tracing}/utility.py (96%) diff --git a/agentops/sdk/commands.py b/agentops/sdk/commands.py index 9d9d263e0..e0542f4a4 100644 --- a/agentops/sdk/commands.py +++ b/agentops/sdk/commands.py @@ -15,7 +15,7 @@ from agentops.exceptions import AgentOpsClientNotInitializedException from agentops.sdk.core import TracingCore -from agentops.sdk.decorators.utility import _finalize_span, _make_span +from agentops.sdk.tracing.utility import _finalize_span, _make_span from agentops.semconv.span_attributes import SpanAttributes from agentops.semconv.span_kinds import SpanKind diff --git a/agentops/sdk/decorators/agentops.py b/agentops/sdk/decorators/agentops.py index bc60f3ba2..93c5a2e36 100644 --- a/agentops/sdk/decorators/agentops.py +++ b/agentops/sdk/decorators/agentops.py @@ -6,10 +6,12 @@ """ import inspect -from typing import Optional, Any, Callable, TypeVar, cast, Type, Union, overload +from typing import (Any, Callable, Optional, Type, TypeVar, Union, cast, + overload) import wrapt -from agentops.sdk.decorators.utility import instrument_operation, instrument_class + +from agentops.sdk.tracing.utility import instrument_class, instrument_operation from agentops.semconv.span_kinds import SpanKind # Type variables for better type hinting diff --git a/agentops/sdk/decorators/utility.py b/agentops/sdk/tracing/utility.py similarity index 96% rename from agentops/sdk/decorators/utility.py rename to agentops/sdk/tracing/utility.py index 9daf570a0..d570a5048 100644 --- a/agentops/sdk/decorators/utility.py +++ b/agentops/sdk/tracing/utility.py @@ -1,22 +1,18 @@ import inspect -import json import os import types import warnings from functools import wraps -from typing import Any, Dict, Optional, Union +from typing import Any, Dict, Optional from opentelemetry import context as context_api from opentelemetry import trace -from agentops.helpers.serialization import AgentOpsJSONEncoder, safe_serialize +from agentops.helpers.serialization import safe_serialize from agentops.logging import logger -from agentops.sdk.converters import dict_to_span_attributes from agentops.sdk.core import TracingCore -from agentops.semconv import SpanKind, span_attributes -from agentops.semconv.core import CoreAttributes +from agentops.semconv import SpanKind from agentops.semconv.span_attributes import SpanAttributes -from agentops.semconv.span_kinds import AgentOpsSpanKindValues """ !! NOTE !! 
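A minimal usage sketch of the manual span API that these commits consolidate under agentops.sdk.commands, assuming the start_span/end_span/record signatures documented in the wrappers removed in PATCH 01 and the keyword usage shown in PATCH 03; the try/finally token handling follows the pattern motivated by the token_importance examples. The span name and attribute values below are illustrative only, not taken from any patch.

# Sketch only: signatures assumed from the removed __init__ wrappers (PATCH 01)
# and from legacy start_session's call into start_span (PATCH 03).
from agentops.sdk.commands import end_span, record, start_span
from agentops.semconv.span_kinds import SpanKind

# start_span returns a (span, token) pair; both must be passed back to end_span.
span, token = start_span(
    name="manual_operation",              # illustrative name
    span_kind=SpanKind.OPERATION,
    attributes={"example.key": "value"},  # illustrative attributes
)
try:
    # record() creates a simple operation span associated with the current context.
    record("did some work", {"step": 1})
finally:
    # Handing the token back lets end_span detach the context token, avoiding the
    # context leaks demonstrated in examples/opentelemetry/token_importance.py.
    end_span(span, token)
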
From ef523792de3df2b425dd3aeadd146384f1b7937a Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 00:06:56 +0200 Subject: [PATCH 08/45] Improve token_importance example Signed-off-by: Teo --- examples/opentelemetry/token_importance.py | 582 +++++++++++++++------ 1 file changed, 427 insertions(+), 155 deletions(-) diff --git a/examples/opentelemetry/token_importance.py b/examples/opentelemetry/token_importance.py index ffde6ec5d..fd3c71159 100644 --- a/examples/opentelemetry/token_importance.py +++ b/examples/opentelemetry/token_importance.py @@ -1,16 +1,37 @@ -from opentelemetry import trace, context +from opentelemetry import trace, context, baggage from opentelemetry.sdk.trace import TracerProvider -from opentelemetry.sdk.trace.export import ConsoleSpanExporter, BatchSpanProcessor +from opentelemetry.sdk.trace.export import ConsoleSpanExporter, BatchSpanProcessor, SpanExporter +from opentelemetry.trace import Status, StatusCode import time import sys +import json +from typing import Dict, Any, List, Optional, Sequence + +# Create a no-op exporter to prevent spans from being printed +class NoopExporter(SpanExporter): + """A span exporter that doesn't export spans anywhere.""" + + def export(self, spans: Sequence) -> None: + """Do nothing with the spans.""" + pass + + def shutdown(self) -> None: + """Shutdown the exporter.""" + pass # Set up basic tracing -trace.set_tracer_provider(TracerProvider()) -tracer_provider = trace.get_tracer_provider() -tracer_provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter())) +provider = TracerProvider() +# Use the NoopExporter instead of ConsoleSpanExporter +processor = BatchSpanProcessor(NoopExporter()) +provider.add_span_processor(processor) +trace.set_tracer_provider(provider) tracer = trace.get_tracer("token_demo") -# ASCII art helpers for visualization +# Second tracer for demonstration +llm_tracer = trace.get_tracer("llm_tracer") + +# ======== Visualization Helpers ======== + def print_header(title): """Print a formatted header""" print("\n" + "=" * 80) @@ -28,7 +49,7 @@ def print_span_tree(spans, indent=0): prefix = "└── " if is_last else "├── " print("│ " * indent + prefix + span) -def print_context_state(active_span_name, context_stack=None): +def print_context_state(active_span_name, context_stack=None, baggage_items=None): """Print the current context state with visualization""" print("\n Current Context State:") print(" --------------------") @@ -42,162 +63,413 @@ def print_context_state(active_span_name, context_stack=None): else: print(f" │ {span}") print(" └─────────────") + + if baggage_items: + print("\n Baggage Items:") + print(" -------------") + for key, value in baggage_items.items(): + print(f" 🔷 {key}: {value}") + +def print_span_details(span, title="Span Details"): + """Print detailed information about a span""" + if not hasattr(span, "get_span_context"): + print(" No span details available") + return + + ctx = span.get_span_context() + print(f"\n {title}:") + print(" " + "-" * len(title)) + print(f" Name: {getattr(span, 'name', 'Unknown')}") + print(f" Trace ID: {ctx.trace_id:x}") + print(f" Span ID: {ctx.span_id:x}") + + # Try to get attributes if possible + attributes = getattr(span, "_attributes", {}) + if attributes: + print("\n Attributes:") + for key, value in attributes.items(): + print(f" 📎 {key}: {str(value)}") def get_current_span_name(): """Get the name of the current span or 'None' if no span is active""" current = trace.get_current_span() return getattr(current, "name", "None") -# Scenario 1: 
Proper token management -print_header("Scenario 1: Proper Token Management") -print("This scenario demonstrates correct context management with proper token handling.") -print("We'll create a parent span, then a child span, and properly detach the context.") - -with tracer.start_as_current_span("parent") as parent: - print_step(1, "Created parent span and set as current") - parent_name = get_current_span_name() - print_context_state(parent_name, ["parent"]) - print_span_tree(["parent"]) - - print_step(2, "Creating child span and attaching to context") - # Manually create a child span and save the token - child = tracer.start_span("child") - ctx = trace.set_span_in_context(child) +def get_current_baggage() -> Dict[str, str]: + """Get all baggage items in the current context""" + items = {} + # This is a simplified approach - in a real app you'd enumerate all baggage items + for key in ["user.id", "tenant.id", "request.id", "environment"]: + value = baggage.get_baggage(key) + if value: + items[key] = value + return items + +# ======== Simulated Application Functions ======== + +def simulate_database_query(query: str) -> Dict[str, Any]: + """Simulate a database query with proper context propagation""" + with tracer.start_as_current_span("database.query") as span: + span.set_attribute("db.statement", query) + span.set_attribute("db.system", "postgresql") + + # Simulate query execution time + time.sleep(0.01) + + # Add current baggage to demonstrate propagation + user_id = baggage.get_baggage("user.id") + if user_id: + span.set_attribute("user.id", str(user_id)) + + # Return simulated data + return {"id": 1234, "name": "Sample Data", "status": "active"} + +def call_external_api(endpoint: str) -> Dict[str, Any]: + """Simulate an external API call with a different tracer""" + with llm_tracer.start_as_current_span("http.request") as span: + span.set_attribute("http.url", f"https://api.example.com/{endpoint}") + span.set_attribute("http.method", "GET") + + # Simulate API call latency + time.sleep(0.02) + + # Add baggage to simulate cross-service propagation + tenant_id = baggage.get_baggage("tenant.id") + if tenant_id: + span.set_attribute("tenant.id", str(tenant_id)) + + # Sometimes operations fail + if endpoint == "error": + span.set_status(Status(StatusCode.ERROR)) + span.set_attribute("error.message", "API returned 500 status code") + return {"error": "Internal Server Error"} + + return {"status": "success", "data": {"key": "value"}} + +def process_user_request(user_id: str, action: str) -> Dict[str, Any]: + """Process a user request with nested spans and context propagation""" + # Set baggage for the entire operation + ctx = baggage.set_baggage("user.id", user_id) + ctx = baggage.set_baggage("tenant.id", "tenant-1234", context=ctx) + ctx = baggage.set_baggage("request.id", f"req-{int(time.time())}", context=ctx) + + # Attach the context with baggage + token = context.attach(ctx) + + try: + with tracer.start_as_current_span("process_request") as span: + span.set_attribute("user.id", user_id) + span.set_attribute("request.action", action) + + # Query the database (creates a child span) + db_result = simulate_database_query(f"SELECT * FROM users WHERE id = '{user_id}'") + + # Call an external API (creates a child span with a different tracer) + api_result = call_external_api("users/profile") + + # Combine results + return { + "user": db_result, + "profile": api_result, + "processed_at": time.time() + } + finally: + # Always detach the context to clean up + context.detach(token) + +# ======== 
Scenarios ======== + +def run_basic_scenarios(): + """Run the original basic scenarios to demonstrate token importance""" + # Scenario 1: Proper token management + print_header("Scenario 1: Proper Token Management") + print("This scenario demonstrates correct context management with proper token handling.") + print("We'll create a parent span, then a child span, and properly detach the context.") + + with tracer.start_as_current_span("parent") as parent: + print_step(1, "Created parent span and set as current") + parent_name = get_current_span_name() + print_context_state(parent_name, ["parent"]) + print_span_tree(["parent"]) + + print_step(2, "Creating child span and attaching to context") + # Manually create a child span and save the token + child = tracer.start_span("child") + ctx = trace.set_span_in_context(child) + token = context.attach(ctx) + + child_name = get_current_span_name() + print_context_state(child_name, ["child", "parent"]) + print_span_tree(["parent", "child"]) + + print_step(3, "Ending child span AND detaching token (proper cleanup)") + # End the child span and detach the token + child.end() + context.detach(token) + + restored_name = get_current_span_name() + print_context_state(restored_name, ["parent"]) + print_span_tree(["parent"]) + + print("\n✅ Result: Context properly restored to parent after child span ended") + + # Scenario 2: Missing token detachment + print_header("Scenario 2: Missing Token Detachment (Context Leak)") + print("This scenario demonstrates what happens when we don't detach the context token.") + print("We'll create a parent span, then a child span, but NOT detach the context.") + + with tracer.start_as_current_span("parent2") as parent: + print_step(1, "Created parent2 span and set as current") + parent_name = get_current_span_name() + print_context_state(parent_name, ["parent2"]) + print_span_tree(["parent2"]) + + print_step(2, "Creating child2 span and attaching to context") + # Manually create a child span but don't save the token + child = tracer.start_span("child2") + ctx = trace.set_span_in_context(child) + token = context.attach(ctx) # Token saved but not used later + + child_name = get_current_span_name() + print_context_state(child_name, ["child2", "parent2"]) + print_span_tree(["parent2", "child2"]) + + print_step(3, "Ending child2 span WITHOUT detaching token (improper cleanup)") + # End the child span but don't detach the token + child.end() + # No context.detach(token) call! + + leaked_name = get_current_span_name() + print_context_state(leaked_name, ["child2 (ended but context still active)", "parent2"]) + print_span_tree(["parent2", "child2 (ended)"]) + + print("\n⚠️ Result: Context LEAK! 
Still showing child2 as current context even though span ended") + print(" Any new spans created here would incorrectly use child2 as parent instead of parent2") + + # Scenario 3: Nested spans with context restoration + print_header("Scenario 3: Nested Spans with Context Restoration") + print("This scenario demonstrates proper context management with multiple nested spans.") + print("We'll create an outer → middle1 → middle2 span hierarchy and properly restore contexts.") + + with tracer.start_as_current_span("outer") as outer: + print_step(1, "Created outer span and set as current") + outer_name = get_current_span_name() + print_context_state(outer_name, ["outer"]) + print_span_tree(["outer"]) + + print_step(2, "Creating middle1 span and attaching to context") + # First middle span + middle1 = tracer.start_span("middle1") + ctx1 = trace.set_span_in_context(middle1) + token1 = context.attach(ctx1) + + middle1_name = get_current_span_name() + print_context_state(middle1_name, ["middle1", "outer"]) + print_span_tree(["outer", "middle1"]) + + print_step(3, "Creating middle2 span and attaching to context") + # Second middle span + middle2 = tracer.start_span("middle2") + ctx2 = trace.set_span_in_context(middle2) + token2 = context.attach(ctx2) + + middle2_name = get_current_span_name() + print_context_state(middle2_name, ["middle2", "middle1", "outer"]) + print_span_tree(["outer", "middle1", "middle2"]) + + print_step(4, "Ending middle2 span and detaching token2") + # End spans in reverse order with proper token management + middle2.end() + context.detach(token2) + + restored_middle1_name = get_current_span_name() + print_context_state(restored_middle1_name, ["middle1", "outer"]) + print_span_tree(["outer", "middle1", "middle2 (ended)"]) + + print_step(5, "Ending middle1 span and detaching token1") + middle1.end() + context.detach(token1) + + restored_outer_name = get_current_span_name() + print_context_state(restored_outer_name, ["outer"]) + print_span_tree(["outer", "middle1 (ended)", "middle2 (ended)"]) + + print("\n✅ Result: Context properly restored through multiple levels") + + # Scenario 4: What happens if we create new spans after a context leak + print_header("Scenario 4: Creating New Spans After Context Leak") + print("This scenario demonstrates the impact of context leaks on the span hierarchy.") + print("We'll create a parent span, leak a child context, then create another span.") + + with tracer.start_as_current_span("root") as root: + print_step(1, "Created root span and set as current") + root_name = get_current_span_name() + print_context_state(root_name, ["root"]) + print_span_tree(["root"]) + + print_step(2, "Creating leaky_child span and attaching to context") + # Create a child span but don't save the token + leaky = tracer.start_span("leaky_child") + ctx = trace.set_span_in_context(leaky) + context.attach(ctx) # Token not saved + + leaky_name = get_current_span_name() + print_context_state(leaky_name, ["leaky_child", "root"]) + print_span_tree(["root", "leaky_child"]) + + print_step(3, "Ending leaky_child span WITHOUT detaching token") + # End the child span but don't detach the token + leaky.end() + # No context.detach() call! + + print_step(4, "Creating new_child span after context leak") + # This span will be created with leaky_child as parent, not root! 
+ with tracer.start_as_current_span("new_child") as new_child: + new_child_name = get_current_span_name() + print_context_state(new_child_name, ["new_child", "leaky_child (ended but context active)", "root"]) + print_span_tree(["root", "leaky_child (ended)", "new_child"]) + + print("\n⚠️ Problem: new_child is incorrectly parented to leaky_child instead of root") + print(" This creates an incorrect trace hierarchy that doesn't match execution flow") + +def run_advanced_scenarios(): + """Run the new advanced scenarios demonstrating more complex context patterns""" + + # Scenario 5: Cross-function context propagation + print_header("Scenario 5: Cross-Function Context Propagation") + print("This scenario demonstrates how context and baggage propagate across function boundaries.") + print("We'll create a request processing flow with multiple nested functions and spans.") + + print_step(1, "Starting user request processing with baggage") + # Process a simulated request that will create nested spans across functions + result = process_user_request("user-5678", "update_profile") + + print_step(2, "Request processing completed") + print("\n Request processing result:") + print(f" User data: {result['user']['name']}") + print(f" Profile status: {result['profile']['status']}") + + print("\n✅ Result: Context and baggage successfully propagated across multiple function calls") + print(" Each function created properly nested spans that maintained the baggage context") + + # Scenario 6: Using different tracers with the same context + print_header("Scenario 6: Multiple Tracers with Shared Context") + print("This scenario demonstrates using multiple tracers while maintaining a consistent context.") + + print_step(1, "Creating context with baggage") + # Set up a context with baggage + ctx = baggage.set_baggage("environment", "production") + ctx = baggage.set_baggage("tenant.id", "tenant-9876", context=ctx) token = context.attach(ctx) - child_name = get_current_span_name() - print_context_state(child_name, ["child", "parent"]) - print_span_tree(["parent", "child"]) - - print_step(3, "Ending child span AND detaching token (proper cleanup)") - # End the child span and detach the token - child.end() - context.detach(token) - - restored_name = get_current_span_name() - print_context_state(restored_name, ["parent"]) - print_span_tree(["parent"]) - - print("\n✅ Result: Context properly restored to parent after child span ended") - -# Scenario 2: Missing token detachment -print_header("Scenario 2: Missing Token Detachment (Context Leak)") -print("This scenario demonstrates what happens when we don't detach the context token.") -print("We'll create a parent span, then a child span, but NOT detach the context.") - -with tracer.start_as_current_span("parent2") as parent: - print_step(1, "Created parent2 span and set as current") - parent_name = get_current_span_name() - print_context_state(parent_name, ["parent2"]) - print_span_tree(["parent2"]) - - print_step(2, "Creating child2 span and attaching to context") - # Manually create a child span but don't save the token - child = tracer.start_span("child2") - ctx = trace.set_span_in_context(child) - token = context.attach(ctx) # Token saved but not used later - - child_name = get_current_span_name() - print_context_state(child_name, ["child2", "parent2"]) - print_span_tree(["parent2", "child2"]) - - print_step(3, "Ending child2 span WITHOUT detaching token (improper cleanup)") - # End the child span but don't detach the token - child.end() - # No context.detach(token) 
call! - - leaked_name = get_current_span_name() - print_context_state(leaked_name, ["child2 (ended but context still active)", "parent2"]) - print_span_tree(["parent2", "child2 (ended)"]) - - print("\n⚠️ Result: Context LEAK! Still showing child2 as current context even though span ended") - print(" Any new spans created here would incorrectly use child2 as parent instead of parent2") - -# Scenario 3: Nested spans with context restoration -print_header("Scenario 3: Nested Spans with Context Restoration") -print("This scenario demonstrates proper context management with multiple nested spans.") -print("We'll create an outer → middle1 → middle2 span hierarchy and properly restore contexts.") - -with tracer.start_as_current_span("outer") as outer: - print_step(1, "Created outer span and set as current") - outer_name = get_current_span_name() - print_context_state(outer_name, ["outer"]) - print_span_tree(["outer"]) - - print_step(2, "Creating middle1 span and attaching to context") - # First middle span - middle1 = tracer.start_span("middle1") - ctx1 = trace.set_span_in_context(middle1) - token1 = context.attach(ctx1) - - middle1_name = get_current_span_name() - print_context_state(middle1_name, ["middle1", "outer"]) - print_span_tree(["outer", "middle1"]) - - print_step(3, "Creating middle2 span and attaching to context") - # Second middle span - middle2 = tracer.start_span("middle2") - ctx2 = trace.set_span_in_context(middle2) - token2 = context.attach(ctx2) - - middle2_name = get_current_span_name() - print_context_state(middle2_name, ["middle2", "middle1", "outer"]) - print_span_tree(["outer", "middle1", "middle2"]) - - print_step(4, "Ending middle2 span and detaching token2") - # End spans in reverse order with proper token management - middle2.end() - context.detach(token2) - - restored_middle1_name = get_current_span_name() - print_context_state(restored_middle1_name, ["middle1", "outer"]) - print_span_tree(["outer", "middle1", "middle2 (ended)"]) - - print_step(5, "Ending middle1 span and detaching token1") - middle1.end() - context.detach(token1) - - restored_outer_name = get_current_span_name() - print_context_state(restored_outer_name, ["outer"]) - print_span_tree(["outer", "middle1 (ended)", "middle2 (ended)"]) - - print("\n✅ Result: Context properly restored through multiple levels") - -# Scenario 4: What happens if we create new spans after a context leak -print_header("Scenario 4: Creating New Spans After Context Leak") -print("This scenario demonstrates the impact of context leaks on the span hierarchy.") -print("We'll create a parent span, leak a child context, then create another span.") - -with tracer.start_as_current_span("root") as root: - print_step(1, "Created root span and set as current") - root_name = get_current_span_name() - print_context_state(root_name, ["root"]) - print_span_tree(["root"]) - - print_step(2, "Creating leaky_child span and attaching to context") - # Create a child span but don't save the token - leaky = tracer.start_span("leaky_child") - ctx = trace.set_span_in_context(leaky) - context.attach(ctx) # Token not saved - - leaky_name = get_current_span_name() - print_context_state(leaky_name, ["leaky_child", "root"]) - print_span_tree(["root", "leaky_child"]) - - print_step(3, "Ending leaky_child span WITHOUT detaching token") - # End the child span but don't detach the token - leaky.end() - # No context.detach() call! - - print_step(4, "Creating new_child span after context leak") - # This span will be created with leaky_child as parent, not root! 
- with tracer.start_as_current_span("new_child") as new_child: - new_child_name = get_current_span_name() - print_context_state(new_child_name, ["new_child", "leaky_child (ended but context active)", "root"]) - print_span_tree(["root", "leaky_child (ended)", "new_child"]) - - print("\n⚠️ Problem: new_child is incorrectly parented to leaky_child instead of root") - print(" This creates an incorrect trace hierarchy that doesn't match execution flow") + try: + print_step(2, "Starting span with main tracer") + with tracer.start_as_current_span("main_operation") as main_span: + main_span_name = get_current_span_name() + baggage_items = get_current_baggage() + print_context_state(main_span_name, ["main_operation"], baggage_items) + print_span_details(main_span) + + print_step(3, "Creating span with LLM tracer (different tracer)") + with llm_tracer.start_as_current_span("llm_inference") as llm_span: + llm_span.set_attribute("model", "gpt-4") + llm_span.set_attribute("tokens", 150) + + llm_span_name = get_current_span_name() + print_context_state(llm_span_name, ["llm_inference", "main_operation"], baggage_items) + print_span_details(llm_span, "LLM Span Details") + + print_step(4, "Back to main tracer") + # Create another span with the first tracer + with tracer.start_as_current_span("post_processing") as post_span: + post_span_name = get_current_span_name() + print_context_state(post_span_name, ["post_processing", "llm_inference", "main_operation"], baggage_items) + finally: + context.detach(token) + + print("\n✅ Result: Multiple tracers successfully shared the same context") + print(" Baggage was accessible to spans from both tracers") + + # Scenario 7: Handling errors in spans + print_header("Scenario 7: Error Handling in Spans") + print("This scenario demonstrates proper error handling with spans.") + + print_step(1, "Starting operation that will encounter an error") + with tracer.start_as_current_span("error_prone_operation") as error_span: + try: + print_step(2, "Calling API that will fail") + result = call_external_api("error") + print(f" API result: {result}") + except Exception as e: + print_step(3, "Handling exception in span") + error_span.record_exception(e) + error_span.set_status(Status(StatusCode.ERROR)) + print(f" Recorded exception: {str(e)}") + + print("\n✅ Result: Properly recorded error in span without breaking execution flow") + print(" Errors should be visible in the trace visualization") + + # Scenario 8: Manual context saving and restoring + print_header("Scenario 8: Manual Context Saving and Restoring") + print("This scenario demonstrates saving a context and restoring it later.") + + print_step(1, "Creating initial context") + with tracer.start_as_current_span("initial_operation") as initial_span: + # Set some baggage + ctx = baggage.set_baggage("checkpoint", "saved_point") + + # Save the current context for later use + saved_context = context.get_current() + print_context_state("initial_operation", ["initial_operation"], {"checkpoint": "saved_point"}) + + print_step(2, "Creating a different context") + with tracer.start_as_current_span("intermediate_operation") as intermediate_span: + # Change the baggage + ctx = baggage.set_baggage("checkpoint", "intermediate_point") + print_context_state("intermediate_operation", ["intermediate_operation", "initial_operation"], + {"checkpoint": "intermediate_point"}) + + print_step(3, "Restoring saved context") + # Restore the saved context + token = context.attach(saved_context) + try: + current_span = trace.get_current_span() + 
current_name = getattr(current_span, "name", "Unknown") + checkpoint = baggage.get_baggage("checkpoint") + print_context_state(current_name, ["initial_operation"], {"checkpoint": checkpoint}) + + print("\n✅ Result: Successfully restored previous context") + finally: + context.detach(token) + + print_step(4, "Back to intermediate context") + print_context_state("intermediate_operation", ["intermediate_operation", "initial_operation"], + {"checkpoint": "intermediate_point"}) + +print_header("OpenTelemetry Context Management Demonstration") +print("This example illustrates the importance of proper context management in OpenTelemetry.") +print("It covers basic and advanced scenarios showing how context affects span relationships.") + +print("\n1. Basic Scenarios - Demonstrating context token importance") +print("2. Advanced Scenarios - Real-world patterns with nested functionality") +print("3. Run All Scenarios") +print("4. Exit") + +while True: + choice = input("\nEnter your choice (1-4): ") + + if choice == "1": + run_basic_scenarios() + elif choice == "2": + run_advanced_scenarios() + elif choice == "3": + run_basic_scenarios() + run_advanced_scenarios() + elif choice == "4": + print("\nExiting...") + break + else: + print("Invalid choice. Please enter 1, 2, 3, or 4.") print_header("Conclusion") print("The token returned by context.attach() is crucial for proper context management.") From 61babbbf339e6348a56e22b521fb68f1a8ca8603 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 00:11:28 +0200 Subject: [PATCH 09/45] Cleanup legacy Signed-off-by: Teo --- agentops/cli.py | 35 -- agentops/client.py | 439 --------------------- agentops/decorators.py | 347 ----------------- agentops/descriptor.py | 187 --------- agentops/helpers.py | 176 --------- agentops/host_env.py | 150 -------- agentops/http_client.py | 208 ---------- agentops/{ => legacy}/event.py | 0 agentops/log_config.py | 53 --- agentops/meta_client.py | 64 ---- agentops/session.py | 672 --------------------------------- agentops/singleton.py | 28 -- agentops/time_travel.py | 144 ------- 13 files changed, 2503 deletions(-) delete mode 100644 agentops/cli.py delete mode 100644 agentops/client.py delete mode 100644 agentops/decorators.py delete mode 100644 agentops/descriptor.py delete mode 100644 agentops/helpers.py delete mode 100644 agentops/host_env.py delete mode 100644 agentops/http_client.py rename agentops/{ => legacy}/event.py (100%) delete mode 100644 agentops/log_config.py delete mode 100644 agentops/meta_client.py delete mode 100644 agentops/session.py delete mode 100644 agentops/singleton.py delete mode 100644 agentops/time_travel.py diff --git a/agentops/cli.py b/agentops/cli.py deleted file mode 100644 index 29a81123e..000000000 --- a/agentops/cli.py +++ /dev/null @@ -1,35 +0,0 @@ -import argparse -from .time_travel import fetch_time_travel_id, set_time_travel_active_state - - -def main(): - parser = argparse.ArgumentParser(description="AgentOps CLI") - subparsers = parser.add_subparsers(dest="command") - - timetravel_parser = subparsers.add_parser("timetravel", help="Time Travel Debugging commands", aliases=["tt"]) - timetravel_parser.add_argument( - "branch_name", - type=str, - nargs="?", - help="Given a branch name, fetches the cache file for Time Travel Debugging. 
Turns on feature by default", - ) - timetravel_parser.add_argument( - "--on", - action="store_true", - help="Turns on Time Travel Debugging", - ) - timetravel_parser.add_argument( - "--off", - action="store_true", - help="Turns off Time Travel Debugging", - ) - - args = parser.parse_args() - - if args.command in ["timetravel", "tt"]: - if args.branch_name: - fetch_time_travel_id(args.branch_name) - if args.on: - set_time_travel_active_state(True) - if args.off: - set_time_travel_active_state(False) diff --git a/agentops/client.py b/agentops/client.py deleted file mode 100644 index fb3e17937..000000000 --- a/agentops/client.py +++ /dev/null @@ -1,439 +0,0 @@ -""" -AgentOps client module that provides a client class with public interfaces and configuration. - -Classes: - Client: Provides methods to interact with the AgentOps service. -""" - -import atexit -import inspect -import logging -import os -import signal -import sys -import threading -import traceback -from decimal import Decimal -from functools import cached_property -from typing import List, Optional, Tuple, Union -from uuid import UUID, uuid4 - -from termcolor import colored - -from .config import Configuration -from .event import ErrorEvent, Event -from .host_env import get_host_env -from .llms.tracker import LlmTracker -from .log_config import logger -from .meta_client import MetaClient -from .session import Session, active_sessions -from .singleton import conditional_singleton - - -@conditional_singleton -class Client(metaclass=MetaClient): - def __init__(self): - self._pre_init_messages: List[str] = [] - self._initialized: bool = False - self._llm_tracker: Optional[LlmTracker] = None - self._sessions: List[Session] = active_sessions - self._config = Configuration() - self._pre_init_queue = {"agents": []} - self._host_env = None # Cache host env data - - self.configure( - api_key=os.environ.get("AGENTOPS_API_KEY"), - parent_key=os.environ.get("AGENTOPS_PARENT_KEY"), - endpoint=os.environ.get("AGENTOPS_API_ENDPOINT"), - env_data_opt_out=os.environ.get("AGENTOPS_ENV_DATA_OPT_OUT", "False").lower() == "true", - ) - - def configure( - self, - api_key: Optional[str] = None, - parent_key: Optional[str] = None, - endpoint: Optional[str] = None, - max_wait_time: Optional[int] = None, - max_queue_size: Optional[int] = None, - default_tags: Optional[List[str]] = None, - instrument_llm_calls: Optional[bool] = None, - auto_start_session: Optional[bool] = None, - skip_auto_end_session: Optional[bool] = None, - env_data_opt_out: Optional[bool] = None, - ): - if self.has_sessions: - return logger.warning( - f"{len(self._sessions)} session(s) in progress. Configuration is locked until there are no more sessions running" - ) - - self._config.configure( - self, - api_key=api_key, - parent_key=parent_key, - endpoint=endpoint, - max_wait_time=max_wait_time, - max_queue_size=max_queue_size, - default_tags=default_tags, - instrument_llm_calls=instrument_llm_calls, - auto_start_session=auto_start_session, - skip_auto_end_session=skip_auto_end_session, - env_data_opt_out=env_data_opt_out, - ) - - def initialize(self) -> Union[Session, None]: - if self.is_initialized: - return - - self.unsuppress_logs() - if self._config.api_key is None: - return logger.error( - "Could not initialize AgentOps client - API Key is missing." 
- + "\n\t Find your API key at https://app.agentops.ai/settings/projects" - ) - - self._handle_unclean_exits() - self._initialized = True - - if self._config.instrument_llm_calls: - self._llm_tracker = LlmTracker(self) - self._llm_tracker.override_api() - - session = None - if self._config.auto_start_session: - session = self.start_session() - - if session: - for agent_args in self._pre_init_queue["agents"]: - session.create_agent(name=agent_args["name"], agent_id=agent_args["agent_id"]) - self._pre_init_queue["agents"] = [] - - return session - - def _initialize_autogen_logger(self) -> None: - try: - import autogen - - from .partners.autogen_logger import AutogenLogger - - autogen.runtime_logging.start(logger=AutogenLogger()) - except ImportError: - pass - except Exception as e: - logger.warning(f"Failed to set up AutoGen logger with AgentOps. Error: {e}") - - def add_tags(self, tags: List[str]) -> None: - """ - Append to session tags at runtime. - - Args: - tags (List[str]): The list of tags to append. - """ - if not self.is_initialized: - return - - # if a string and not a list of strings - if not (isinstance(tags, list) and all(isinstance(item, str) for item in tags)): - if isinstance(tags, str): # if it's a single string - tags = [tags] # make it a list - - session = self._safe_get_session() - if session is None: - return logger.warning("Could not add tags. Start a session by calling agentops.start_session().") - - session.add_tags(tags=tags) - - self._update_session(session) - - def set_tags(self, tags: List[str]) -> None: - """ - Replace session tags at runtime. - - Args: - tags (List[str]): The list of tags to set. - """ - if not self.is_initialized: - return - - session = self._safe_get_session() - - if session is None: - return logger.warning("Could not set tags. Start a session by calling agentops.start_session().") - else: - session.set_tags(tags=tags) - - def add_default_tags(self, tags: List[str]) -> None: - """ - Append default tags at runtime. - - Args: - tags (List[str]): The list of tags to set. - """ - self._config.default_tags.update(tags) - - def get_default_tags(self) -> List[str]: - """ - Append default tags at runtime. - - Args: - tags (List[str]): The list of tags to set. - """ - return list(self._config.default_tags) - - def record(self, event: Union[Event, ErrorEvent]) -> None: - """ - Record an event with the AgentOps service. - - Args: - event (Event): The event to record. - """ - if not self.is_initialized: - return - - session = self._safe_get_session() - if session is None: - return logger.error("Could not record event. Start a session by calling agentops.start_session().") - session.record(event) - - def start_session( - self, - tags: Optional[List[str]] = None, - inherited_session_id: Optional[str] = None, - ) -> Union[Session, None]: - """ - Start a new session for recording events. - - Args: - tags (List[str], optional): Tags that can be used for grouping or sorting later. - e.g. ["test_run"]. 
- config: (Configuration, optional): Client configuration object - inherited_session_id (optional, str): assign session id to match existing Session - """ - if not self.is_initialized: - return - - if inherited_session_id is not None: - try: - session_id = UUID(inherited_session_id) - except ValueError: - return logger.warning(f"Invalid session id: {inherited_session_id}") - else: - session_id = uuid4() - - session_tags = self._config.default_tags.copy() - if tags is not None: - session_tags.update(tags) - - session = Session( - session_id=session_id, - tags=list(session_tags), - host_env=self.host_env, - config=self._config, - ) - - if not session.is_running: - return logger.error("Failed to start session") - - if self._pre_init_queue["agents"] and len(self._pre_init_queue["agents"]) > 0: - for agent_args in self._pre_init_queue["agents"]: - session.create_agent(name=agent_args["name"], agent_id=agent_args["agent_id"]) - self._pre_init_queue["agents"] = [] - - self._sessions.append(session) - return session - - def end_session( - self, - end_state: str, - end_state_reason: Optional[str] = None, - video: Optional[str] = None, - is_auto_end: Optional[bool] = None, - ) -> Optional[Decimal]: - """ - End the current session with the AgentOps service. - - Args: - end_state (str): The final state of the session. Options: Success, Fail, or Indeterminate (default). - end_state_reason (str, optional): The reason for ending the session. - video (str, optional): The video screen recording of the session - is_auto_end (bool, optional): is this an automatic use of end_session and should be skipped with skip_auto_end_session - - Returns: - Decimal: The token cost of the session. Returns 0 if the cost is unknown. - """ - session = self._safe_get_session() - if session is None: - return - if is_auto_end and self._config.skip_auto_end_session: - return - - token_cost = session.end_session(end_state=end_state, end_state_reason=end_state_reason, video=video) - - return token_cost - - def create_agent( - self, - name: str, - agent_id: Optional[str] = None, - session: Optional[Session] = None, - ): - if agent_id is None: - agent_id = str(uuid4()) - - # if a session is passed in, use multi-session logic - if session: - return session.create_agent(name=name, agent_id=agent_id) - else: - # if no session passed, assume single session - session = self._safe_get_session() - if session is None: - self._pre_init_queue["agents"].append({"name": name, "agent_id": agent_id}) - else: - session.create_agent(name=name, agent_id=agent_id) - - return agent_id - - def _handle_unclean_exits(self): - def cleanup(end_state: str = "Fail", end_state_reason: Optional[str] = None): - for session in self._sessions: - if session.end_state is None: - session.end_session( - end_state=end_state, - end_state_reason=end_state_reason, - ) - - def signal_handler(signum, frame): - """ - Signal handler for SIGINT (Ctrl+C) and SIGTERM. Ends the session and exits the program. - - Args: - signum (int): The signal number. - frame: The current stack frame. - """ - signal_name = "SIGINT" if signum == signal.SIGINT else "SIGTERM" - logger.info("%s detected. Ending session...", signal_name) - self.end_session(end_state="Fail", end_state_reason=f"Signal {signal_name} detected") - sys.exit(0) - - def handle_exception(exc_type, exc_value, exc_traceback): - """ - Handle uncaught exceptions before they result in program termination. - - Args: - exc_type (Type[BaseException]): The type of the exception. - exc_value (BaseException): The exception instance. 
-                exc_traceback (TracebackType): A traceback object encapsulating the call stack at the
-                    point where the exception originally occurred.
-            """
-            formatted_traceback = "".join(traceback.format_exception(exc_type, exc_value, exc_traceback))
-
-            for session in self._sessions:
-                session.end_session(
-                    end_state="Fail",
-                    end_state_reason=f"{str(exc_value)}: {formatted_traceback}",
-                )
-
-            # Then call the default excepthook to exit the program
-            sys.__excepthook__(exc_type, exc_value, exc_traceback)
-
-        # if main thread
-        if threading.current_thread() is threading.main_thread():
-            atexit.register(
-                lambda: cleanup(
-                    end_state="Indeterminate",
-                    end_state_reason="N/A (process exited without calling agentops.end_session(...))",
-                )
-            )
-            signal.signal(signal.SIGINT, signal_handler)
-            signal.signal(signal.SIGTERM, signal_handler)
-            sys.excepthook = handle_exception
-
-    def stop_instrumenting(self):
-        if self._llm_tracker is not None:
-            self._llm_tracker.stop_instrumenting()
-
-    def add_pre_init_warning(self, message: str):
-        self._pre_init_messages.append(message)
-
-    # replaces the session currently stored with a specific session_id, with a new session
-    def _update_session(self, session: Session):
-        self._sessions[
-            self._sessions.index([sess for sess in self._sessions if sess.session_id == session.session_id][0])
-        ] = session
-
-    def _safe_get_session(self) -> Optional[Session]:
-        if not self.is_initialized:
-            return None
-        if len(self._sessions) == 1:
-            return self._sessions[0]
-
-        if len(self._sessions) > 1:
-            calling_function = inspect.stack()[2].function # Using index 2 because we have a wrapper at index 1
-            return logger.warning(
-                f"Multiple sessions detected. You must use session.{calling_function}(). More info: https://docs.agentops.ai/v1/concepts/core-concepts#session-management"
-            )
-
-        return None
-
-    def get_session(self, session_id: str):
-        """
-        Get an active (not ended) session from the AgentOps service
-
-        Args:
-            session_id (str): the session id for the session to be retrieved
-        """
-        for session in self._sessions:
-            if session.session_id == session_id:
-                return session
-
-    def unsuppress_logs(self):
-        logging_level = os.getenv("AGENTOPS_LOGGING_LEVEL", "INFO")
-        log_levels = {
-            "CRITICAL": logging.CRITICAL,
-            "ERROR": logging.ERROR,
-            "INFO": logging.INFO,
-            "WARNING": logging.WARNING,
-            "DEBUG": logging.DEBUG,
-        }
-        logger.setLevel(log_levels.get(logging_level, "INFO"))
-
-        for message in self._pre_init_messages:
-            logger.warning(message)
-
-    def end_all_sessions(self):
-        for s in self._sessions:
-            s.end_session()
-
-        self._sessions.clear()
-
-    @property
-    def is_initialized(self) -> bool:
-        return self._initialized
-
-    @property
-    def has_sessions(self) -> bool:
-        return len(self._sessions) > 0
-
-    @property
-    def is_multi_session(self) -> bool:
-        return len(self._sessions) > 1
-
-    @property
-    def session_count(self) -> int:
-        return len(self._sessions)
-
-    @property
-    def current_session_ids(self) -> List[str]:
-        return [str(s.session_id) for s in self._sessions]
-
-    @property
-    def api_key(self):
-        return self._config.api_key
-
-    @property
-    def parent_key(self):
-        return self._config.parent_key
-
-    @cached_property
-    def host_env(self):
-        """Cache and reuse host environment data"""
-        return get_host_env(self._config.env_data_opt_out)
diff --git a/agentops/decorators.py b/agentops/decorators.py
deleted file mode 100644
index 62e18a62f..000000000
--- a/agentops/decorators.py
+++ /dev/null
@@ -1,347 +0,0 @@
-import functools
-import inspect
-from typing import Optional,
Union -from uuid import uuid4 - -from .client import Client -from .descriptor import agentops_property -from .event import ActionEvent, ErrorEvent, ToolEvent -from .helpers import check_call_stack_for_agent_id, get_ISO_time -from .log_config import logger -from .session import Session - - -def record_function(event_name: str): - logger.warning( - "DEPRECATION WARNING: record_function has been replaced with record_action and will be removed in the next minor version. Also see: record_tool" - ) - return record_action(event_name) - - -def record_action(event_name: Optional[str] = None): - """ - Decorator to record an event before and after a function call. - Usage: - - Actions: Records function parameters and return statements of the - function being decorated. Additionally, timing information about - the action is recorded - Args: - event_name (optional, str): The name of the event to record. - """ - - def decorator(func): - if inspect.iscoroutinefunction(func): - - @functools.wraps(func) - async def async_wrapper(*args, session: Optional[Session] = None, **kwargs): - init_time = get_ISO_time() - if "session" in kwargs.keys(): - del kwargs["session"] - if session is None: - if Client().is_multi_session: - raise ValueError( - "If multiple sessions exists, `session` is a required parameter in the function decorated by @record_action" - ) - func_args = inspect.signature(func).parameters - arg_names = list(func_args.keys()) - # Get default values - arg_values = { - name: func_args[name].default for name in arg_names if func_args[name].default is not inspect._empty - } - # Update with positional arguments - arg_values.update(dict(zip(arg_names, args))) - arg_values.update(kwargs) - - if not event_name: - action_type = func.__name__ - else: - action_type = event_name - - event = ActionEvent( - params=arg_values, - init_timestamp=init_time, - agent_id=check_call_stack_for_agent_id(), - action_type=action_type, - ) - - try: - returns = await func(*args, **kwargs) - - event.returns = list(returns) if isinstance(returns, tuple) else returns - - # NOTE: Will likely remove in future since this is tightly coupled. Adding it to see how useful we find it for now - # TODO: check if screenshot is the url string we expect it to be? And not e.g. 
"True" - if hasattr(returns, "screenshot"): - event.screenshot = returns.screenshot # type: ignore - - event.end_timestamp = get_ISO_time() - - if session: - session.record(event) - else: - Client().record(event) - - except Exception as e: - Client().record(ErrorEvent(trigger_event=event, exception=e)) - - # Re-raise the exception - raise - - return returns - - return async_wrapper - else: - - @functools.wraps(func) - def sync_wrapper(*args, session: Optional[Session] = None, **kwargs): - init_time = get_ISO_time() - if "session" in kwargs.keys(): - del kwargs["session"] - if session is None: - if Client().is_multi_session: - raise ValueError( - "If multiple sessions exists, `session` is a required parameter in the function decorated by @record_action" - ) - func_args = inspect.signature(func).parameters - arg_names = list(func_args.keys()) - # Get default values - arg_values = { - name: func_args[name].default for name in arg_names if func_args[name].default is not inspect._empty - } - # Update with positional arguments - arg_values.update(dict(zip(arg_names, args))) - arg_values.update(kwargs) - - if not event_name: - action_type = func.__name__ - else: - action_type = event_name - - event = ActionEvent( - params=arg_values, - init_timestamp=init_time, - agent_id=check_call_stack_for_agent_id(), - action_type=action_type, - ) - - try: - returns = func(*args, **kwargs) - - event.returns = list(returns) if isinstance(returns, tuple) else returns - - if hasattr(returns, "screenshot"): - event.screenshot = returns.screenshot # type: ignore - - event.end_timestamp = get_ISO_time() - - if session: - session.record(event) - else: - Client().record(event) - - except Exception as e: - Client().record(ErrorEvent(trigger_event=event, exception=e)) - - # Re-raise the exception - raise - - return returns - - return sync_wrapper - - return decorator - - -def record_tool(tool_name: Optional[str] = None): - """ - Decorator to record a tool use event before and after a function call. - Usage: - - Tools: Records function parameters and return statements of the - function being decorated. Additionally, timing information about - the action is recorded - Args: - tool_name (optional, str): The name of the event to record. - """ - - def decorator(func): - if inspect.iscoroutinefunction(func): - - @functools.wraps(func) - async def async_wrapper(*args, session: Optional[Session] = None, **kwargs): - init_time = get_ISO_time() - if "session" in kwargs.keys(): - del kwargs["session"] - if session is None: - if Client().is_multi_session: - raise ValueError( - "If multiple sessions exists, `session` is a required parameter in the function decorated by @record_tool" - ) - func_args = inspect.signature(func).parameters - arg_names = list(func_args.keys()) - # Get default values - arg_values = { - name: func_args[name].default for name in arg_names if func_args[name].default is not inspect._empty - } - # Update with positional arguments - arg_values.update(dict(zip(arg_names, args))) - arg_values.update(kwargs) - - if not tool_name: - name = func.__name__ - else: - name = tool_name - - event = ToolEvent( - params=arg_values, - init_timestamp=init_time, - agent_id=check_call_stack_for_agent_id(), - name=name, - ) - - try: - returns = await func(*args, **kwargs) - - event.returns = list(returns) if isinstance(returns, tuple) else returns - - # NOTE: Will likely remove in future since this is tightly coupled. 
Adding it to see how useful we find it for now
-                    # TODO: check if screenshot is the url string we expect it to be? And not e.g. "True"
-                    if hasattr(returns, "screenshot"):
-                        event.screenshot = returns.screenshot # type: ignore
-
-                    event.end_timestamp = get_ISO_time()
-
-                    if session:
-                        session.record(event)
-                    else:
-                        Client().record(event)
-
-                except Exception as e:
-                    Client().record(ErrorEvent(trigger_event=event, exception=e))
-
-                    # Re-raise the exception
-                    raise
-
-                return returns
-
-            return async_wrapper
-        else:
-
-            @functools.wraps(func)
-            def sync_wrapper(*args, session: Optional[Session] = None, **kwargs):
-                init_time = get_ISO_time()
-                if "session" in kwargs.keys():
-                    del kwargs["session"]
-                if session is None:
-                    if Client().is_multi_session:
-                        raise ValueError(
-                            "If multiple sessions exists, `session` is a required parameter in the function decorated by @record_tool"
-                        )
-                func_args = inspect.signature(func).parameters
-                arg_names = list(func_args.keys())
-                # Get default values
-                arg_values = {
-                    name: func_args[name].default for name in arg_names if func_args[name].default is not inspect._empty
-                }
-                # Update with positional arguments
-                arg_values.update(dict(zip(arg_names, args)))
-                arg_values.update(kwargs)
-
-                if not tool_name:
-                    name = func.__name__
-                else:
-                    name = tool_name
-
-                event = ToolEvent(
-                    params=arg_values,
-                    init_timestamp=init_time,
-                    agent_id=check_call_stack_for_agent_id(),
-                    name=name,
-                )
-
-                try:
-                    returns = func(*args, **kwargs)
-
-                    event.returns = list(returns) if isinstance(returns, tuple) else returns
-
-                    if hasattr(returns, "screenshot"):
-                        event.screenshot = returns.screenshot # type: ignore
-
-                    event.end_timestamp = get_ISO_time()
-
-                    if session:
-                        session.record(event)
-                    else:
-                        Client().record(event)
-
-                except Exception as e:
-                    Client().record(ErrorEvent(trigger_event=event, exception=e))
-
-                    # Re-raise the exception
-                    raise
-
-                return returns
-
-            return sync_wrapper
-
-    return decorator
-
-
-def track_agent(name: Union[str, None] = None):
-    def decorator(obj):
-        if inspect.isclass(obj):
-            # Set up the descriptors on the class
-            setattr(obj, "agentops_agent_id", agentops_property())
-            setattr(obj, "agentops_agent_name", agentops_property())
-
-            original_init = obj.__init__
-
-            def new_init(self, *args, **kwargs):
-                """
-                Within the __init__ method, we set agentops_ properties via the private, internal descriptor
-                """
-                try:
-                    # Handle name from kwargs first
-                    name_ = kwargs.pop("agentops_name", None)
-
-                    # Call original init
-                    original_init(self, *args, **kwargs)
-
-                    # Set the agent ID
-                    self._agentops_agent_id = str(uuid4())
-
-                    # Force set the private name directly to bypass potential Pydantic interference
-                    if name_ is not None:
-                        setattr(self, "_agentops_agent_name", name_)
-                    elif name is not None:
-                        setattr(self, "_agentops_agent_name", name)
-                    elif hasattr(self, "role"):
-                        setattr(self, "_agentops_agent_name", self.role)
-
-                    session = kwargs.get("session", None)
-                    if session is not None:
-                        self._agentops_session_id = session.session_id
-
-                    Client().create_agent(
-                        name=self.agentops_agent_name,
-                        agent_id=self.agentops_agent_id,
-                        session=session,
-                    )
-
-                except AttributeError as ex:
-                    logger.debug(ex)
-                    Client().add_pre_init_warning(f"Failed to track an agent {name} with the @track_agent decorator.")
-                    logger.warning("Failed to track an agent with the @track_agent decorator.")
-
-            obj.__init__ = new_init
-
-        elif inspect.isfunction(obj):
-            obj.agentops_agent_id = str(uuid4())
-            obj.agentops_agent_name = name
-
Client().create_agent(name=obj.agentops_agent_name, agent_id=obj.agentops_agent_id) - - else: - raise Exception("Invalid input, 'obj' must be a class or a function") - - return obj - - return decorator diff --git a/agentops/descriptor.py b/agentops/descriptor.py deleted file mode 100644 index 020804cbe..000000000 --- a/agentops/descriptor.py +++ /dev/null @@ -1,187 +0,0 @@ -import inspect -import logging -from typing import Union -from uuid import UUID - - -class agentops_property: - """ - A descriptor that provides a standardized way to handle agent property access and storage. - Properties are automatically stored with an '_agentops_' prefix to avoid naming conflicts. - - The descriptor can be used in two ways: - 1. As a class attribute directly - 2. Added dynamically through a decorator (like @track_agent) - - Attributes: - private_name (str): The internal name used for storing the property value, - prefixed with '_agentops_'. Set either through __init__ or __set_name__. - - Example: - ```python - # Direct usage in a class - class Agent: - name = agentops_property() - id = agentops_property() - - def __init__(self): - self.name = "Agent1" # Stored as '_agentops_name' - self.id = "123" # Stored as '_agentops_id' - - # Usage with decorator - @track_agent() - class Agent: - pass - # agentops_agent_id and agentops_agent_name are added automatically - ``` - - Notes: - - Property names with 'agentops_' prefix are automatically stripped when creating - the internal storage name - - Returns None if the property hasn't been set - - The descriptor will attempt to resolve property names even when added dynamically - """ - - def __init__(self, name=None): - """ - Initialize the descriptor. - - Args: - name (str, optional): The name for the property. Used as fallback when - the descriptor is added dynamically and __set_name__ isn't called. - """ - self.private_name = None - if name: - self.private_name = f"_agentops_{name.replace('agentops_', '')}" - - def __set_name__(self, owner, name): - """ - Called by Python when the descriptor is defined directly in a class. - Sets up the private name used for attribute storage. - - Args: - owner: The class that owns this descriptor - name: The name given to this descriptor in the class - """ - self.private_name = f"_agentops_{name.replace('agentops_', '')}" - - def __get__(self, obj, objtype=None): - """ - Get the property value. - - Args: - obj: The instance to get the property from - objtype: The class of the instance - - Returns: - The property value, or None if not set - The descriptor itself if accessed on the class rather than an instance - - Raises: - AttributeError: If the property name cannot be determined - """ - if obj is None: - return self - - # Handle case where private_name wasn't set by __set_name__ - if self.private_name is None: - # Try to find the name by looking through the class dict - for name, value in type(obj).__dict__.items(): - if value is self: - self.private_name = f"_agentops_{name.replace('agentops_', '')}" - break - if self.private_name is None: - raise AttributeError("Property name could not be determined") - - # First try getting from object's __dict__ (for Pydantic) - if hasattr(obj, "__dict__"): - dict_value = obj.__dict__.get(self.private_name[1:]) - if dict_value is not None: - return dict_value - - # Fall back to our private storage - return getattr(obj, self.private_name, None) - - def __set__(self, obj, value): - """ - Set the property value. 
- - Args: - obj: The instance to set the property on - value: The value to set - - Raises: - AttributeError: If the property name cannot be determined - """ - if self.private_name is None: - # Same name resolution as in __get__ - for name, val in type(obj).__dict__.items(): - if val is self: - self.private_name = f"_agentops_{name.replace('agentops_', '')}" - break - if self.private_name is None: - raise AttributeError("Property name could not be determined") - - # Set in both object's __dict__ (for Pydantic) and our private storage - if hasattr(obj, "__dict__"): - obj.__dict__[self.private_name[1:]] = value - setattr(obj, self.private_name, value) - - def __delete__(self, obj): - """ - Delete the property value. - - Args: - obj: The instance to delete the property from - - Raises: - AttributeError: If the property name cannot be determined - """ - if self.private_name is None: - raise AttributeError("Property name could not be determined") - try: - delattr(obj, self.private_name) - except AttributeError: - pass - - @staticmethod - def stack_lookup() -> Union[UUID, None]: - """ - Look through the call stack to find an agent ID. - - This method searches the call stack for objects that have agentops_property - descriptors and returns the agent_id if found. - - Returns: - UUID: The agent ID if found in the call stack - None: If no agent ID is found or if "__main__" is encountered - """ - for frame_info in inspect.stack(): - local_vars = frame_info.frame.f_locals - - for var_name, var in local_vars.items(): - # Stop at main - if var == "__main__": - return None - - try: - # Check if object has our AgentOpsDescriptor descriptors - var_type = type(var) - - # Get all class attributes - class_attrs = {name: getattr(var_type, name, None) for name in dir(var_type)} - - agent_id_desc = class_attrs.get("agentops_agent_id") - - if isinstance(agent_id_desc, agentops_property): - agent_id = agent_id_desc.__get__(var, var_type) - - if agent_id: - agent_name_desc = class_attrs.get("agentops_agent_name") - if isinstance(agent_name_desc, agentops_property): - agent_name = agent_name_desc.__get__(var, var_type) - return agent_id - except Exception: - continue - - return None diff --git a/agentops/helpers.py b/agentops/helpers.py deleted file mode 100644 index ca0c4f0e3..000000000 --- a/agentops/helpers.py +++ /dev/null @@ -1,176 +0,0 @@ -import inspect -import json -from datetime import datetime, timezone -from functools import wraps -from importlib.metadata import PackageNotFoundError, version -from pprint import pformat -from typing import Any, Optional, Union -from uuid import UUID -from .descriptor import agentops_property - -import requests - -from .log_config import logger - - -def get_ISO_time(): - """ - Get the current UTC time in ISO 8601 format with milliseconds precision in UTC timezone. - - Returns: - str: The current UTC time as a string in ISO 8601 format. 
- """ - return datetime.now(timezone.utc).isoformat() - - -def is_jsonable(x): - try: - json.dumps(x) - return True - except (TypeError, OverflowError): - return False - - -def filter_unjsonable(d: dict) -> dict: - def filter_dict(obj): - if isinstance(obj, dict): - # TODO: clean up this mess lol - return { - k: ( - filter_dict(v) - if isinstance(v, (dict, list)) or is_jsonable(v) - else str(v) - if isinstance(v, UUID) - else "" - ) - for k, v in obj.items() - } - elif isinstance(obj, list): - return [ - ( - filter_dict(x) - if isinstance(x, (dict, list)) or is_jsonable(x) - else str(x) - if isinstance(x, UUID) - else "" - ) - for x in obj - ] - else: - return obj if is_jsonable(obj) or isinstance(obj, UUID) else "" - - return filter_dict(d) - - -def safe_serialize(obj): - def default(o): - try: - if isinstance(o, UUID): - return str(o) - elif hasattr(o, "model_dump_json"): - return str(o.model_dump_json()) - elif hasattr(o, "to_json"): - return str(o.to_json()) - elif hasattr(o, "json"): - return str(o.json()) - elif hasattr(o, "to_dict"): - return {k: str(v) for k, v in o.to_dict().items() if not callable(v)} - elif hasattr(o, "dict"): - return {k: str(v) for k, v in o.dict().items() if not callable(v)} - elif isinstance(o, dict): - return {k: str(v) for k, v in o.items()} - elif isinstance(o, list): - return [str(item) for item in o] - else: - return f"<>" - except Exception as e: - return f"<>" - - def remove_unwanted_items(value): - """Recursively remove self key and None/... values from dictionaries so they aren't serialized""" - if isinstance(value, dict): - return { - k: remove_unwanted_items(v) for k, v in value.items() if v is not None and v is not ... and k != "self" - } - elif isinstance(value, list): - return [remove_unwanted_items(item) for item in value] - else: - return value - - cleaned_obj = remove_unwanted_items(obj) - return json.dumps(cleaned_obj, default=default) - - -def check_call_stack_for_agent_id() -> Union[UUID, None]: - return agentops_property.stack_lookup() - - -def get_agentops_version(): - try: - pkg_version = version("agentops") - return pkg_version - except Exception as e: - logger.warning("Error reading package version: %s", e) - return None - - -def check_agentops_update(): - try: - response = requests.get("https://pypi.org/pypi/agentops/json") - - if response.status_code == 200: - json_data = response.json() - latest_version = json_data["info"]["version"] - - try: - current_version = version("agentops") - except PackageNotFoundError: - return None - - if not latest_version == current_version: - logger.warning( - " WARNING: agentops is out of date. 
Please update with the command: 'pip install --upgrade agentops'" - ) - except Exception as e: - logger.debug(f"Failed to check for updates: {e}") - return None - - -# Function decorator that prints function name and its arguments to the console for debug purposes -# Example output: -# -# on_llm_start called with arguments: -# run_id: UUID('5fda42fe-809b-4179-bad2-321d1a6090c7') -# parent_run_id: UUID('63f1c4da-3e9f-4033-94d0-b3ebed06668f') -# tags: [] -# metadata: {} -# invocation_params: {'_type': 'openai-chat', -# 'model': 'gpt-3.5-turbo', -# 'model_name': 'gpt-3.5-turbo', -# 'n': 1, -# 'stop': ['Observation:'], -# 'stream': False, -# 'temperature': 0.7} -# options: {'stop': ['Observation:']} -# name: None -# batch_size: 1 -# - -# regex to filter for just this: -# ([\s\S]*?)<\/AGENTOPS_DEBUG_OUTPUT>\n - - -def debug_print_function_params(func): - @wraps(func) - def wrapper(self, *args, **kwargs): - logger.debug("\n") - logger.debug(f"{func.__name__} called with arguments:") - - for key, value in kwargs.items(): - logger.debug(f"{key}: {pformat(value)}") - - logger.debug("\n") - - return func(self, *args, **kwargs) - - return wrapper diff --git a/agentops/host_env.py b/agentops/host_env.py deleted file mode 100644 index d3f798b72..000000000 --- a/agentops/host_env.py +++ /dev/null @@ -1,150 +0,0 @@ -import platform -import psutil -import socket -from .helpers import get_agentops_version -from .log_config import logger -import importlib.metadata -import os -import sys - - -def get_sdk_details(): - try: - return { - "AgentOps SDK Version": get_agentops_version(), - "Python Version": platform.python_version(), - "System Packages": get_sys_packages(), - } - except: - return {} - - -def get_python_details(): - try: - return {"Python Version": platform.python_version()} - except: - return {} - - -def get_agentops_details(): - try: - return {"AgentOps SDK Version": get_agentops_version()} - except: - return {} - - -def get_sys_packages(): - sys_packages = {} - for module in sys.modules: - try: - version = importlib.metadata.version(module) - sys_packages[module] = version - except importlib.metadata.PackageNotFoundError: - # Skip built-in modules and those without package metadata - continue - - return sys_packages - - -def get_installed_packages(): - try: - return { - # TODO: add to opt out - "Installed Packages": { - dist.metadata.get("Name"): dist.metadata.get("Version") for dist in importlib.metadata.distributions() - } - } - except: - return {} - - -def get_current_directory(): - try: - return {"Project Working Directory": os.getcwd()} - except: - return {} - - -def get_virtual_env(): - try: - return {"Virtual Environment": os.environ.get("VIRTUAL_ENV", None)} - except: - return {} - - -def get_os_details(): - try: - return { - "Hostname": socket.gethostname(), - "OS": platform.system(), - "OS Version": platform.version(), - "OS Release": platform.release(), - } - except: - return {} - - -def get_cpu_details(): - try: - return { - "Physical cores": psutil.cpu_count(logical=False), - "Total cores": psutil.cpu_count(logical=True), - # "Max Frequency": f"{psutil.cpu_freq().max:.2f}Mhz", # Fails right now - "CPU Usage": f"{psutil.cpu_percent()}%", - } - except: - return {} - - -def get_ram_details(): - try: - ram_info = psutil.virtual_memory() - return { - "Total": f"{ram_info.total / (1024**3):.2f} GB", - "Available": f"{ram_info.available / (1024**3):.2f} GB", - "Used": f"{ram_info.used / (1024**3):.2f} GB", - "Percentage": f"{ram_info.percent}%", - } - except: - return {} - - -def 
get_disk_details(): - partitions = psutil.disk_partitions() - disk_info = {} - for partition in partitions: - try: - usage = psutil.disk_usage(partition.mountpoint) - disk_info[partition.device] = { - "Mountpoint": partition.mountpoint, - "Total": f"{usage.total / (1024**3):.2f} GB", - "Used": f"{usage.used / (1024**3):.2f} GB", - "Free": f"{usage.free / (1024**3):.2f} GB", - "Percentage": f"{usage.percent}%", - } - except OSError as inaccessible: - # Skip inaccessible partitions, such as removable drives with no media - logger.debug("Mountpoint %s inaccessible: %s", partition.mountpoint, inaccessible) - - return disk_info - - -def get_host_env(opt_out: bool = False): - if opt_out: - return { - "SDK": get_sdk_details(), - "OS": get_os_details(), - "Project Working Directory": get_current_directory(), - "Virtual Environment": get_virtual_env(), - } - else: - return { - "SDK": get_sdk_details(), - "OS": get_os_details(), - "CPU": get_cpu_details(), - "RAM": get_ram_details(), - "Disk": get_disk_details(), - "Installed Packages": get_installed_packages(), - "Project Working Directory": get_current_directory(), - "Virtual Environment": get_virtual_env(), - } diff --git a/agentops/http_client.py b/agentops/http_client.py deleted file mode 100644 index 11c0bf49f..000000000 --- a/agentops/http_client.py +++ /dev/null @@ -1,208 +0,0 @@ -from enum import Enum -from typing import Optional, Dict, Any - -import requests -from requests.adapters import HTTPAdapter, Retry -import json - -from .exceptions import ApiServerException - -JSON_HEADER = {"Content-Type": "application/json; charset=UTF-8", "Accept": "*/*"} - -retry_config = Retry(total=5, backoff_factor=0.1) - - -class HttpStatus(Enum): - SUCCESS = 200 - INVALID_REQUEST = 400 - INVALID_API_KEY = 401 - TIMEOUT = 408 - PAYLOAD_TOO_LARGE = 413 - TOO_MANY_REQUESTS = 429 - FAILED = 500 - UNKNOWN = -1 - - -class Response: - def __init__(self, status: HttpStatus = HttpStatus.UNKNOWN, body: Optional[dict] = None): - self.status: HttpStatus = status - self.code: int = status.value - self.body = body if body else {} - - def parse(self, res: requests.models.Response): - res_body = res.json() - self.code = res.status_code - self.status = self.get_status(self.code) - self.body = res_body - return self - - @staticmethod - def get_status(code: int) -> HttpStatus: - if 200 <= code < 300: - return HttpStatus.SUCCESS - elif code == 429: - return HttpStatus.TOO_MANY_REQUESTS - elif code == 413: - return HttpStatus.PAYLOAD_TOO_LARGE - elif code == 408: - return HttpStatus.TIMEOUT - elif code == 401: - return HttpStatus.INVALID_API_KEY - elif 400 <= code < 500: - return HttpStatus.INVALID_REQUEST - elif code >= 500: - return HttpStatus.FAILED - return HttpStatus.UNKNOWN - - -class HttpClient: - _session: Optional[requests.Session] = None - - @classmethod - def get_session(cls) -> requests.Session: - """Get or create the global session with optimized connection pooling""" - if cls._session is None: - cls._session = requests.Session() - - # Configure connection pooling - adapter = requests.adapters.HTTPAdapter( - pool_connections=15, # Number of connection pools - pool_maxsize=256, # Connections per pool - max_retries=Retry(total=3, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504]), - ) - - # Mount adapter for both HTTP and HTTPS - cls._session.mount("http://", adapter) - cls._session.mount("https://", adapter) - - # Set default headers - cls._session.headers.update( - { - "Connection": "keep-alive", - "Keep-Alive": "timeout=10, max=1000", - "Content-Type": 
"application/json", - } - ) - - return cls._session - - @classmethod - def _prepare_headers( - cls, - api_key: Optional[str] = None, - parent_key: Optional[str] = None, - jwt: Optional[str] = None, - custom_headers: Optional[dict] = None, - ) -> dict: - """Prepare headers for the request""" - headers = JSON_HEADER.copy() - - if api_key is not None: - headers["X-Agentops-Api-Key"] = api_key - - if parent_key is not None: - headers["X-Agentops-Parent-Key"] = parent_key - - if jwt is not None: - headers["Authorization"] = f"Bearer {jwt}" - - if custom_headers is not None: - headers.update(custom_headers) - - return headers - - @classmethod - def post( - cls, - url: str, - payload: bytes, - api_key: Optional[str] = None, - parent_key: Optional[str] = None, - jwt: Optional[str] = None, - header: Optional[Dict[str, str]] = None, - ) -> Response: - """Make HTTP POST request using connection pooling""" - result = Response() - try: - headers = cls._prepare_headers(api_key, parent_key, jwt, header) - session = cls.get_session() - res = session.post(url, data=payload, headers=headers, timeout=20) - result.parse(res) - - except requests.exceptions.Timeout: - result.code = 408 - result.status = HttpStatus.TIMEOUT - raise ApiServerException("Could not reach API server - connection timed out") - except requests.exceptions.HTTPError as e: - try: - result.parse(e.response) - except Exception: - result = Response() - result.code = e.response.status_code - result.status = Response.get_status(e.response.status_code) - result.body = {"error": str(e)} - raise ApiServerException(f"HTTPError: {e}") - except requests.exceptions.RequestException as e: - result.body = {"error": str(e)} - raise ApiServerException(f"RequestException: {e}") - - if result.code == 401: - raise ApiServerException( - f"API server: invalid API key: {api_key}. Find your API key at https://app.agentops.ai/settings/projects" - ) - if result.code == 400: - if "message" in result.body: - raise ApiServerException(f"API server: {result.body['message']}") - else: - raise ApiServerException(f"API server: {result.body}") - if result.code == 500: - raise ApiServerException("API server: - internal server error") - - return result - - @classmethod - def get( - cls, - url: str, - api_key: Optional[str] = None, - jwt: Optional[str] = None, - header: Optional[Dict[str, str]] = None, - ) -> Response: - """Make HTTP GET request using connection pooling""" - result = Response() - try: - headers = cls._prepare_headers(api_key, None, jwt, header) - session = cls.get_session() - res = session.get(url, headers=headers, timeout=20) - result.parse(res) - - except requests.exceptions.Timeout: - result.code = 408 - result.status = HttpStatus.TIMEOUT - raise ApiServerException("Could not reach API server - connection timed out") - except requests.exceptions.HTTPError as e: - try: - result.parse(e.response) - except Exception: - result = Response() - result.code = e.response.status_code - result.status = Response.get_status(e.response.status_code) - result.body = {"error": str(e)} - raise ApiServerException(f"HTTPError: {e}") - except requests.exceptions.RequestException as e: - result.body = {"error": str(e)} - raise ApiServerException(f"RequestException: {e}") - - if result.code == 401: - raise ApiServerException( - f"API server: invalid API key: {api_key}. 
Find your API key at https://app.agentops.ai/settings/projects"
-            )
-        if result.code == 400:
-            if "message" in result.body:
-                raise ApiServerException(f"API server: {result.body['message']}")
-            else:
-                raise ApiServerException(f"API server: {result.body}")
-        if result.code == 500:
-            raise ApiServerException("API server: - internal server error")
-
-        return result
diff --git a/agentops/event.py b/agentops/legacy/event.py
similarity index 100%
rename from agentops/event.py
rename to agentops/legacy/event.py
diff --git a/agentops/log_config.py b/agentops/log_config.py
deleted file mode 100644
index 584cfb381..000000000
--- a/agentops/log_config.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import logging
-import os
-import re
-
-
-class AgentOpsLogFormatter(logging.Formatter):
-    blue = "\x1b[34m"
-    bold_red = "\x1b[31;1m"
-    reset = "\x1b[0m"
-    prefix = "🖇 AgentOps: "
-
-    FORMATS = {
-        logging.DEBUG: f"(DEBUG) {prefix}%(message)s",
-        logging.INFO: f"{prefix}%(message)s",
-        logging.WARNING: f"{prefix}%(message)s",
-        logging.ERROR: f"{bold_red}{prefix}%(message)s{reset}",
-        logging.CRITICAL: f"{bold_red}{prefix}%(message)s{reset}",
-    }
-
-    def format(self, record):
-        log_fmt = self.FORMATS.get(record.levelno, self.FORMATS[logging.INFO])
-        formatter = logging.Formatter(log_fmt)
-        return formatter.format(record)
-
-
-logger = logging.getLogger("agentops")
-logger.propagate = False
-logger.setLevel(logging.CRITICAL)
-
-# Streaming Handler
-stream_handler = logging.StreamHandler()
-stream_handler.setLevel(logging.DEBUG)
-stream_handler.setFormatter(AgentOpsLogFormatter())
-logger.addHandler(stream_handler)
-
-
-# File Handler
-class AgentOpsLogFileFormatter(logging.Formatter):
-    def format(self, record):
-        # Remove ANSI escape codes from the message
-        record.msg = ANSI_ESCAPE_PATTERN.sub("", str(record.msg))
-        return super().format(record)
-
-
-ANSI_ESCAPE_PATTERN = re.compile(r"\x1b\[[0-9;]*m")
-log_to_file = os.environ.get("AGENTOPS_LOGGING_TO_FILE", "False").lower() == "true"
-if log_to_file:
-    file_handler = logging.FileHandler("agentops.log", mode="w")
-    file_handler.setLevel(logging.DEBUG)
-    formatter = AgentOpsLogFileFormatter("%(asctime)s - %(levelname)s - %(message)s")
-    file_handler.setFormatter(formatter)
-    logger.addHandler(file_handler)
diff --git a/agentops/meta_client.py b/agentops/meta_client.py
deleted file mode 100644
index 6cc7ed2ef..000000000
--- a/agentops/meta_client.py
+++ /dev/null
@@ -1,64 +0,0 @@
-from .log_config import logger
-import traceback
-
-from .host_env import get_host_env
-from .http_client import HttpClient
-from .helpers import safe_serialize, get_agentops_version
-
-from os import environ
-
-
-class MetaClient(type):
-    """Metaclass to automatically decorate methods with exception handling and provide a shared exception handler."""
-
-    def __new__(cls, name, bases, dct):
-        # Wrap each method with the handle_exceptions decorator
-        for method_name, method in dct.items():
-            if (callable(method) and not method_name.startswith("__")) or method_name == "__init__":
-                dct[method_name] = handle_exceptions(method)
-
-        return super().__new__(cls, name, bases, dct)
-
-    def send_exception_to_server(cls, exception, api_key, session):
-        """Class method to send exception to server."""
-        if api_key:
-            exception_type = type(exception).__name__
-            exception_message = str(exception)
-            exception_traceback = traceback.format_exc()
-            developer_error = {
-                "sdk_version": get_agentops_version(),
-                "type": exception_type,
-                "message": exception_message,
-
"stack_trace": exception_traceback, - "host_env": get_host_env(), - } - - if session: - developer_error["session_id"] = session.session_id - try: - HttpClient.post( - "https://api.agentops.ai/v2/developer_errors", - safe_serialize(developer_error).encode("utf-8"), - api_key=api_key, - ) - except: - pass - - -def handle_exceptions(method): - """Decorator within the metaclass to wrap method execution in try-except block.""" - - def wrapper(self, *args, **kwargs): - try: - return method(self, *args, **kwargs) - except Exception as e: - logger.warning(f"Error: {e}") - config = getattr(self, "config", None) - if config is not None: - session = None - if len(self._sessions) > 0: - session = self._sessions[0] - type(self).send_exception_to_server(e, self.config._api_key, session) - raise e - - return wrapper diff --git a/agentops/session.py b/agentops/session.py deleted file mode 100644 index 95d1fba15..000000000 --- a/agentops/session.py +++ /dev/null @@ -1,672 +0,0 @@ -from __future__ import annotations - -import asyncio -import functools -import json -import threading -from datetime import datetime, timezone -from decimal import ROUND_HALF_UP, Decimal -from enum import Enum -from typing import Any, Dict, List, Optional, Sequence, Union -from uuid import UUID, uuid4 - -from opentelemetry import trace -from opentelemetry.context import attach, detach, set_value -from opentelemetry.sdk.resources import SERVICE_NAME, Resource -from opentelemetry.sdk.trace import ReadableSpan, TracerProvider -from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter, SpanExporter, SpanExportResult -from termcolor import colored - -from .config import Configuration -from .event import ErrorEvent, Event -from .exceptions import ApiServerException -from .helpers import filter_unjsonable, get_ISO_time, safe_serialize -from .http_client import HttpClient, Response -from .log_config import logger - -""" -OTEL Guidelines: - - - -- Maintain a single TracerProvider for the application runtime - - Have one global TracerProvider in the Client class - -- According to the OpenTelemetry Python documentation, Resource should be initialized once per application and shared across all telemetry (traces, metrics, logs). -- Each Session gets its own Tracer (with session-specific context) -- Allow multiple sessions to share the provider while maintaining their own context - - - -:: Resource - - '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' - Captures information about the entity producing telemetry as Attributes. - For example, a process producing telemetry that is running in a container - on Kubernetes has a process name, a pod name, a namespace, and possibly - a deployment name. All these attributes can be included in the Resource. - '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' - - The key insight from the documentation is: - - - Resource represents the entity producing telemetry - in our case, that's the AgentOps SDK application itself - - Session-specific information should be attributes on the spans themselves - - A Resource is meant to identify the service/process/application1 - - Sessions are units of work within that application - - The documentation example about "process name, pod name, namespace" refers to where the code is running, not the work it's doing - -""" - - -class EndState(Enum): - """ - Enum representing the possible end states of a session. - - Attributes: - SUCCESS: Indicates the session ended successfully. 
- FAIL: Indicates the session failed. - INDETERMINATE (default): Indicates the session ended with an indeterminate state. - This is the default state if not specified, e.g. if you forget to call end_session() - at the end of your program or don't pass it the end_state parameter - """ - - SUCCESS = "Success" - FAIL = "Fail" - INDETERMINATE = "Indeterminate" # Default - - -class SessionExporter(SpanExporter): - """ - Manages publishing events for Session - """ - - def __init__(self, session: Session, **kwargs): - self.session = session - self._shutdown = threading.Event() - self._export_lock = threading.Lock() - super().__init__(**kwargs) - - @property - def endpoint(self): - return f"{self.session.config.endpoint}/v2/create_events" - - def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult: - if self._shutdown.is_set(): - return SpanExportResult.SUCCESS - - with self._export_lock: - try: - # Skip if no spans to export - if not spans: - return SpanExportResult.SUCCESS - - events = [] - for span in spans: - event_data = json.loads(span.attributes.get("event.data", "{}")) - - # Format event data based on event type - if span.name == "actions": - formatted_data = { - "action_type": event_data.get("action_type", event_data.get("name", "unknown_action")), - "params": event_data.get("params", {}), - "returns": event_data.get("returns"), - } - elif span.name == "tools": - formatted_data = { - "name": event_data.get("name", event_data.get("tool_name", "unknown_tool")), - "params": event_data.get("params", {}), - "returns": event_data.get("returns"), - } - else: - formatted_data = event_data - - formatted_data = {**event_data, **formatted_data} - # Get timestamps, providing defaults if missing - current_time = datetime.now(timezone.utc).isoformat() - init_timestamp = span.attributes.get("event.timestamp") - end_timestamp = span.attributes.get("event.end_timestamp") - - # Handle missing timestamps - if init_timestamp is None: - init_timestamp = current_time - if end_timestamp is None: - end_timestamp = current_time - - # Get event ID, generate new one if missing - event_id = span.attributes.get("event.id") - if event_id is None: - event_id = str(uuid4()) - - events.append( - { - "id": event_id, - "event_type": span.name, - "init_timestamp": init_timestamp, - "end_timestamp": end_timestamp, - **formatted_data, - "session_id": str(self.session.session_id), - } - ) - - # Only make HTTP request if we have events and not shutdown - if events: - try: - res = HttpClient.post( - self.endpoint, - json.dumps({"events": events}).encode("utf-8"), - api_key=self.session.config.api_key, - jwt=self.session.jwt, - ) - return SpanExportResult.SUCCESS if res.code == 200 else SpanExportResult.FAILURE - except Exception as e: - logger.error(f"Failed to send events: {e}") - return SpanExportResult.FAILURE - - return SpanExportResult.SUCCESS - - except Exception as e: - logger.error(f"Failed to export spans: {e}") - return SpanExportResult.FAILURE - - def force_flush(self, timeout_millis: Optional[int] = None) -> bool: - return True - - def shutdown(self) -> None: - """Handle shutdown gracefully""" - self._shutdown.set() - # Don't call session.end_session() here to avoid circular dependencies - - -class Session: - """ - Represents a session of events, with a start and end state. - - Args: - session_id (UUID): The session id is used to record particular runs. - config (Configuration): The configuration object for the session. - tags (List[str], optional): Tags that can be used for grouping or sorting later. 
Examples could be ["GPT-4"]. - host_env (dict, optional): A dictionary containing host and environment data. - - Attributes: - init_timestamp (str): The ISO timestamp for when the session started. - end_timestamp (str, optional): The ISO timestamp for when the session ended. Only set after end_session is called. - end_state (str, optional): The final state of the session. Options: "Success", "Fail", "Indeterminate". Defaults to "Indeterminate". - end_state_reason (str, optional): The reason for ending the session. - session_id (UUID): Unique identifier for the session. - tags (List[str]): List of tags associated with the session for grouping and filtering. - video (str, optional): URL to a video recording of the session. - host_env (dict, optional): Dictionary containing host and environment data. - config (Configuration): Configuration object containing settings for the session. - jwt (str, optional): JSON Web Token for authentication with the AgentOps API. - token_cost (Decimal): Running total of token costs for the session. - event_counts (dict): Counter for different types of events: - - llms: Number of LLM calls - - tools: Number of tool calls - - actions: Number of actions - - errors: Number of errors - - apis: Number of API calls - session_url (str, optional): URL to view the session in the AgentOps dashboard. - is_running (bool): Flag indicating if the session is currently active. - """ - - def __init__( - self, - session_id: UUID, - config: Configuration, - tags: Optional[List[str]] = None, - host_env: Optional[dict] = None, - ): - self.end_timestamp = None - self.end_state: Optional[str] = "Indeterminate" - self.session_id = session_id - self.init_timestamp = get_ISO_time() - self.tags: List[str] = tags or [] - self.video: Optional[str] = None - self.end_state_reason: Optional[str] = None - self.host_env = host_env - self.config = config - self.jwt = None - self._lock = threading.Lock() - self._end_session_lock = threading.Lock() - self.token_cost: Decimal = Decimal(0) - self._session_url: str = "" - self.event_counts = { - "llms": 0, - "tools": 0, - "actions": 0, - "errors": 0, - "apis": 0, - } - # self.session_url: Optional[str] = None - - # Start session first to get JWT - self.is_running = self._start_session() - if not self.is_running: - return - - # Initialize OTEL components with a more controlled processor - self._tracer_provider = TracerProvider() - self._otel_tracer = self._tracer_provider.get_tracer( - f"agentops.session.{str(session_id)}", - ) - self._otel_exporter = SessionExporter(session=self) - - # Use smaller batch size and shorter delay to reduce buffering - self._span_processor = BatchSpanProcessor( - self._otel_exporter, - max_queue_size=self.config.max_queue_size, - schedule_delay_millis=self.config.max_wait_time, - max_export_batch_size=min( - max(self.config.max_queue_size // 20, 1), - min(self.config.max_queue_size, 32), - ), - export_timeout_millis=20000, - ) - - self._tracer_provider.add_span_processor(self._span_processor) - - def set_video(self, video: str) -> None: - """ - Sets a url to the video recording of the session. - - Args: - video (str): The url of the video recording - """ - self.video = video - - def _flush_spans(self) -> bool: - """ - Flush pending spans for this specific session with timeout. - Returns True if flush was successful, False otherwise. 
- """ - if not hasattr(self, "_span_processor"): - return True - - try: - success = self._span_processor.force_flush(timeout_millis=self.config.max_wait_time) - if not success: - logger.warning("Failed to flush all spans before session end") - return success - except Exception as e: - logger.warning(f"Error flushing spans: {e}") - return False - - def end_session( - self, - end_state: str = "Indeterminate", - end_state_reason: Optional[str] = None, - video: Optional[str] = None, - ) -> Union[Decimal, None]: - with self._end_session_lock: - if not self.is_running: - return None - - if not any(end_state == state.value for state in EndState): - logger.warning("Invalid end_state. Please use one of the EndState") - return None - - try: - # Force flush any pending spans before ending session - if hasattr(self, "_span_processor"): - self._span_processor.force_flush(timeout_millis=5000) - - # 1. Set shutdown flag on exporter first - if hasattr(self, "_otel_exporter"): - self._otel_exporter.shutdown() - - # 2. Set session end state - self.end_timestamp = get_ISO_time() - self.end_state = end_state - self.end_state_reason = end_state_reason - if video is not None: - self.video = video - - # 3. Mark session as not running before cleanup - self.is_running = False - - # 4. Clean up OTEL components - if hasattr(self, "_span_processor"): - try: - # Force flush any pending spans - self._span_processor.force_flush(timeout_millis=5000) - # Shutdown the processor - self._span_processor.shutdown() - except Exception as e: - logger.warning(f"Error during span processor cleanup: {e}") - finally: - del self._span_processor - - # 5. Final session update - if not (analytics_stats := self.get_analytics()): - return None - - analytics = ( - f"Session Stats - " - f"{colored('Duration:', attrs=['bold'])} {analytics_stats['Duration']} | " - f"{colored('Cost:', attrs=['bold'])} ${analytics_stats['Cost']} | " - f"{colored('LLMs:', attrs=['bold'])} {analytics_stats['LLM calls']} | " - f"{colored('Tools:', attrs=['bold'])} {analytics_stats['Tool calls']} | " - f"{colored('Actions:', attrs=['bold'])} {analytics_stats['Actions']} | " - f"{colored('Errors:', attrs=['bold'])} {analytics_stats['Errors']}" - ) - logger.info(analytics) - - except Exception as e: - logger.exception(f"Error during session end: {e}") - finally: - active_sessions.remove(self) # First thing, get rid of the session - - logger.info( - colored( - f"\x1b[34mSession Replay: {self.session_url}\x1b[0m", - "blue", - ) - ) - return self.token_cost - - def add_tags(self, tags: List[str]) -> None: - """ - Append to session tags at runtime. 
- """ - if not self.is_running: - return - - if not (isinstance(tags, list) and all(isinstance(item, str) for item in tags)): - if isinstance(tags, str): - tags = [tags] - - # Initialize tags if None - if self.tags is None: - self.tags = [] - - # Add new tags that don't exist - for tag in tags: - if tag not in self.tags: - self.tags.append(tag) - - # Update session state immediately - self._update_session() - - def set_tags(self, tags): - """Set session tags, replacing any existing tags""" - if not self.is_running: - return - - if not (isinstance(tags, list) and all(isinstance(item, str) for item in tags)): - if isinstance(tags, str): - tags = [tags] - - # Set tags directly - self.tags = tags.copy() # Make a copy to avoid reference issues - - # Update session state immediately - self._update_session() - - def record(self, event: Union[Event, ErrorEvent], flush_now=False): - """Record an event using OpenTelemetry spans""" - if not self.is_running: - return - - # Ensure event has all required base attributes - if not hasattr(event, "id"): - event.id = uuid4() - if not hasattr(event, "init_timestamp"): - event.init_timestamp = get_ISO_time() - if not hasattr(event, "end_timestamp") or event.end_timestamp is None: - event.end_timestamp = get_ISO_time() - - # Create session context - token = set_value("session.id", str(self.session_id)) - - try: - token = attach(token) - - # Create a copy of event data to modify - event_data = dict(filter_unjsonable(event.__dict__)) - - # Add required fields based on event type - if isinstance(event, ErrorEvent): - event_data["error_type"] = getattr(event, "error_type", event.event_type) - elif event.event_type == "actions": - # Ensure action events have action_type - if "action_type" not in event_data: - event_data["action_type"] = event_data.get("name", "unknown_action") - if "name" not in event_data: - event_data["name"] = event_data.get("action_type", "unknown_action") - elif event.event_type == "tools": - # Ensure tool events have name - if "name" not in event_data: - event_data["name"] = event_data.get("tool_name", "unknown_tool") - if "tool_name" not in event_data: - event_data["tool_name"] = event_data.get("name", "unknown_tool") - - with self._otel_tracer.start_as_current_span( - name=event.event_type, - attributes={ - "event.id": str(event.id), - "event.type": event.event_type, - "event.timestamp": event.init_timestamp or get_ISO_time(), - "event.end_timestamp": event.end_timestamp or get_ISO_time(), - "session.id": str(self.session_id), - "session.tags": ",".join(self.tags) if self.tags else "", - "event.data": json.dumps(event_data), - }, - ) as span: - if event.event_type in self.event_counts: - self.event_counts[event.event_type] += 1 - - if isinstance(event, ErrorEvent): - span.set_attribute("error", True) - if hasattr(event, "trigger_event") and event.trigger_event: - span.set_attribute("trigger_event.id", str(event.trigger_event.id)) - span.set_attribute("trigger_event.type", event.trigger_event.event_type) - - if flush_now and hasattr(self, "_span_processor"): - self._span_processor.force_flush() - finally: - detach(token) - - def _send_event(self, event): - """Direct event sending for testing""" - try: - payload = { - "events": [ - { - "id": str(event.id), - "event_type": event.event_type, - "init_timestamp": event.init_timestamp, - "end_timestamp": event.end_timestamp, - "data": filter_unjsonable(event.__dict__), - } - ] - } - - HttpClient.post( - f"{self.config.endpoint}/v2/create_events", - json.dumps(payload).encode("utf-8"), - 
jwt=self.jwt, - ) - except Exception as e: - logger.error(f"Failed to send event: {e}") - - def _reauthorize_jwt(self) -> Union[str, None]: - with self._lock: - payload = {"session_id": self.session_id} - serialized_payload = json.dumps(filter_unjsonable(payload)).encode("utf-8") - res = HttpClient.post( - f"{self.config.endpoint}/v2/reauthorize_jwt", - serialized_payload, - self.config.api_key, - ) - - logger.debug(res.body) - - if res.code != 200: - return None - - jwt = res.body.get("jwt", None) - self.jwt = jwt - return jwt - - def _start_session(self): - with self._lock: - payload = {"session": self.__dict__} - serialized_payload = json.dumps(filter_unjsonable(payload)).encode("utf-8") - - try: - res = HttpClient.post( - f"{self.config.endpoint}/v2/create_session", - serialized_payload, - api_key=self.config.api_key, - parent_key=self.config.parent_key, - ) - except ApiServerException as e: - return logger.error(f"Could not start session - {e}") - - logger.debug(res.body) - - if res.code != 200: - return False - - jwt = res.body.get("jwt", None) - self.jwt = jwt - if jwt is None: - return False - - logger.info( - colored( - f"\x1b[34mSession Replay: {self.session_url}\x1b[0m", - "blue", - ) - ) - - return True - - def _update_session(self) -> None: - """Update session state on the server""" - if not self.is_running: - return - - # TODO: Determine whether we really need to lock here: are incoming calls coming from other threads? - with self._lock: - payload = {"session": self.__dict__} - - try: - res = HttpClient.post( - f"{self.config.endpoint}/v2/update_session", - json.dumps(filter_unjsonable(payload)).encode("utf-8"), - # self.config.api_key, - jwt=self.jwt, - ) - except ApiServerException as e: - return logger.error(f"Could not update session - {e}") - - def create_agent(self, name, agent_id): - if not self.is_running: - return - if agent_id is None: - agent_id = str(uuid4()) - - payload = { - "id": agent_id, - "name": name, - } - - serialized_payload = safe_serialize(payload).encode("utf-8") - try: - HttpClient.post( - f"{self.config.endpoint}/v2/create_agent", - serialized_payload, - api_key=self.config.api_key, - jwt=self.jwt, - ) - except ApiServerException as e: - return logger.error(f"Could not create agent - {e}") - - return agent_id - - def patch(self, func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - kwargs["session"] = self - return func(*args, **kwargs) - - return wrapper - - def _get_response(self) -> Optional[Response]: - payload = {"session": self.__dict__} - try: - response = HttpClient.post( - f"{self.config.endpoint}/v2/update_session", - json.dumps(filter_unjsonable(payload)).encode("utf-8"), - api_key=self.config.api_key, - jwt=self.jwt, - ) - except ApiServerException as e: - return logger.error(f"Could not end session - {e}") - - logger.debug(response.body) - return response - - def _format_duration(self, start_time, end_time) -> str: - start = datetime.fromisoformat(start_time.replace("Z", "+00:00")) - end = datetime.fromisoformat(end_time.replace("Z", "+00:00")) - duration = end - start - - hours, remainder = divmod(duration.total_seconds(), 3600) - minutes, seconds = divmod(remainder, 60) - - parts = [] - if hours > 0: - parts.append(f"{int(hours)}h") - if minutes > 0: - parts.append(f"{int(minutes)}m") - parts.append(f"{seconds:.1f}s") - - return " ".join(parts) - - def _get_token_cost(self, response: Response) -> Decimal: - token_cost = response.body.get("token_cost", "unknown") - if token_cost == "unknown" or token_cost is None: - return 
Decimal(0) - return Decimal(token_cost) - - def _format_token_cost(self, token_cost: Decimal) -> str: - return ( - "{:.2f}".format(token_cost) - if token_cost == 0 - else "{:.6f}".format(token_cost.quantize(Decimal("0.000001"), rounding=ROUND_HALF_UP)) - ) - - def get_analytics(self) -> Optional[Dict[str, Any]]: - if not self.end_timestamp: - self.end_timestamp = get_ISO_time() - - formatted_duration = self._format_duration(self.init_timestamp, self.end_timestamp) - - if (response := self._get_response()) is None: - return None - - self.token_cost = self._get_token_cost(response) - - return { - "LLM calls": self.event_counts["llms"], - "Tool calls": self.event_counts["tools"], - "Actions": self.event_counts["actions"], - "Errors": self.event_counts["errors"], - "Duration": formatted_duration, - "Cost": self._format_token_cost(self.token_cost), - } - - @property - def session_url(self) -> str: - """Returns the URL for this session in the AgentOps dashboard.""" - assert self.session_id, "Session ID is required to generate a session URL" - return f"https://app.agentops.ai/drilldown?session_id={self.session_id}" - - # @session_url.setter - # def session_url(self, url: str): - # pass - - -active_sessions: List[Session] = [] diff --git a/agentops/singleton.py b/agentops/singleton.py deleted file mode 100644 index b22e4edc1..000000000 --- a/agentops/singleton.py +++ /dev/null @@ -1,28 +0,0 @@ -ao_instances = {} - - -def singleton(class_): - def getinstance(*args, **kwargs): - if class_ not in ao_instances: - ao_instances[class_] = class_(*args, **kwargs) - return ao_instances[class_] - - return getinstance - - -def conditional_singleton(class_): - def getinstance(*args, **kwargs): - use_singleton = kwargs.pop("use_singleton", True) - if use_singleton: - if class_ not in ao_instances: - ao_instances[class_] = class_(*args, **kwargs) - return ao_instances[class_] - else: - return class_(*args, **kwargs) - - return getinstance - - -def clear_singletons(): - global ao_instances - ao_instances = {} diff --git a/agentops/time_travel.py b/agentops/time_travel.py deleted file mode 100644 index 55ad66629..000000000 --- a/agentops/time_travel.py +++ /dev/null @@ -1,144 +0,0 @@ -import json -import yaml -import os -from .http_client import HttpClient -from .exceptions import ApiServerException -from .singleton import singleton - -ttd_prepend_string = "🖇️ Agentops: ⏰ Time Travel |" - - -@singleton -class TimeTravel: - def __init__(self): - self._completion_overrides = {} - - script_dir = os.path.dirname(os.path.abspath(__file__)) - parent_dir = os.path.dirname(script_dir) - cache_path = os.path.join(parent_dir, "agentops_time_travel.json") - - try: - with open(cache_path, "r") as file: - time_travel_cache_json = json.load(file) - self._completion_overrides = time_travel_cache_json.get("completion_overrides") - except FileNotFoundError: - return - - -def fetch_time_travel_id(ttd_id): - try: - endpoint = os.environ.get("AGENTOPS_API_ENDPOINT", "https://api.agentops.ai") - ttd_res = HttpClient.get(f"{endpoint}/v2/ttd/{ttd_id}") - if ttd_res.code != 200: - raise Exception(f"Failed to fetch TTD with status code {ttd_res.code}") - - completion_overrides = { - "completion_overrides": { - ( - str({"messages": item["prompt"]["messages"]}) - if item["prompt"].get("type") == "chatml" - else str(item["prompt"]) - ): item["returns"] - for item in ttd_res.body # TODO: rename returns to completion_override - } - } - with open("agentops_time_travel.json", "w") as file: - json.dump(completion_overrides, file, indent=4) - - 
set_time_travel_active_state(True) - except ApiServerException as e: - print(f"{ttd_prepend_string} Error - {e}") - except Exception as e: - print(f"{ttd_prepend_string} Error - {e}") - - -def fetch_completion_override_from_time_travel_cache(kwargs): - if not check_time_travel_active(): - return - - if TimeTravel()._completion_overrides: - return find_cache_hit(kwargs["messages"], TimeTravel()._completion_overrides) - - -# NOTE: This is specific to the messages: [{'role': '...', 'content': '...'}, ...] format -def find_cache_hit(prompt_messages, completion_overrides): - if not isinstance(prompt_messages, (list, tuple)): - print( - f"{ttd_prepend_string} Error - unexpected type for prompt_messages. Expected 'list' or 'tuple'. Got ", - type(prompt_messages), - ) - return None - - if not isinstance(completion_overrides, dict): - print( - f"{ttd_prepend_string} Error - unexpected type for completion_overrides. Expected 'dict'. Got ", - type(completion_overrides), - ) - return None - for key, value in completion_overrides.items(): - try: - completion_override_dict = eval(key) - if not isinstance(completion_override_dict, dict): - print( - f"{ttd_prepend_string} Error - unexpected type for completion_override_dict. Expected 'dict'. Got ", - type(completion_override_dict), - ) - continue - - cached_messages = completion_override_dict.get("messages") - if not isinstance(cached_messages, list): - print( - f"{ttd_prepend_string} Error - unexpected type for cached_messages. Expected 'list'. Got ", - type(cached_messages), - ) - continue - - if len(cached_messages) != len(prompt_messages): - continue - - if all( - isinstance(a, dict) and isinstance(b, dict) and a.get("content") == b.get("content") - for a, b in zip(prompt_messages, cached_messages) - ): - return value - except (SyntaxError, ValueError, TypeError) as e: - print(f"{ttd_prepend_string} Error - Error processing completion_overrides item: {e}") - except Exception as e: - print(f"{ttd_prepend_string} Error - Unexpected error in find_cache_hit: {e}") - return None - - -def check_time_travel_active(): - script_dir = os.path.dirname(os.path.abspath(__file__)) - parent_dir = os.path.dirname(script_dir) - config_file_path = os.path.join(parent_dir, ".agentops_time_travel.yaml") - - try: - with open(config_file_path, "r") as config_file: - config = yaml.safe_load(config_file) - return config.get("Time_Travel_Debugging_Active", False) - except FileNotFoundError: - return False - - -def set_time_travel_active_state(is_active: bool): - config_path = ".agentops_time_travel.yaml" - try: - with open(config_path, "r") as config_file: - config = yaml.safe_load(config_file) or {} - except FileNotFoundError: - config = {} - - config["Time_Travel_Debugging_Active"] = is_active - - with open(config_path, "w") as config_file: - try: - yaml.dump(config, config_file) - except: - print(f"{ttd_prepend_string} Error - Unable to write to {config_path}. 
Time Travel not activated") - return - - if is_active: - print(f"{ttd_prepend_string} Activated") - else: - print(f"{ttd_prepend_string} Deactivated") From 81c22fd56718b4e04b72978c7770b308db53148f Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 00:11:51 +0200 Subject: [PATCH 10/45] cleanup providers/ Signed-off-by: Teo --- agentops/llms/__init__.py | 0 agentops/llms/providers/__init__.py | 0 agentops/llms/providers/ai21.py | 176 --------- agentops/llms/providers/anthropic.py | 332 ----------------- agentops/llms/providers/base.py | 36 -- agentops/llms/providers/cohere.py | 252 ------------- agentops/llms/providers/gemini.py | 194 ---------- agentops/llms/providers/groq.py | 175 --------- agentops/llms/providers/litellm.py | 231 ------------ agentops/llms/providers/llama_stack_client.py | 295 --------------- agentops/llms/providers/mistral.py | 215 ----------- agentops/llms/providers/ollama.py | 126 ------- agentops/llms/providers/openai.py | 344 ------------------ agentops/llms/providers/taskweaver.py | 146 -------- agentops/llms/tracker.py | 285 --------------- 15 files changed, 2807 deletions(-) delete mode 100644 agentops/llms/__init__.py delete mode 100644 agentops/llms/providers/__init__.py delete mode 100644 agentops/llms/providers/ai21.py delete mode 100644 agentops/llms/providers/anthropic.py delete mode 100644 agentops/llms/providers/base.py delete mode 100644 agentops/llms/providers/cohere.py delete mode 100644 agentops/llms/providers/gemini.py delete mode 100644 agentops/llms/providers/groq.py delete mode 100644 agentops/llms/providers/litellm.py delete mode 100644 agentops/llms/providers/llama_stack_client.py delete mode 100644 agentops/llms/providers/mistral.py delete mode 100644 agentops/llms/providers/ollama.py delete mode 100644 agentops/llms/providers/openai.py delete mode 100644 agentops/llms/providers/taskweaver.py delete mode 100644 agentops/llms/tracker.py diff --git a/agentops/llms/__init__.py b/agentops/llms/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/agentops/llms/providers/__init__.py b/agentops/llms/providers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/agentops/llms/providers/ai21.py b/agentops/llms/providers/ai21.py deleted file mode 100644 index 8271a2a64..000000000 --- a/agentops/llms/providers/ai21.py +++ /dev/null @@ -1,176 +0,0 @@ -import inspect -import pprint -from typing import Optional - -from agentops.llms.providers.base import BaseProvider -from agentops.time_travel import fetch_completion_override_from_time_travel_cache - -from agentops.event import ErrorEvent, LLMEvent, ActionEvent, ToolEvent -from agentops.session import Session -from agentops.log_config import logger -from agentops.helpers import check_call_stack_for_agent_id, get_ISO_time -from agentops.singleton import singleton - - -@singleton -class AI21Provider(BaseProvider): - original_create = None - original_create_async = None - - def __init__(self, client): - super().__init__(client) - self._provider_name = "AI21" - - def handle_response(self, response, kwargs, init_timestamp, session: Optional[Session] = None): - """Handle responses for AI21""" - from ai21.stream.stream import Stream - from ai21.stream.async_stream import AsyncStream - from ai21.models.chat.chat_completion_chunk import ChatCompletionChunk - - llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) - - if session is not None: - llm_event.session_id = session.session_id - - def handle_stream_chunk(chunk: ChatCompletionChunk): - # We take 
the first ChatCompletionChunk and accumulate the deltas from all subsequent chunks to build one full chat completion - if llm_event.returns is None: - llm_event.returns = chunk - # Manually setting content to empty string to avoid error - llm_event.returns.choices[0].delta.content = "" - - try: - accumulated_delta = llm_event.returns.choices[0].delta - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.model = kwargs["model"] - llm_event.prompt = [message.model_dump() for message in kwargs["messages"]] - - # NOTE: We assume for completion only choices[0] is relevant - choice = chunk.choices[0] - - if choice.delta.content: - accumulated_delta.content += choice.delta.content - - if choice.delta.role: - accumulated_delta.role = choice.delta.role - - if getattr("choice.delta", "tool_calls", None): - accumulated_delta.tool_calls += ToolEvent(logs=choice.delta.tools) - - if choice.finish_reason: - # Streaming is done. Record LLMEvent - llm_event.returns.choices[0].finish_reason = choice.finish_reason - llm_event.completion = { - "role": accumulated_delta.role, - "content": accumulated_delta.content, - } - llm_event.prompt_tokens = chunk.usage.prompt_tokens - llm_event.completion_tokens = chunk.usage.completion_tokens - llm_event.end_timestamp = get_ISO_time() - self._safe_record(session, llm_event) - - except Exception as e: - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - - kwargs_str = pprint.pformat(kwargs) - chunk = pprint.pformat(chunk) - logger.warning( - f"Unable to parse a chunk for LLM call. Skipping upload to AgentOps\n" - f"chunk:\n {chunk}\n" - f"kwargs:\n {kwargs_str}\n" - ) - - # if the response is a generator, decorate the generator - # For synchronous Stream - if isinstance(response, Stream): - - def generator(): - for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return generator() - - # For asynchronous AsyncStream - if isinstance(response, AsyncStream): - - async def async_generator(): - async for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return async_generator() - - # Handle object responses - try: - llm_event.returns = response - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.model = kwargs["model"] - llm_event.prompt = [message.model_dump() for message in kwargs["messages"]] - llm_event.prompt_tokens = response.usage.prompt_tokens - llm_event.completion = response.choices[0].message.model_dump() - llm_event.completion_tokens = response.usage.completion_tokens - llm_event.end_timestamp = get_ISO_time() - self._safe_record(session, llm_event) - - except Exception as e: - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - kwargs_str = pprint.pformat(kwargs) - response = pprint.pformat(response) - logger.warning( - f"Unable to parse response for LLM call. 
Skipping upload to AgentOps\n" - f"response:\n {response}\n" - f"kwargs:\n {kwargs_str}\n" - ) - - return response - - def override(self): - self._override_completion() - self._override_completion_async() - - def _override_completion(self): - from ai21.clients.studio.resources.chat import ChatCompletions - - global original_create - original_create = ChatCompletions.create - - def patched_function(*args, **kwargs): - # Call the original function with its original arguments - init_timestamp = get_ISO_time() - session = kwargs.get("session", None) - if "session" in kwargs.keys(): - del kwargs["session"] - result = original_create(*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp, session=session) - - # Override the original method with the patched one - ChatCompletions.create = patched_function - - def _override_completion_async(self): - from ai21.clients.studio.resources.chat import AsyncChatCompletions - - global original_create_async - original_create_async = AsyncChatCompletions.create - - async def patched_function(*args, **kwargs): - # Call the original function with its original arguments - init_timestamp = get_ISO_time() - session = kwargs.get("session", None) - if "session" in kwargs.keys(): - del kwargs["session"] - result = await original_create_async(*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp, session=session) - - # Override the original method with the patched one - AsyncChatCompletions.create = patched_function - - def undo_override(self): - if self.original_create is not None and self.original_create_async is not None: - from ai21.clients.studio.resources.chat import ( - ChatCompletions, - AsyncChatCompletions, - ) - - ChatCompletions.create = self.original_create - AsyncChatCompletions.create = self.original_create_async diff --git a/agentops/llms/providers/anthropic.py b/agentops/llms/providers/anthropic.py deleted file mode 100644 index 02d536fb4..000000000 --- a/agentops/llms/providers/anthropic.py +++ /dev/null @@ -1,332 +0,0 @@ -import json -import pprint -from typing import Optional - -from agentops.llms.providers.base import BaseProvider -from agentops.time_travel import fetch_completion_override_from_time_travel_cache - -from agentops.event import ErrorEvent, LLMEvent, ToolEvent -from agentops.helpers import check_call_stack_for_agent_id, get_ISO_time -from agentops.log_config import logger -from agentops.session import Session -from agentops.singleton import singleton - - -@singleton -class AnthropicProvider(BaseProvider): - original_create = None - original_create_async = None - - def __init__(self, client): - super().__init__(client) - self._provider_name = "Anthropic" - self.tool_event = {} - self.tool_id = "" - - def handle_response(self, response, kwargs, init_timestamp, session: Optional[Session] = None): - """Handle responses for Anthropic""" - import anthropic.resources.beta.messages.messages as beta_messages - from anthropic import AsyncStream, Stream - from anthropic.resources import AsyncMessages - from anthropic.types import Message - - llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) - if session is not None: - llm_event.session_id = session.session_id - - def handle_stream_chunk(chunk: Message): - try: - # We take the first chunk and accumulate the deltas from all subsequent chunks to build one full chat completion - if chunk.type == "message_start": - llm_event.returns = chunk - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.model = kwargs["model"] - 
llm_event.prompt = kwargs["messages"] - llm_event.prompt_tokens = chunk.message.usage.input_tokens - llm_event.completion = { - "role": chunk.message.role, - "content": "", # Always returned as [] in this instance type - } - - elif chunk.type == "content_block_start": - if chunk.content_block.type == "text": - llm_event.completion["content"] += chunk.content_block.text - - elif chunk.content_block.type == "tool_use": - self.tool_id = chunk.content_block.id - self.tool_event[self.tool_id] = ToolEvent( - name=chunk.content_block.name, - logs={"type": chunk.content_block.type, "input": ""}, - ) - - elif chunk.type == "content_block_delta": - if chunk.delta.type == "text_delta": - llm_event.completion["content"] += chunk.delta.text - - elif chunk.delta.type == "input_json_delta": - self.tool_event[self.tool_id].logs["input"] += chunk.delta.partial_json - - elif chunk.type == "content_block_stop": - pass - - elif chunk.type == "message_delta": - llm_event.completion_tokens = chunk.usage.output_tokens - - elif chunk.type == "message_stop": - llm_event.end_timestamp = get_ISO_time() - self._safe_record(session, llm_event) - - except Exception as e: - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - - kwargs_str = pprint.pformat(kwargs) - chunk = pprint.pformat(chunk) - logger.warning( - f"Unable to parse a chunk for LLM call. Skipping upload to AgentOps\n" - f"chunk:\n {chunk}\n" - f"kwargs:\n {kwargs_str}\n", - ) - - # if the response is a generator, decorate the generator - if isinstance(response, Stream): - - def generator(): - for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return generator() - - # For asynchronous AsyncStream - if isinstance(response, AsyncStream): - - async def async_generator(): - async for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return async_generator() - - # For async AsyncMessages - if isinstance(response, AsyncMessages): - - async def async_generator(): - async for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return async_generator() - - # Handle object responses - try: - # Naively handle AttributeError("'LegacyAPIResponse' object has no attribute 'model_dump'") - if hasattr(response, "model_dump"): - # This bets on the fact that the response object has a model_dump method - llm_event.returns = response.model_dump() - llm_event.prompt_tokens = response.usage.input_tokens - llm_event.completion_tokens = response.usage.output_tokens - - llm_event.completion = { - "role": "assistant", - "content": response.content[0].text, - } - llm_event.model = response.model - - else: - """Handle raw response data from the Anthropic API. - - The raw response has the following structure: - { - 'id': str, # Message ID (e.g. 'msg_018Gk9N2pcWaYLS7mxXbPD5i') - 'type': str, # Type of response (e.g. 'message') - 'role': str, # Role of responder (e.g. 'assistant') - 'model': str, # Model used (e.g. 'claude-3-5-sonnet-20241022') - 'content': List[Dict], # List of content blocks with 'type' and 'text' - 'stop_reason': str, # Reason for stopping (e.g. 
'end_turn') - 'stop_sequence': Any, # Stop sequence used, if any - 'usage': { # Token usage statistics - 'input_tokens': int, - 'output_tokens': int - } - } - - Note: We import Anthropic types here since the package must be installed - for raw responses to be available; doing so in the global scope would - result in dependencies error since this provider is not lazily imported (tests fail) - """ - from anthropic import APIResponse - from anthropic._legacy_response import LegacyAPIResponse - - assert isinstance(response, (APIResponse, LegacyAPIResponse)), ( - f"Expected APIResponse or LegacyAPIResponse, got {type(response)}. " - "This is likely caused by changes in the Anthropic SDK and the integrations with AgentOps needs update." - "Please open an issue at https://github.com/AgentOps-AI/agentops/issues" - ) - response_data = json.loads(response.text) - llm_event.returns = response_data - llm_event.model = response_data["model"] - llm_event.completion = { - "role": response_data.get("role"), - "content": (response_data.get("content")[0].get("text") if response_data.get("content") else ""), - } - if usage := response_data.get("usage"): - llm_event.prompt_tokens = usage.get("input_tokens") - llm_event.completion_tokens = usage.get("output_tokens") - - llm_event.end_timestamp = get_ISO_time() - llm_event.prompt = kwargs["messages"] - llm_event.agent_id = check_call_stack_for_agent_id() - - self._safe_record(session, llm_event) - except Exception as e: - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - kwargs_str = pprint.pformat(kwargs) - response = pprint.pformat(response) - logger.warning( - f"Unable to parse response for LLM call. Skipping upload to AgentOps\n" - f"response:\n {response}\n" - f"kwargs:\n {kwargs_str}\n" - ) - - return response - - def override(self): - self._override_completion() - self._override_async_completion() - - def _override_completion(self): - import anthropic.resources.beta.messages.messages as beta_messages - from anthropic.resources import messages - from anthropic.types import ( - Message, - RawContentBlockDeltaEvent, - RawContentBlockStartEvent, - RawContentBlockStopEvent, - RawMessageDeltaEvent, - RawMessageStartEvent, - RawMessageStopEvent, - ) - - # Store the original method - self.original_create = messages.Messages.create - self.original_create_beta = beta_messages.Messages.create - - def create_patched_function(is_beta=False): - def patched_function(*args, **kwargs): - init_timestamp = get_ISO_time() - session = kwargs.get("session", None) - - if "session" in kwargs.keys(): - del kwargs["session"] - - completion_override = fetch_completion_override_from_time_travel_cache(kwargs) - if completion_override: - result_model = None - pydantic_models = ( - Message, - RawContentBlockDeltaEvent, - RawContentBlockStartEvent, - RawContentBlockStopEvent, - RawMessageDeltaEvent, - RawMessageStartEvent, - RawMessageStopEvent, - ) - - for pydantic_model in pydantic_models: - try: - result_model = pydantic_model.model_validate_json(completion_override) - break - except Exception as e: - pass - - if result_model is None: - logger.error( - f"Time Travel: Pydantic validation failed for {pydantic_models} \n" - f"Time Travel: Completion override was:\n" - f"{pprint.pformat(completion_override)}" - ) - return None - return self.handle_response(result_model, kwargs, init_timestamp, session=session) - - # Call the original function with its original arguments - original_func = self.original_create_beta if is_beta else self.original_create - result 
= original_func(*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp, session=session) - - return patched_function - - # Override the original methods with the patched ones - messages.Messages.create = create_patched_function(is_beta=False) - beta_messages.Messages.create = create_patched_function(is_beta=True) - - def _override_async_completion(self): - import anthropic.resources.beta.messages.messages as beta_messages - from anthropic.resources import messages - from anthropic.types import ( - Message, - RawContentBlockDeltaEvent, - RawContentBlockStartEvent, - RawContentBlockStopEvent, - RawMessageDeltaEvent, - RawMessageStartEvent, - RawMessageStopEvent, - ) - - # Store the original method - self.original_create_async = messages.AsyncMessages.create - self.original_create_async_beta = beta_messages.AsyncMessages.create - - def create_patched_async_function(is_beta=False): - async def patched_function(*args, **kwargs): - init_timestamp = get_ISO_time() - session = kwargs.get("session", None) - if "session" in kwargs.keys(): - del kwargs["session"] - - completion_override = fetch_completion_override_from_time_travel_cache(kwargs) - if completion_override: - result_model = None - pydantic_models = ( - Message, - RawContentBlockDeltaEvent, - RawContentBlockStartEvent, - RawContentBlockStopEvent, - RawMessageDeltaEvent, - RawMessageStartEvent, - RawMessageStopEvent, - ) - - for pydantic_model in pydantic_models: - try: - result_model = pydantic_model.model_validate_json(completion_override) - break - except Exception as e: - pass - - if result_model is None: - logger.error( - f"Time Travel: Pydantic validation failed for {pydantic_models} \n" - f"Time Travel: Completion override was:\n" - f"{pprint.pformat(completion_override)}" - ) - return None - - return self.handle_response(result_model, kwargs, init_timestamp, session=session) - - # Call the original function with its original arguments - original_func = self.original_create_async_beta if is_beta else self.original_create_async - result = await original_func(*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp, session=session) - - return patched_function - - # Override the original methods with the patched ones - messages.AsyncMessages.create = create_patched_async_function(is_beta=False) - beta_messages.AsyncMessages.create = create_patched_async_function(is_beta=True) - - def undo_override(self): - if self.original_create is not None and self.original_create_async is not None: - from anthropic.resources import messages - - messages.Messages.create = self.original_create - messages.AsyncMessages.create = self.original_create_async diff --git a/agentops/llms/providers/base.py b/agentops/llms/providers/base.py deleted file mode 100644 index 7a54b5f0e..000000000 --- a/agentops/llms/providers/base.py +++ /dev/null @@ -1,36 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Optional - -from agentops.session import Session -from agentops.event import LLMEvent - - -class BaseProvider(ABC): - _provider_name: str = "InstrumentedModel" - llm_event: Optional[LLMEvent] = None - client = None - - def __init__(self, client): - self.client = client - - @abstractmethod - def handle_response(self, response, kwargs, init_timestamp, session: Optional[Session] = None) -> dict: - pass - - @abstractmethod - def override(self): - pass - - @abstractmethod - def undo_override(self): - pass - - @property - def provider_name(self): - return self._provider_name - - def _safe_record(self, session, 
event): - if session is not None: - session.record(event) - else: - self.client.record(event) diff --git a/agentops/llms/providers/cohere.py b/agentops/llms/providers/cohere.py deleted file mode 100644 index 5e4961216..000000000 --- a/agentops/llms/providers/cohere.py +++ /dev/null @@ -1,252 +0,0 @@ -import inspect -import pprint -from typing import Optional - -from .base import BaseProvider -from agentops.event import ActionEvent, ErrorEvent, LLMEvent -from agentops.session import Session -from agentops.log_config import logger -from agentops.helpers import get_ISO_time, check_call_stack_for_agent_id -from agentops.singleton import singleton - - -@singleton -class CohereProvider(BaseProvider): - original_create = None - original_create_stream = None - original_create_async = None - - def override(self): - self._override_chat() - self._override_chat_stream() - self._override_async_chat() - - def undo_override(self): - if ( - self.original_create is not None - and self.original_create_async is not None - and self.original_create_stream is not None - ): - import cohere - - cohere.Client.chat = self.original_create - cohere.Client.chat_stream = self.original_create_stream - cohere.AsyncClient.chat = self.original_create_async - - def __init__(self, client): - super().__init__(client) - - def handle_response(self, response, kwargs, init_timestamp, session: Optional[Session] = None): - """Handle responses for Cohere versions >v5.4.0""" - from cohere.types.streamed_chat_response import ( - StreamedChatResponse_CitationGeneration, - StreamedChatResponse_SearchQueriesGeneration, - StreamedChatResponse_SearchResults, - StreamedChatResponse_StreamEnd, - StreamedChatResponse_StreamStart, - StreamedChatResponse_TextGeneration, - StreamedChatResponse_ToolCallsGeneration, - ) - - # from cohere.types.chat import ChatGenerationChunk - # NOTE: Cohere only returns one message and its role will be CHATBOT which we are coercing to "assistant" - llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) - if session is not None: - llm_event.session_id = session.session_id - - self.action_events = {} - - def handle_stream_chunk(chunk, session: Optional[Session] = None): - # We take the first chunk and accumulate the deltas from all subsequent chunks to build one full chat completion - if isinstance(chunk, StreamedChatResponse_StreamStart): - llm_event.returns = chunk - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.model = kwargs.get("model", "command-r-plus") - llm_event.prompt = kwargs["message"] - llm_event.completion = "" - return - - try: - if isinstance(chunk, StreamedChatResponse_StreamEnd): - # StreamedChatResponse_TextGeneration = LLMEvent - llm_event.completion = { - "role": "assistant", - "content": chunk.response.text, - } - llm_event.end_timestamp = get_ISO_time() - self._safe_record(session, llm_event) - - # StreamedChatResponse_SearchResults = ActionEvent - search_results = chunk.response.search_results - if search_results: - for search_result in search_results: - query = search_result.search_query - if query.generation_id in self.action_events: - action_event = self.action_events[query.generation_id] - search_result_dict = search_result.dict() - del search_result_dict["search_query"] - action_event.returns = search_result_dict - action_event.end_timestamp = get_ISO_time() - - # StreamedChatResponse_CitationGeneration = ActionEvent - if chunk.response.documents: - documents = {doc["id"]: doc for doc in chunk.response.documents} - citations = chunk.response.citations - 
for citation in citations: - citation_id = f"{citation.start}.{citation.end}" - if citation_id in self.action_events: - action_event = self.action_events[citation_id] - citation_dict = citation.dict() - # Replace document_ids with the actual documents - citation_dict["documents"] = [ - documents[doc_id] for doc_id in citation_dict["document_ids"] if doc_id in documents - ] - del citation_dict["document_ids"] - - action_event.returns = citation_dict - action_event.end_timestamp = get_ISO_time() - - for key, action_event in self.action_events.items(): - self._safe_record(session, action_event) - - elif isinstance(chunk, StreamedChatResponse_TextGeneration): - llm_event.completion += chunk.text - elif isinstance(chunk, StreamedChatResponse_ToolCallsGeneration): - pass - elif isinstance(chunk, StreamedChatResponse_CitationGeneration): - for citation in chunk.citations: - self.action_events[f"{citation.start}.{citation.end}"] = ActionEvent( - action_type="citation", - init_timestamp=get_ISO_time(), - params=citation.text, - ) - elif isinstance(chunk, StreamedChatResponse_SearchQueriesGeneration): - for query in chunk.search_queries: - self.action_events[query.generation_id] = ActionEvent( - action_type="search_query", - init_timestamp=get_ISO_time(), - params=query.text, - ) - elif isinstance(chunk, StreamedChatResponse_SearchResults): - pass - - except Exception as e: - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - - kwargs_str = pprint.pformat(kwargs) - chunk = pprint.pformat(chunk) - logger.warning( - f"Unable to parse a chunk for LLM call. Skipping upload to AgentOps\n" - f"chunk:\n {chunk}\n" - f"kwargs:\n {kwargs_str}\n" - ) - raise e - - # NOTE: As of Cohere==5.x.x, async is not supported - # if the response is a generator, decorate the generator - if inspect.isasyncgen(response): - - async def async_generator(): - async for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return async_generator() - - elif inspect.isgenerator(response): - - def generator(): - for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return generator() - - # TODO: we should record if they pass a chat.connectors, because it means they intended to call a tool - # Not enough to record StreamedChatResponse_ToolCallsGeneration because the tool may have not gotten called - - try: - llm_event.returns = response - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.prompt = [] - if response.chat_history: - role_map = {"USER": "user", "CHATBOT": "assistant", "SYSTEM": "system"} - - for i in range(len(response.chat_history) - 1): - message = response.chat_history[i] - llm_event.prompt.append( - { - "role": role_map.get(message.role, message.role), - "content": message.message, - } - ) - - last_message = response.chat_history[-1] - llm_event.completion = { - "role": role_map.get(last_message.role, last_message.role), - "content": last_message.message, - } - llm_event.prompt_tokens = int(response.meta.tokens.input_tokens) - llm_event.completion_tokens = int(response.meta.tokens.output_tokens) - llm_event.model = kwargs.get("model", "command-r-plus") - - self._safe_record(session, llm_event) - except Exception as e: - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - kwargs_str = pprint.pformat(kwargs) - response = pprint.pformat(response) - logger.warning( - f"Unable to parse response for LLM call. 
Skipping upload to AgentOps\n" - f"response:\n {response}\n" - f"kwargs:\n {kwargs_str}\n" - ) - - return response - - def _override_chat(self): - import cohere - - self.original_create = cohere.Client.chat - - def patched_function(*args, **kwargs): - # Call the original function with its original arguments - init_timestamp = get_ISO_time() - session = kwargs.get("session", None) - if "session" in kwargs.keys(): - del kwargs["session"] - result = self.original_create(*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp, session=session) - - # Override the original method with the patched one - cohere.Client.chat = patched_function - - def _override_async_chat(self): - import cohere.types - - self.original_create_async = cohere.AsyncClient.chat - - async def patched_function(*args, **kwargs): - # Call the original function with its original arguments - init_timestamp = get_ISO_time() - session = kwargs.get("session", None) - if "session" in kwargs.keys(): - del kwargs["session"] - result = await self.original_create_async(*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp, session=session) - - # Override the original method with the patched one - cohere.AsyncClient.chat = patched_function - - def _override_chat_stream(self): - import cohere - - self.original_create_stream = cohere.Client.chat_stream - - def patched_function(*args, **kwargs): - # Call the original function with its original arguments - init_timestamp = get_ISO_time() - result = self.original_create_stream(*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp) - - # Override the original method with the patched one - cohere.Client.chat_stream = patched_function diff --git a/agentops/llms/providers/gemini.py b/agentops/llms/providers/gemini.py deleted file mode 100644 index 6ca96c3eb..000000000 --- a/agentops/llms/providers/gemini.py +++ /dev/null @@ -1,194 +0,0 @@ -from typing import Optional, Any, Dict, Union - -from agentops.llms.providers.base import BaseProvider -from agentops.event import LLMEvent, ErrorEvent -from agentops.session import Session -from agentops.helpers import get_ISO_time, check_call_stack_for_agent_id -from agentops.log_config import logger -from agentops.singleton import singleton - - -@singleton -class GeminiProvider(BaseProvider): - original_generate_content = None - original_generate_content_async = None - - """Provider for Google's Gemini API. - - This provider is automatically detected and initialized when agentops.init() - is called and the google.generativeai package is imported. No manual - initialization is required.""" - - def __init__(self, client=None): - """Initialize the Gemini provider. - - Args: - client: Optional client instance. If not provided, will be set during override. - """ - super().__init__(client) - self._provider_name = "Gemini" - - def handle_response(self, response, kwargs, init_timestamp, session: Optional[Session] = None) -> dict: - """Handle responses from Gemini API for both sync and streaming modes. 
- - Args: - response: The response from the Gemini API - kwargs: The keyword arguments passed to generate_content - init_timestamp: The timestamp when the request was initiated - session: Optional AgentOps session for recording events - - Returns: - For sync responses: The original response object - For streaming responses: A generator yielding response chunks - """ - llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) - if session is not None: - llm_event.session_id = session.session_id - - accumulated_content = "" - - def handle_stream_chunk(chunk): - nonlocal llm_event, accumulated_content - try: - if llm_event.returns is None: - llm_event.returns = chunk - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.model = getattr(chunk, "model", None) or "gemini-1.5-flash" - llm_event.prompt = kwargs.get("prompt", kwargs.get("contents", None)) or [] - - # Accumulate text from chunk - if hasattr(chunk, "text") and chunk.text: - accumulated_content += chunk.text - - # Extract token counts if available - if hasattr(chunk, "usage_metadata"): - llm_event.prompt_tokens = getattr(chunk.usage_metadata, "prompt_token_count", None) - llm_event.completion_tokens = getattr(chunk.usage_metadata, "candidates_token_count", None) - - # If this is the last chunk - if hasattr(chunk, "finish_reason") and chunk.finish_reason: - llm_event.completion = accumulated_content - llm_event.end_timestamp = get_ISO_time() - self._safe_record(session, llm_event) - - except Exception as e: - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - logger.warning( - f"Unable to parse chunk for Gemini LLM call. Error: {str(e)}\n" - f"Response: {chunk}\n" - f"Arguments: {kwargs}\n" - ) - - # For streaming responses - if kwargs.get("stream", False): - - def generator(): - for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return generator() - - # For synchronous responses - try: - llm_event.returns = response - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.prompt = kwargs.get("prompt", kwargs.get("contents", None)) or [] - llm_event.completion = response.text - llm_event.model = getattr(response, "model", None) or "gemini-1.5-flash" - - # Extract token counts from usage metadata if available - if hasattr(response, "usage_metadata"): - llm_event.prompt_tokens = getattr(response.usage_metadata, "prompt_token_count", None) - llm_event.completion_tokens = getattr(response.usage_metadata, "candidates_token_count", None) - - llm_event.end_timestamp = get_ISO_time() - self._safe_record(session, llm_event) - except Exception as e: - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - logger.warning( - f"Unable to parse response for Gemini LLM call. 
Error: {str(e)}\n" - f"Response: {response}\n" - f"Arguments: {kwargs}\n" - ) - - return response - - def override(self): - """Override Gemini's generate_content method to track LLM events.""" - self._override_gemini_generate_content() - self._override_gemini_generate_content_async() - - def _override_gemini_generate_content(self): - """Override synchronous generate_content method""" - import google.generativeai as genai - - # Store original method if not already stored - if self.original_generate_content is None: - self.original_generate_content = genai.GenerativeModel.generate_content - - provider = self # Store provider instance for closure - - def patched_function(model_self, *args, **kwargs): - init_timestamp = get_ISO_time() - session = kwargs.pop("session", None) - - # Handle positional prompt argument - event_kwargs = kwargs.copy() - if args and len(args) > 0: - prompt = args[0] - if "contents" not in kwargs: - kwargs["contents"] = prompt - event_kwargs["prompt"] = prompt - args = args[1:] - - result = provider.original_generate_content(model_self, *args, **kwargs) - return provider.handle_response(result, event_kwargs, init_timestamp, session=session) - - # Override the method at class level - genai.GenerativeModel.generate_content = patched_function - - def _override_gemini_generate_content_async(self): - """Override asynchronous generate_content method""" - import google.generativeai as genai - - # Store original async method if not already stored - if self.original_generate_content_async is None: - self.original_generate_content_async = genai.GenerativeModel.generate_content_async - - provider = self # Store provider instance for closure - - async def patched_function(model_self, *args, **kwargs): - init_timestamp = get_ISO_time() - session = kwargs.pop("session", None) - - # Handle positional prompt argument - event_kwargs = kwargs.copy() - if args and len(args) > 0: - prompt = args[0] - if "contents" not in kwargs: - kwargs["contents"] = prompt - event_kwargs["prompt"] = prompt - args = args[1:] - - result = await provider.original_generate_content_async(model_self, *args, **kwargs) - return provider.handle_response(result, event_kwargs, init_timestamp, session=session) - - # Override the async method at class level - genai.GenerativeModel.generate_content_async = patched_function - - def undo_override(self): - """Restore original Gemini methods. - - Note: - This method is called automatically by AgentOps during cleanup. 
- Users should not call this method directly.""" - import google.generativeai as genai - - if self.original_generate_content is not None: - genai.GenerativeModel.generate_content = self.original_generate_content - self.original_generate_content = None - - if self.original_generate_content_async is not None: - genai.GenerativeModel.generate_content_async = self.original_generate_content_async - self.original_generate_content_async = None diff --git a/agentops/llms/providers/groq.py b/agentops/llms/providers/groq.py deleted file mode 100644 index 4f07a9fd9..000000000 --- a/agentops/llms/providers/groq.py +++ /dev/null @@ -1,175 +0,0 @@ -import pprint -from typing import Optional - -from .base import BaseProvider -from agentops.event import ErrorEvent, LLMEvent -from agentops.session import Session -from agentops.log_config import logger -from agentops.helpers import get_ISO_time, check_call_stack_for_agent_id -from agentops.singleton import singleton - - -@singleton -class GroqProvider(BaseProvider): - original_create = None - original_async_create = None - - def __init__(self, client): - super().__init__(client) - self.client = client - - def override(self): - self._override_chat() - self._override_async_chat() - - def undo_override(self): - if self.original_create is not None and self.original_async_create is not None: - from groq.resources.chat import completions - - completions.Completions.create = self.original_create - completions.AsyncCompletions.create = self.original_create - - def handle_response(self, response, kwargs, init_timestamp, session: Optional[Session] = None): - """Handle responses for OpenAI versions >v1.0.0""" - from groq import AsyncStream, Stream - from groq.resources.chat import AsyncCompletions - from groq.types.chat import ChatCompletionChunk - - llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) - if session is not None: - llm_event.session_id = session.session_id - - def handle_stream_chunk(chunk: ChatCompletionChunk): - # NOTE: prompt/completion usage not returned in response when streaming - # We take the first ChatCompletionChunk and accumulate the deltas from all subsequent chunks to build one full chat completion - if llm_event.returns == None: - llm_event.returns = chunk - - try: - accumulated_delta = llm_event.returns.choices[0].delta - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.model = chunk.model - llm_event.prompt = kwargs["messages"] - - # NOTE: We assume for completion only choices[0] is relevant - choice = chunk.choices[0] - - if choice.delta.content: - accumulated_delta.content += choice.delta.content - - if choice.delta.role: - accumulated_delta.role = choice.delta.role - - if choice.delta.tool_calls: - accumulated_delta.tool_calls = choice.delta.tool_calls - - if choice.delta.function_call: - accumulated_delta.function_call = choice.delta.function_call - - if choice.finish_reason: - # Streaming is done. Record LLMEvent - llm_event.returns.choices[0].finish_reason = choice.finish_reason - llm_event.completion = { - "role": accumulated_delta.role, - "content": accumulated_delta.content, - "function_call": accumulated_delta.function_call, - "tool_calls": accumulated_delta.tool_calls, - } - llm_event.end_timestamp = get_ISO_time() - - self._safe_record(session, llm_event) - except Exception as e: - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - - kwargs_str = pprint.pformat(kwargs) - chunk = pprint.pformat(chunk) - logger.warning( - f"Unable to parse a chunk for LLM call. 
Skipping upload to AgentOps\n" - f"chunk:\n {chunk}\n" - f"kwargs:\n {kwargs_str}\n" - ) - - # if the response is a generator, decorate the generator - if isinstance(response, Stream): - - def generator(): - for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return generator() - - # For asynchronous AsyncStream - elif isinstance(response, AsyncStream): - - async def async_generator(): - async for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return async_generator() - - # For async AsyncCompletion - elif isinstance(response, AsyncCompletions): - - async def async_generator(): - async for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return async_generator() - - # v1.0.0+ responses are objects - try: - llm_event.returns = response.model_dump() - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.prompt = kwargs["messages"] - llm_event.prompt_tokens = response.usage.prompt_tokens - llm_event.completion = response.choices[0].message.model_dump() - llm_event.completion_tokens = response.usage.completion_tokens - llm_event.model = response.model - - self._safe_record(session, llm_event) - except Exception as e: - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - - kwargs_str = pprint.pformat(kwargs) - response = pprint.pformat(response) - logger.warning( - f"Unable to parse response for LLM call. Skipping upload to AgentOps\n" - f"response:\n {response}\n" - f"kwargs:\n {kwargs_str}\n" - ) - - return response - - def _override_chat(self): - from groq.resources.chat import completions - - self.original_create = completions.Completions.create - - def patched_function(*args, **kwargs): - # Call the original function with its original arguments - init_timestamp = get_ISO_time() - session = kwargs.get("session", None) - if "session" in kwargs.keys(): - del kwargs["session"] - result = self.original_create(*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp, session=session) - - # Override the original method with the patched one - completions.Completions.create = patched_function - - def _override_async_chat(self): - from groq.resources.chat import completions - - self.original_async_create = completions.AsyncCompletions.create - - async def patched_function(*args, **kwargs): - # Call the original function with its original arguments - init_timestamp = get_ISO_time() - result = await self.original_async_create(*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp) - - # Override the original method with the patched one - completions.AsyncCompletions.create = patched_function diff --git a/agentops/llms/providers/litellm.py b/agentops/llms/providers/litellm.py deleted file mode 100644 index 488a94b9b..000000000 --- a/agentops/llms/providers/litellm.py +++ /dev/null @@ -1,231 +0,0 @@ -import pprint -from typing import Optional - -from agentops.log_config import logger -from agentops.event import LLMEvent, ErrorEvent -from agentops.session import Session -from agentops.helpers import get_ISO_time, check_call_stack_for_agent_id -from agentops.llms.providers.base import BaseProvider -from agentops.time_travel import fetch_completion_override_from_time_travel_cache -from agentops.singleton import singleton - - -@singleton -class LiteLLMProvider(BaseProvider): - original_create = None - original_create_async = None - original_oai_create = None - original_oai_create_async = None - - def __init__(self, client): - super().__init__(client) - - def 
override(self): - self._override_async_completion() - self._override_completion() - - def undo_override(self): - if ( - self.original_create is not None - and self.original_create_async is not None - and self.original_oai_create is not None - and self.original_oai_create_async is not None - ): - import litellm - from openai.resources.chat import completions - - litellm.acompletion = self.original_create_async - litellm.completion = self.original_create - - completions.Completions.create = self.original_oai_create - completions.AsyncCompletions.create = self.original_oai_create_async - - def handle_response(self, response, kwargs, init_timestamp, session: Optional[Session] = None) -> dict: - """Handle responses for OpenAI versions >v1.0.0""" - from openai import AsyncStream, Stream - from openai.resources import AsyncCompletions - from openai.types.chat import ChatCompletionChunk - from litellm.utils import CustomStreamWrapper - - llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) - if session is not None: - llm_event.session_id = session.session_id - - def handle_stream_chunk(chunk: ChatCompletionChunk): - # NOTE: prompt/completion usage not returned in response when streaming - # We take the first ChatCompletionChunk and accumulate the deltas from all subsequent chunks to build one full chat completion - if llm_event.returns == None: - llm_event.returns = chunk - - try: - accumulated_delta = llm_event.returns.choices[0].delta - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.model = chunk.model - llm_event.prompt = kwargs["messages"] - - # NOTE: We assume for completion only choices[0] is relevant - choice = chunk.choices[0] - - if choice.delta.content: - accumulated_delta.content += choice.delta.content - - if choice.delta.role: - accumulated_delta.role = choice.delta.role - - if choice.delta.tool_calls: - accumulated_delta.tool_calls = choice.delta.tool_calls - - if choice.delta.function_call: - accumulated_delta.function_call = choice.delta.function_call - - if choice.finish_reason: - # Streaming is done. Record LLMEvent - llm_event.returns.choices[0].finish_reason = choice.finish_reason - llm_event.completion = { - "role": accumulated_delta.role, - "content": accumulated_delta.content, - "function_call": accumulated_delta.function_call, - "tool_calls": accumulated_delta.tool_calls, - } - llm_event.end_timestamp = get_ISO_time() - - self._safe_record(session, llm_event) - except Exception as e: - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - - kwargs_str = pprint.pformat(kwargs) - chunk = pprint.pformat(chunk) - logger.warning( - f"Unable to parse a chunk for LLM call. 
Skipping upload to AgentOps\n" - f"chunk:\n {chunk}\n" - f"kwargs:\n {kwargs_str}\n" - ) - - # if the response is a generator, decorate the generator - if isinstance(response, Stream): - - def generator(): - for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return generator() - - # litellm uses a CustomStreamWrapper - if isinstance(response, CustomStreamWrapper): - - def generator(): - for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return generator() - - # For asynchronous AsyncStream - elif isinstance(response, AsyncStream): - - async def async_generator(): - async for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return async_generator() - - # For async AsyncCompletion - elif isinstance(response, AsyncCompletions): - - async def async_generator(): - async for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return async_generator() - - # v1.0.0+ responses are objects - try: - llm_event.returns = response - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.prompt = kwargs["messages"] - llm_event.prompt_tokens = response.usage.prompt_tokens - llm_event.completion = response.choices[0].message.model_dump() - llm_event.completion_tokens = response.usage.completion_tokens - llm_event.model = response.model - - self._safe_record(session, llm_event) - except Exception as e: - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - - kwargs_str = pprint.pformat(kwargs) - response = pprint.pformat(response) - logger.warning( - f"Unable to parse response for LLM call. Skipping upload to AgentOps\n" - f"response:\n {response}\n" - f"kwargs:\n {kwargs_str}\n" - ) - - return response - - def _override_completion(self): - import litellm - from openai.types.chat import ( - ChatCompletion, - ) # Note: litellm calls all LLM APIs using the OpenAI format - from openai.resources.chat import completions - - self.original_create = litellm.completion - self.original_oai_create = completions.Completions.create - - def patched_function(*args, **kwargs): - init_timestamp = get_ISO_time() - - session = kwargs.get("session", None) - if "session" in kwargs.keys(): - del kwargs["session"] - - completion_override = fetch_completion_override_from_time_travel_cache(kwargs) - if completion_override: - result_model = ChatCompletion.model_validate_json(completion_override) - return self.handle_response(result_model, kwargs, init_timestamp, session=session) - - # prompt_override = fetch_prompt_override_from_time_travel_cache(kwargs) - # if prompt_override: - # kwargs["messages"] = prompt_override["messages"] - - # Call the original function with its original arguments - result = self.original_create(*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp, session=session) - - litellm.completion = patched_function - - def _override_async_completion(self): - import litellm - from openai.types.chat import ( - ChatCompletion, - ) # Note: litellm calls all LLM APIs using the OpenAI format - from openai.resources.chat import completions - - self.original_create_async = litellm.acompletion - self.original_oai_create_async = completions.AsyncCompletions.create - - async def patched_function(*args, **kwargs): - init_timestamp = get_ISO_time() - - session = kwargs.get("session", None) - if "session" in kwargs.keys(): - del kwargs["session"] - - completion_override = fetch_completion_override_from_time_travel_cache(kwargs) - if completion_override: - result_model = 
ChatCompletion.model_validate_json(completion_override) - return self.handle_response(result_model, kwargs, init_timestamp, session=session) - - # prompt_override = fetch_prompt_override_from_time_travel_cache(kwargs) - # if prompt_override: - # kwargs["messages"] = prompt_override["messages"] - - # Call the original function with its original arguments - result = await self.original_create_async(*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp, session=session) - - # Override the original method with the patched one - litellm.acompletion = patched_function diff --git a/agentops/llms/providers/llama_stack_client.py b/agentops/llms/providers/llama_stack_client.py deleted file mode 100644 index 0f7601536..000000000 --- a/agentops/llms/providers/llama_stack_client.py +++ /dev/null @@ -1,295 +0,0 @@ -import inspect -import pprint -from typing import Any, AsyncGenerator, Dict, Optional, List, Union -import logging - -from agentops.event import LLMEvent, ErrorEvent, ToolEvent -from agentops.session import Session -from agentops.log_config import logger -from agentops.helpers import get_ISO_time, check_call_stack_for_agent_id -from agentops.llms.providers.base import BaseProvider - - -class LlamaStackClientProvider(BaseProvider): - original_complete = None - original_create_turn = None - - def __init__(self, client): - super().__init__(client) - self._provider_name = "LlamaStack" - - def handle_response( - self, response, kwargs, init_timestamp, session: Optional[Session] = None, metadata: Optional[Dict] = {} - ) -> dict: - """Handle responses for LlamaStack""" - - try: - stack = [] - accum_delta = None - accum_tool_delta = None - # tool_event = None - # llm_event = None - - def handle_stream_chunk(chunk: dict): - nonlocal stack - - # NOTE: prompt/completion usage not returned in response when streaming - - try: - nonlocal accum_delta - - if chunk.event.event_type == "start": - llm_event = LLMEvent(init_timestamp=get_ISO_time(), params=kwargs) - stack.append({"event_type": "start", "event": llm_event}) - accum_delta = chunk.event.delta - elif chunk.event.event_type == "progress": - accum_delta += chunk.event.delta - elif chunk.event.event_type == "complete": - if ( - stack[-1]["event_type"] == "start" - ): # check if the last event in the stack is a step start event - llm_event = stack.pop().get("event") - llm_event.prompt = [ - {"content": message.content, "role": message.role} for message in kwargs["messages"] - ] - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.model = kwargs["model_id"] - llm_event.prompt_tokens = None - llm_event.completion = accum_delta or kwargs["completion"] - llm_event.completion_tokens = None - llm_event.end_timestamp = get_ISO_time() - self._safe_record(session, llm_event) - - except Exception as e: - llm_event = LLMEvent(init_timestamp=init_timestamp, end_timestamp=get_ISO_time(), params=kwargs) - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - - kwargs_str = pprint.pformat(kwargs) - chunk = pprint.pformat(chunk) - logger.warning( - f"Unable to parse a chunk for LLM call. 
Skipping upload to AgentOps\n" - f"chunk:\n {chunk}\n" - f"kwargs:\n {kwargs_str}\n" - ) - - def handle_stream_agent(chunk: dict): - # NOTE: prompt/completion usage not returned in response when streaming - - # nonlocal llm_event - nonlocal stack - - if session is not None: - llm_event.session_id = session.session_id - - try: - if chunk.event.payload.event_type == "turn_start": - logger.debug("turn_start") - stack.append({"event_type": chunk.event.payload.event_type, "event": None}) - elif chunk.event.payload.event_type == "step_start": - logger.debug("step_start") - llm_event = LLMEvent(init_timestamp=get_ISO_time(), params=kwargs) - stack.append({"event_type": chunk.event.payload.event_type, "event": llm_event}) - elif chunk.event.payload.event_type == "step_progress": - if ( - chunk.event.payload.step_type == "inference" - and chunk.event.payload.text_delta_model_response - ): - nonlocal accum_delta - delta = chunk.event.payload.text_delta_model_response - - if accum_delta: - accum_delta += delta - else: - accum_delta = delta - elif chunk.event.payload.step_type == "inference" and chunk.event.payload.tool_call_delta: - if chunk.event.payload.tool_call_delta.parse_status == "started": - logger.debug("tool_started") - tool_event = ToolEvent(init_timestamp=get_ISO_time(), params=kwargs) - tool_event.name = "tool_started" - - stack.append({"event_type": "tool_started", "event": tool_event}) - - elif chunk.event.payload.tool_call_delta.parse_status == "in_progress": - nonlocal accum_tool_delta - delta = chunk.event.payload.tool_call_delta.content - if accum_tool_delta: - accum_tool_delta += delta - else: - accum_tool_delta = delta - elif chunk.event.payload.tool_call_delta.parse_status == "success": - logger.debug("ToolExecution - success") - if ( - stack[-1]["event_type"] == "tool_started" - ): # check if the last event in the stack is a tool execution event - tool_event = stack.pop().get("event") - tool_event.end_timestamp = get_ISO_time() - tool_event.params["completion"] = accum_tool_delta - self._safe_record(session, tool_event) - elif chunk.event.payload.tool_call_delta.parse_status == "failure": - logger.warning("ToolExecution - failure") - if stack[-1]["event_type"] == "ToolExecution - started": - tool_event = stack.pop().get("event") - tool_event.end_timestamp = get_ISO_time() - tool_event.params["completion"] = accum_tool_delta - self._safe_record( - session, - ErrorEvent( - trigger_event=tool_event, exception=Exception("ToolExecution - failure") - ), - ) - - elif chunk.event.payload.event_type == "step_complete": - logger.debug("Step complete event received") - - if chunk.event.payload.step_type == "inference": - logger.debug("Step complete inference") - - if stack[-1]["event_type"] == "step_start": - llm_event = stack.pop().get("event") - llm_event.prompt = [ - {"content": message["content"], "role": message["role"]} - for message in kwargs["messages"] - ] - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.model = metadata.get("model_id", "Unable to identify model") - llm_event.prompt_tokens = None - llm_event.completion = accum_delta or kwargs["completion"] - llm_event.completion_tokens = None - llm_event.end_timestamp = get_ISO_time() - self._safe_record(session, llm_event) - else: - logger.warning("Unexpected event stack state for inference step complete") - elif chunk.event.payload.step_type == "tool_execution": - if stack[-1]["event_type"] == "tool_started": - logger.debug("tool_complete") - tool_event = stack.pop().get("event") - tool_event.name = 
"tool_complete" - tool_event.params["completion"] = accum_tool_delta - self._safe_record(session, tool_event) - elif chunk.event.payload.event_type == "turn_complete": - if stack[-1]["event_type"] == "turn_start": - logger.debug("turn_start") - pass - - except Exception as e: - llm_event = LLMEvent(init_timestamp=init_timestamp, end_timestamp=get_ISO_time(), params=kwargs) - - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - - kwargs_str = pprint.pformat(kwargs) - chunk = pprint.pformat(chunk) - logger.warning( - f"Unable to parse a chunk for LLM call. Skipping upload to AgentOps\n" - f"chunk:\n {chunk}\n" - f"kwargs:\n {kwargs_str}\n" - ) - - if kwargs.get("stream", False): - - def generator(): - for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return generator() - elif inspect.isasyncgen(response): - - async def agent_generator(): - async for chunk in response: - handle_stream_agent(chunk) - yield chunk - - return agent_generator() - elif inspect.isgenerator(response): - - def agent_generator(): - for chunk in response: - handle_stream_agent(chunk) - yield chunk - - return agent_generator() - else: - llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) - if session is not None: - llm_event.session_id = session.session_id - - llm_event.returns = response - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.model = kwargs["model_id"] - llm_event.prompt = [ - {"content": message.content, "role": message.role} for message in kwargs["messages"] - ] - llm_event.prompt_tokens = None - llm_event.completion = response.completion_message.content - llm_event.completion_tokens = None - llm_event.end_timestamp = get_ISO_time() - - self._safe_record(session, llm_event) - except Exception as e: - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - kwargs_str = pprint.pformat(kwargs) - response = pprint.pformat(response) - logger.warning( - f"Unable to parse response for LLM call. 
Skipping upload to AgentOps\n" - f"response:\n {response}\n" - f"kwargs:\n {kwargs_str}\n" - ) - - return response - - def _override_complete(self): - from llama_stack_client.resources import InferenceResource - - global original_complete - original_complete = InferenceResource.chat_completion - - def patched_function(*args, **kwargs): - # Call the original function with its original arguments - init_timestamp = get_ISO_time() - session = kwargs.get("session", None) - if "session" in kwargs.keys(): - del kwargs["session"] - result = original_complete(*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp, session=session) - - # Override the original method with the patched one - InferenceResource.chat_completion = patched_function - - def _override_create_turn(self): - from llama_stack_client.lib.agents.agent import Agent - - self.original_create_turn = Agent.create_turn - - def patched_function(*args, **kwargs): - # Call the original function with its original arguments - init_timestamp = get_ISO_time() - session = kwargs.get("session", None) - if "session" in kwargs.keys(): - del kwargs["session"] - - result = self.original_create_turn(*args, **kwargs) - return self.handle_response( - result, - kwargs, - init_timestamp, - session=session, - metadata={"model_id": args[0].agent_config.get("model")}, - ) - - # Override the original method with the patched one - Agent.create_turn = patched_function - - def override(self): - self._override_complete() - self._override_create_turn() - - def undo_override(self): - if self.original_complete is not None: - from llama_stack_client.resources import InferenceResource - - InferenceResource.chat_completion = self.original_complete - - if self.original_create_turn is not None: - from llama_stack_client.lib.agents.agent import Agent - - Agent.create_turn = self.original_create_turn diff --git a/agentops/llms/providers/mistral.py b/agentops/llms/providers/mistral.py deleted file mode 100644 index 83f090cf0..000000000 --- a/agentops/llms/providers/mistral.py +++ /dev/null @@ -1,215 +0,0 @@ -import inspect -import pprint -import sys -from typing import Optional - -from agentops.event import LLMEvent, ErrorEvent -from agentops.session import Session -from agentops.log_config import logger -from agentops.helpers import get_ISO_time, check_call_stack_for_agent_id -from .base import BaseProvider - - -class MistralProvider(BaseProvider): - original_complete = None - original_complete_async = None - original_stream = None - original_stream_async = None - - def __init__(self, client): - super().__init__(client) - self._provider_name = "Mistral" - - def handle_response(self, response, kwargs, init_timestamp, session: Optional[Session] = None) -> dict: - """Handle responses for Mistral""" - from mistralai import Chat - from mistralai.types import UNSET, UNSET_SENTINEL - - llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) - if session is not None: - llm_event.session_id = session.session_id - - def handle_stream_chunk(chunk: dict): - # NOTE: prompt/completion usage not returned in response when streaming - # We take the first ChatCompletionChunk and accumulate the deltas from all subsequent chunks to build one full chat completion - if llm_event.returns is None: - llm_event.returns = chunk.data - - try: - accumulated_delta = llm_event.returns.choices[0].delta - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.model = "mistral/" + chunk.data.model - llm_event.prompt = kwargs["messages"] - - # NOTE: We assume for 
completion only choices[0] is relevant - choice = chunk.data.choices[0] - - if choice.delta.content: - accumulated_delta.content += choice.delta.content - - if choice.delta.role: - accumulated_delta.role = choice.delta.role - - # Check if tool_calls is Unset and set to None if it is - if choice.delta.tool_calls in (UNSET, UNSET_SENTINEL): - accumulated_delta.tool_calls = None - elif choice.delta.tool_calls: - accumulated_delta.tool_calls = choice.delta.tool_calls - - if choice.finish_reason: - # Streaming is done. Record LLMEvent - llm_event.returns.choices[0].finish_reason = choice.finish_reason - llm_event.completion = { - "role": accumulated_delta.role, - "content": accumulated_delta.content, - "tool_calls": accumulated_delta.tool_calls, - } - llm_event.prompt_tokens = chunk.data.usage.prompt_tokens - llm_event.completion_tokens = chunk.data.usage.completion_tokens - llm_event.end_timestamp = get_ISO_time() - self._safe_record(session, llm_event) - - except Exception as e: - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - - kwargs_str = pprint.pformat(kwargs) - chunk = pprint.pformat(chunk) - logger.warning( - f"Unable to parse a chunk for LLM call. Skipping upload to AgentOps\n" - f"chunk:\n {chunk}\n" - f"kwargs:\n {kwargs_str}\n" - ) - - # if the response is a generator, decorate the generator - if inspect.isgenerator(response): - - def generator(): - for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return generator() - - elif inspect.isasyncgen(response): - - async def async_generator(): - async for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return async_generator() - - try: - llm_event.returns = response - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.model = "mistral/" + response.model - llm_event.prompt = kwargs["messages"] - llm_event.prompt_tokens = response.usage.prompt_tokens - llm_event.completion = response.choices[0].message.model_dump() - llm_event.completion_tokens = response.usage.completion_tokens - llm_event.end_timestamp = get_ISO_time() - - self._safe_record(session, llm_event) - except Exception as e: - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - kwargs_str = pprint.pformat(kwargs) - response = pprint.pformat(response) - logger.warning( - f"Unable to parse response for LLM call. 
Skipping upload to AgentOps\n" - f"response:\n {response}\n" - f"kwargs:\n {kwargs_str}\n" - ) - - return response - - def _override_complete(self): - from mistralai import Chat - - global original_complete - original_complete = Chat.complete - - def patched_function(*args, **kwargs): - # Call the original function with its original arguments - init_timestamp = get_ISO_time() - session = kwargs.get("session", None) - if "session" in kwargs.keys(): - del kwargs["session"] - result = original_complete(*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp, session=session) - - # Override the original method with the patched one - Chat.complete = patched_function - - def _override_complete_async(self): - from mistralai import Chat - - global original_complete_async - original_complete_async = Chat.complete_async - - async def patched_function(*args, **kwargs): - # Call the original function with its original arguments - init_timestamp = get_ISO_time() - session = kwargs.get("session", None) - if "session" in kwargs.keys(): - del kwargs["session"] - result = await original_complete_async(*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp, session=session) - - # Override the original method with the patched one - Chat.complete_async = patched_function - - def _override_stream(self): - from mistralai import Chat - - global original_stream - original_stream = Chat.stream - - def patched_function(*args, **kwargs): - # Call the original function with its original arguments - init_timestamp = get_ISO_time() - session = kwargs.get("session", None) - if "session" in kwargs.keys(): - del kwargs["session"] - result = original_stream(*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp, session=session) - - # Override the original method with the patched one - Chat.stream = patched_function - - def _override_stream_async(self): - from mistralai import Chat - - global original_stream_async - original_stream_async = Chat.stream_async - - async def patched_function(*args, **kwargs): - # Call the original function with its original arguments - init_timestamp = get_ISO_time() - session = kwargs.get("session", None) - if "session" in kwargs.keys(): - del kwargs["session"] - result = await original_stream_async(*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp, session=session) - - # Override the original method with the patched one - Chat.stream_async = patched_function - - def override(self): - self._override_complete() - self._override_complete_async() - self._override_stream() - self._override_stream_async() - - def undo_override(self): - if ( - self.original_complete is not None - and self.original_complete_async is not None - and self.original_stream is not None - and self.original_stream_async is not None - ): - from mistralai import Chat - - Chat.complete = self.original_complete - Chat.complete_async = self.original_complete_async - Chat.stream = self.original_stream - Chat.stream_async = self.original_stream_async diff --git a/agentops/llms/providers/ollama.py b/agentops/llms/providers/ollama.py deleted file mode 100644 index ce2a7fc8b..000000000 --- a/agentops/llms/providers/ollama.py +++ /dev/null @@ -1,126 +0,0 @@ -import inspect -import sys -from typing import Optional - -from agentops.event import LLMEvent -from agentops.session import Session -from agentops.helpers import get_ISO_time, check_call_stack_for_agent_id -from .base import BaseProvider -from agentops.singleton import singleton - 
-original_func = {} - - -@singleton -class OllamaProvider(BaseProvider): - original_create = None - original_create_async = None - - def handle_response(self, response, kwargs, init_timestamp, session: Optional[Session] = None) -> dict: - llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) - if session is not None: - llm_event.session_id = session.session_id - - def handle_stream_chunk(chunk: dict): - message = chunk.get("message", {"role": None, "content": ""}) - - if chunk.get("done"): - llm_event.end_timestamp = get_ISO_time() - llm_event.model = f"ollama/{chunk.get('model')}" - llm_event.returns = chunk - llm_event.returns["message"] = llm_event.completion - llm_event.prompt = kwargs["messages"] - llm_event.agent_id = check_call_stack_for_agent_id() - self._safe_record(session, llm_event) - - if llm_event.completion is None: - llm_event.completion = { - "role": message.get("role"), - "content": message.get("content", ""), - "tool_calls": None, - "function_call": None, - } - else: - llm_event.completion["content"] += message.get("content", "") - - if inspect.isgenerator(response): - - def generator(): - for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return generator() - - llm_event.end_timestamp = get_ISO_time() - llm_event.model = f"ollama/{response['model']}" - llm_event.returns = response - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.prompt = kwargs["messages"] - llm_event.completion = { - "role": response["message"].get("role"), - "content": response["message"].get("content", ""), - "tool_calls": None, - "function_call": None, - } - self._safe_record(session, llm_event) - return response - - def override(self): - self._override_chat_client() - self._override_chat() - self._override_chat_async_client() - - def undo_override(self): - if original_func is not None and original_func != {}: - import ollama - - ollama.chat = original_func["ollama.chat"] - ollama.Client.chat = original_func["ollama.Client.chat"] - ollama.AsyncClient.chat = original_func["ollama.AsyncClient.chat"] - - def __init__(self, client): - super().__init__(client) - - def _override_chat(self): - import ollama - - original_func["ollama.chat"] = ollama.chat - - def patched_function(*args, **kwargs): - # Call the original function with its original arguments - init_timestamp = get_ISO_time() - result = original_func["ollama.chat"](*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp, session=kwargs.get("session", None)) - - # Override the original method with the patched one - ollama.chat = patched_function - - def _override_chat_client(self): - from ollama import Client - - original_func["ollama.Client.chat"] = Client.chat - - def patched_function(*args, **kwargs): - # Call the original function with its original arguments - init_timestamp = get_ISO_time() - result = original_func["ollama.Client.chat"](*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp, session=kwargs.get("session", None)) - - # Override the original method with the patched one - Client.chat = patched_function - - def _override_chat_async_client(self): - from ollama import AsyncClient - - original_func = {} - original_func["ollama.AsyncClient.chat"] = AsyncClient.chat - - async def patched_function(*args, **kwargs): - # Call the original function with its original arguments - init_timestamp = get_ISO_time() - result = await original_func["ollama.AsyncClient.chat"](*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp, 
session=kwargs.get("session", None)) - - # Override the original method with the patched one - AsyncClient.chat = patched_function diff --git a/agentops/llms/providers/openai.py b/agentops/llms/providers/openai.py deleted file mode 100644 index 171b39fe1..000000000 --- a/agentops/llms/providers/openai.py +++ /dev/null @@ -1,344 +0,0 @@ -import pprint -from typing import Optional - -from agentops.llms.providers.base import BaseProvider -from agentops.time_travel import fetch_completion_override_from_time_travel_cache - -from agentops.event import ActionEvent, ErrorEvent, LLMEvent -from agentops.session import Session -from agentops.log_config import logger -from agentops.helpers import check_call_stack_for_agent_id, get_ISO_time -from agentops.singleton import singleton - - -@singleton -class OpenAiProvider(BaseProvider): - original_create = None - original_create_async = None - original_assistant_methods = None - assistants_run_steps = {} - - def __init__(self, client): - super().__init__(client) - self._provider_name = "OpenAI" - - def handle_response(self, response, kwargs, init_timestamp, session: Optional[Session] = None) -> dict: - """Handle responses for OpenAI versions >v1.0.0""" - from openai import AsyncStream, Stream - from openai.resources import AsyncCompletions - from openai.types.chat import ChatCompletionChunk - - llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) - if session is not None: - llm_event.session_id = session.session_id - - def handle_stream_chunk(chunk: ChatCompletionChunk): - # NOTE: prompt/completion usage not returned in response when streaming - # We take the first ChatCompletionChunk and accumulate the deltas from all subsequent chunks to build one full chat completion - if llm_event.returns == None: - llm_event.returns = chunk - - try: - accumulated_delta = llm_event.returns.choices[0].delta - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.model = chunk.model - llm_event.prompt = kwargs["messages"] - - # NOTE: We assume for completion only choices[0] is relevant - choice = chunk.choices[0] - - if choice.delta.content: - accumulated_delta.content += choice.delta.content - - if choice.delta.role: - accumulated_delta.role = choice.delta.role - - if choice.delta.tool_calls: - accumulated_delta.tool_calls = choice.delta.tool_calls - - if choice.delta.function_call: - accumulated_delta.function_call = choice.delta.function_call - - if choice.finish_reason: - # Streaming is done. Record LLMEvent - llm_event.returns.choices[0].finish_reason = choice.finish_reason - llm_event.completion = { - "role": accumulated_delta.role, - "content": accumulated_delta.content, - "function_call": accumulated_delta.function_call, - "tool_calls": accumulated_delta.tool_calls, - } - llm_event.end_timestamp = get_ISO_time() - - self._safe_record(session, llm_event) - except Exception as e: - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - - kwargs_str = pprint.pformat(kwargs) - chunk = pprint.pformat(chunk) - logger.warning( - f"Unable to parse a chunk for LLM call. 
Skipping upload to AgentOps\n" - f"chunk:\n {chunk}\n" - f"kwargs:\n {kwargs_str}\n" - ) - - # if the response is a generator, decorate the generator - if isinstance(response, Stream): - - def generator(): - for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return generator() - - # For asynchronous AsyncStream - elif isinstance(response, AsyncStream): - - async def async_generator(): - async for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return async_generator() - - # For async AsyncCompletion - elif isinstance(response, AsyncCompletions): - - async def async_generator(): - async for chunk in response: - handle_stream_chunk(chunk) - yield chunk - - return async_generator() - - # v1.0.0+ responses are objects - try: - llm_event.returns = response - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.prompt = kwargs["messages"] - llm_event.prompt_tokens = response.usage.prompt_tokens - llm_event.completion = response.choices[0].message.model_dump() - llm_event.completion_tokens = response.usage.completion_tokens - llm_event.model = response.model - - self._safe_record(session, llm_event) - except Exception as e: - self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e)) - - kwargs_str = pprint.pformat(kwargs) - response = pprint.pformat(response) - logger.warning( - f"Unable to parse response for LLM call. Skipping upload to AgentOps\n" - f"response:\n {response}\n" - f"kwargs:\n {kwargs_str}\n" - ) - - return response - - def handle_assistant_response(self, response, kwargs, init_timestamp, session: Optional[Session] = None) -> dict: - """Handle response based on return type""" - from openai.pagination import BasePage - - action_event = ActionEvent(init_timestamp=init_timestamp, params=kwargs) - if session is not None: - action_event.session_id = session.session_id - - try: - # Set action type and returns - action_event.action_type = ( - response.__class__.__name__.split("[")[1][:-1] - if isinstance(response, BasePage) - else response.__class__.__name__ - ) - action_event.returns = response.model_dump() if hasattr(response, "model_dump") else response - action_event.end_timestamp = get_ISO_time() - self._safe_record(session, action_event) - - # Create LLMEvent if usage data exists - response_dict = response.model_dump() if hasattr(response, "model_dump") else {} - - if "id" in response_dict and response_dict.get("id").startswith("run"): - if response_dict["id"] not in self.assistants_run_steps: - self.assistants_run_steps[response_dict.get("id")] = {"model": response_dict.get("model")} - - if "usage" in response_dict and response_dict["usage"] is not None: - llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) - if session is not None: - llm_event.session_id = session.session_id - - llm_event.model = response_dict.get("model") - llm_event.prompt_tokens = response_dict["usage"]["prompt_tokens"] - llm_event.completion_tokens = response_dict["usage"]["completion_tokens"] - llm_event.end_timestamp = get_ISO_time() - self._safe_record(session, llm_event) - - elif "data" in response_dict: - for item in response_dict["data"]: - if "usage" in item and item["usage"] is not None: - llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) - if session is not None: - llm_event.session_id = session.session_id - - llm_event.model = self.assistants_run_steps[item["run_id"]]["model"] - llm_event.prompt_tokens = item["usage"]["prompt_tokens"] - llm_event.completion_tokens = item["usage"]["completion_tokens"] 
- llm_event.end_timestamp = get_ISO_time() - self._safe_record(session, llm_event) - - except Exception as e: - self._safe_record(session, ErrorEvent(trigger_event=action_event, exception=e)) - - kwargs_str = pprint.pformat(kwargs) - response = pprint.pformat(response) - logger.warning( - f"Unable to parse response for Assistants API. Skipping upload to AgentOps\n" - f"response:\n {response}\n" - f"kwargs:\n {kwargs_str}\n" - ) - - return response - - def override(self): - self._override_openai_v1_completion() - self._override_openai_v1_async_completion() - self._override_openai_assistants_beta() - - def _override_openai_v1_completion(self): - from openai.resources.chat import completions - from openai.types.chat import ChatCompletion, ChatCompletionChunk - - # Store the original method - self.original_create = completions.Completions.create - - def patched_function(*args, **kwargs): - init_timestamp = get_ISO_time() - session = kwargs.get("session", None) - if "session" in kwargs.keys(): - del kwargs["session"] - - completion_override = fetch_completion_override_from_time_travel_cache(kwargs) - if completion_override: - result_model = None - pydantic_models = (ChatCompletion, ChatCompletionChunk) - for pydantic_model in pydantic_models: - try: - result_model = pydantic_model.model_validate_json(completion_override) - break - except Exception as e: - pass - - if result_model is None: - logger.error( - f"Time Travel: Pydantic validation failed for {pydantic_models} \n" - f"Time Travel: Completion override was:\n" - f"{pprint.pformat(completion_override)}" - ) - return None - return self.handle_response(result_model, kwargs, init_timestamp, session=session) - - # prompt_override = fetch_prompt_override_from_time_travel_cache(kwargs) - # if prompt_override: - # kwargs["messages"] = prompt_override["messages"] - - # Call the original function with its original arguments - result = self.original_create(*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp, session=session) - - # Override the original method with the patched one - completions.Completions.create = patched_function - - def _override_openai_v1_async_completion(self): - from openai.resources.chat import completions - from openai.types.chat import ChatCompletion, ChatCompletionChunk - - # Store the original method - self.original_create_async = completions.AsyncCompletions.create - - async def patched_function(*args, **kwargs): - init_timestamp = get_ISO_time() - - session = kwargs.get("session", None) - if "session" in kwargs.keys(): - del kwargs["session"] - - completion_override = fetch_completion_override_from_time_travel_cache(kwargs) - if completion_override: - result_model = None - pydantic_models = (ChatCompletion, ChatCompletionChunk) - for pydantic_model in pydantic_models: - try: - result_model = pydantic_model.model_validate_json(completion_override) - break - except Exception as e: - pass - - if result_model is None: - logger.error( - f"Time Travel: Pydantic validation failed for {pydantic_models} \n" - f"Time Travel: Completion override was:\n" - f"{pprint.pformat(completion_override)}" - ) - return None - return self.handle_response(result_model, kwargs, init_timestamp, session=session) - - # prompt_override = fetch_prompt_override_from_time_travel_cache(kwargs) - # if prompt_override: - # kwargs["messages"] = prompt_override["messages"] - - # Call the original function with its original arguments - result = await self.original_create_async(*args, **kwargs) - return self.handle_response(result, 
kwargs, init_timestamp, session=session) - - # Override the original method with the patched one - completions.AsyncCompletions.create = patched_function - - def _override_openai_assistants_beta(self): - """Override OpenAI Assistants API methods""" - from openai._legacy_response import LegacyAPIResponse - from openai.resources import beta - - def create_patched_function(original_func): - def patched_function(*args, **kwargs): - init_timestamp = get_ISO_time() - - session = kwargs.get("session", None) - if "session" in kwargs.keys(): - del kwargs["session"] - - response = original_func(*args, **kwargs) - if isinstance(response, LegacyAPIResponse): - return response - - return self.handle_assistant_response(response, kwargs, init_timestamp, session=session) - - return patched_function - - # Store and patch Assistant API methods - assistant_api_methods = { - beta.Assistants: ["create", "retrieve", "update", "delete", "list"], - beta.Threads: ["create", "retrieve", "update", "delete"], - beta.threads.Messages: ["create", "retrieve", "update", "list"], - beta.threads.Runs: ["create", "retrieve", "update", "list", "submit_tool_outputs", "cancel"], - beta.threads.runs.steps.Steps: ["retrieve", "list"], - } - - self.original_assistant_methods = { - (cls, method): getattr(cls, method) for cls, methods in assistant_api_methods.items() for method in methods - } - - # Override methods and verify - for (cls, method), original_func in self.original_assistant_methods.items(): - patched_function = create_patched_function(original_func) - setattr(cls, method, patched_function) - - def undo_override(self): - if self.original_create is not None and self.original_create_async is not None: - from openai.resources.chat import completions - - completions.AsyncCompletions.create = self.original_create_async - completions.Completions.create = self.original_create - - if self.original_assistant_methods is not None: - for (cls, method), original in self.original_assistant_methods.items(): - setattr(cls, method, original) diff --git a/agentops/llms/providers/taskweaver.py b/agentops/llms/providers/taskweaver.py deleted file mode 100644 index ae2fda1c0..000000000 --- a/agentops/llms/providers/taskweaver.py +++ /dev/null @@ -1,146 +0,0 @@ -import pprint -from typing import Optional -import json - -from agentops.event import ErrorEvent, LLMEvent, ActionEvent -from agentops.session import Session -from agentops.log_config import logger -from agentops.helpers import get_ISO_time, check_call_stack_for_agent_id -from agentops.llms.providers.base import BaseProvider -from agentops.singleton import singleton - - -@singleton -class TaskWeaverProvider(BaseProvider): - original_chat_completion = None - - def __init__(self, client): - super().__init__(client) - self._provider_name = "TaskWeaver" - self.client.add_default_tags(["taskweaver"]) - - def handle_response(self, response, kwargs, init_timestamp, session: Optional[Session] = None) -> dict: - """Handle responses for TaskWeaver""" - llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) - action_event = ActionEvent(init_timestamp=init_timestamp) - - try: - response_dict = response.get("response", {}) - - action_event.params = kwargs.get("json_schema", None) - action_event.returns = response_dict - action_event.end_timestamp = get_ISO_time() - self._safe_record(session, action_event) - except Exception as e: - error_event = ErrorEvent( - trigger_event=action_event, exception=e, details={"response": str(response), "kwargs": str(kwargs)} - ) - 
self._safe_record(session, error_event) - kwargs_str = pprint.pformat(kwargs) - response_str = pprint.pformat(response) - logger.error( - f"Unable to parse response for Action call. Skipping upload to AgentOps\n" - f"response:\n {response_str}\n" - f"kwargs:\n {kwargs_str}\n" - ) - - try: - llm_event.init_timestamp = init_timestamp - llm_event.params = kwargs - llm_event.returns = response_dict - llm_event.agent_id = check_call_stack_for_agent_id() - llm_event.model = kwargs.get("model", "unknown") - llm_event.prompt = kwargs.get("messages") - llm_event.completion = response_dict.get("message", "") - llm_event.end_timestamp = get_ISO_time() - self._safe_record(session, llm_event) - - except Exception as e: - error_event = ErrorEvent( - trigger_event=llm_event, exception=e, details={"response": str(response), "kwargs": str(kwargs)} - ) - self._safe_record(session, error_event) - kwargs_str = pprint.pformat(kwargs) - response_str = pprint.pformat(response) - logger.error( - f"Unable to parse response for LLM call. Skipping upload to AgentOps\n" - f"response:\n {response_str}\n" - f"kwargs:\n {kwargs_str}\n" - ) - - return response - - def override(self): - """Override TaskWeaver's chat completion methods""" - try: - from taskweaver.llm import llm_completion_config_map - - def create_patched_chat_completion(original_method): - """Create a new patched chat_completion function with bound original method""" - - def patched_chat_completion(service, *args, **kwargs): - init_timestamp = get_ISO_time() - session = kwargs.get("session", None) - if "session" in kwargs.keys(): - del kwargs["session"] - - result = original_method(service, *args, **kwargs) - kwargs.update( - { - "model": self._get_model_name(service), - "messages": args[0], - "stream": args[1], - "temperature": args[2], - "max_tokens": args[3], - "top_p": args[4], - "stop": args[5], - } - ) - - if kwargs["stream"]: - accumulated_content = "" - for chunk in result: - if isinstance(chunk, dict) and "content" in chunk: - accumulated_content += chunk["content"] - else: - accumulated_content += chunk - yield chunk - accumulated_content = json.loads(accumulated_content) - return self.handle_response(accumulated_content, kwargs, init_timestamp, session=session) - else: - return self.handle_response(result, kwargs, init_timestamp, session=session) - - return patched_chat_completion - - for service_name, service_class in llm_completion_config_map.items(): - if not hasattr(service_class, "_original_chat_completion"): - service_class._original_chat_completion = service_class.chat_completion - service_class.chat_completion = create_patched_chat_completion( - service_class._original_chat_completion - ) - - except Exception as e: - logger.error(f"Failed to patch method: {str(e)}", exc_info=True) - - def undo_override(self): - """Restore original TaskWeaver chat completion methods""" - try: - from taskweaver.llm import llm_completion_config_map - - for service_name, service_class in llm_completion_config_map.items(): - service_class.chat_completion = service_class._original_chat_completion - delattr(service_class, "_original_chat_completion") - - except Exception as e: - logger.error(f"Failed to restore original method: {str(e)}", exc_info=True) - - def _get_model_name(self, service) -> str: - """Extract model name from service instance""" - model_name = "unknown" - if hasattr(service, "config"): - config = service.config - if hasattr(config, "model"): - model_name = config.model or "unknown" - elif hasattr(config, "llm_module_config") and 
hasattr(config.llm_module_config, "model"): - model_name = config.llm_module_config.model or "unknown" - return model_name diff --git a/agentops/llms/tracker.py b/agentops/llms/tracker.py deleted file mode 100644 index 4ce5b9841..000000000 --- a/agentops/llms/tracker.py +++ /dev/null @@ -1,285 +0,0 @@ -import inspect -import sys -from importlib import import_module -from importlib.metadata import version - -from packaging.version import Version, parse - -from ..log_config import logger - -from .providers.cohere import CohereProvider -from .providers.groq import GroqProvider -from .providers.litellm import LiteLLMProvider -from .providers.ollama import OllamaProvider -from .providers.openai import OpenAiProvider -from .providers.anthropic import AnthropicProvider -from .providers.mistral import MistralProvider -from .providers.ai21 import AI21Provider -from .providers.llama_stack_client import LlamaStackClientProvider -from .providers.taskweaver import TaskWeaverProvider -from .providers.gemini import GeminiProvider - -original_func = {} -original_create = None -original_create_async = None - - -class LlmTracker: - SUPPORTED_APIS = { - "google.generativeai": { - "0.1.0": ("GenerativeModel.generate_content", "GenerativeModel.generate_content_stream"), - }, - "litellm": {"1.3.1": ("openai_chat_completions.completion",)}, - "openai": { - "1.0.0": ( - "chat.completions.create", - # Assistants - "beta.assistants.create", - "beta.assistants.retrieve", - "beta.assistants.update", - "beta.assistants.delete", - "beta.assistants.list", - "beta.assistants.files.create", - "beta.assistants.files.retrieve", - "beta.assistants.files.delete", - "beta.assistants.files.list", - # Threads - "beta.threads.create", - "beta.threads.retrieve", - "beta.threads.update", - "beta.threads.delete", - # Messages - "beta.threads.messages.create", - "beta.threads.messages.retrieve", - "beta.threads.messages.update", - "beta.threads.messages.list", - "beta.threads.messages.files.retrieve", - "beta.threads.messages.files.list", - # Runs - "beta.threads.runs.create", - "beta.threads.runs.retrieve", - "beta.threads.runs.update", - "beta.threads.runs.list", - "beta.threads.runs.cancel", - "beta.threads.runs.submit_tool_outputs", - # Run Steps - "beta.threads.runs.steps.Steps.retrieve", - "beta.threads.runs.steps.Steps.list", - ), - "0.0.0": ( - "ChatCompletion.create", - "ChatCompletion.acreate", - ), - }, - "cohere": { - "5.4.0": ("chat", "chat_stream"), - }, - "ollama": {"0.0.1": ("chat", "Client.chat", "AsyncClient.chat")}, - "llama_stack_client": { - "0.0.53": ("resources.InferenceResource.chat_completion", "lib.agents.agent.Agent.create_turn"), - }, - "groq": { - "0.9.0": ("Client.chat", "AsyncClient.chat"), - }, - "anthropic": { - "0.32.0": ("completions.create",), - }, - "mistralai": { - "1.0.1": ("chat.complete", "chat.stream"), - }, - "ai21": { - "2.0.0": ( - "chat.completions.create", - "client.answer.create", - ), - }, - "taskweaver": { - "0.0.1": ("chat_completion", "chat_completion_stream"), - }, - } - - def __init__(self, client): - self.client = client - self.litellm_initialized = False - - def _is_litellm_call(self): - """ - Detects if the API call originated from LiteLLM. - - **Issue We Are Addressing:** - - When using LiteLLM, it internally calls OpenAI methods, which results in OpenAI being initialized by default. - - This creates an issue where OpenAI is tracked as the primary provider, even when the request was routed via LiteLLM. 
- - We need to ensure that OpenAI is only tracked if it was explicitly used and **not** invoked indirectly through LiteLLM. - - **How This Works:** - - The function checks the call stack (execution history) to determine the order in which modules were called. - - If LiteLLM appears in the call stack **before** OpenAI, then OpenAI was invoked via LiteLLM, meaning we should ignore OpenAI. - - If OpenAI appears first without LiteLLM, then OpenAI was used directly, and we should track it as expected. - - **Return Value:** - - Returns `True` if the API call originated from LiteLLM. - - Returns `False` if OpenAI was directly called without going through LiteLLM. - """ - - stack = inspect.stack() - - litellm_seen = False # Track if LiteLLM was encountered in the stack - openai_seen = False # Track if OpenAI was encountered in the stack - - for frame in stack: - module = inspect.getmodule(frame.frame) - - module_name = module.__name__ if module else None - - filename = frame.filename.lower() - - if module_name and "litellm" in module_name or "litellm" in filename: - litellm_seen = True - - if module_name and "openai" in module_name or "openai" in filename: - openai_seen = True - - # If OpenAI is seen **before** LiteLLM, it means OpenAI was used directly, so return False - if not litellm_seen: - return False - - # If LiteLLM was seen at any point before OpenAI, return True (indicating an indirect OpenAI call via LiteLLM) - return litellm_seen - - def override_api(self): - """ - Overrides key methods of the specified API to record events. - """ - for api in self.SUPPORTED_APIS: - if api in sys.modules: - module = import_module(api) - - if api == "litellm": - module_version = version(api) - if module_version is None: - logger.warning("Cannot determine LiteLLM version. Only LiteLLM>=1.3.1 supported.") - - if Version(module_version) >= parse("1.3.1"): - provider = LiteLLMProvider(self.client) - provider.override() - self.litellm_initialized = True - else: - logger.warning(f"Only LiteLLM>=1.3.1 supported. v{module_version} found.") - - if api == "openai": - # Patch openai v1.0.0+ methods - # Ensure OpenAI is only initialized if it was NOT called inside LiteLLM - if not self._is_litellm_call(): - if hasattr(module, "__version__"): - module_version = parse(module.__version__) - if module_version >= parse("1.0.0"): - provider = OpenAiProvider(self.client) - provider.override() - else: - raise DeprecationWarning( - "OpenAI versions < 0.1 are no longer supported by AgentOps. Please upgrade OpenAI or " - "downgrade AgentOps to <=0.3.8." - ) - - if api == "cohere": - # Patch cohere v5.4.0+ methods - module_version = version(api) - if module_version is None: - logger.warning("Cannot determine Cohere version. Only Cohere>=5.4.0 supported.") - - if Version(module_version) >= parse("5.4.0"): - provider = CohereProvider(self.client) - provider.override() - else: - logger.warning(f"Only Cohere>=5.4.0 supported. v{module_version} found.") - - if api == "ollama": - module_version = version(api) - - if Version(module_version) >= parse("0.0.1"): - provider = OllamaProvider(self.client) - provider.override() - else: - logger.warning(f"Only Ollama>=0.0.1 supported. v{module_version} found.") - - if api == "groq": - module_version = version(api) - - if Version(module_version) >= parse("0.9.0"): - provider = GroqProvider(self.client) - provider.override() - else: - logger.warning(f"Only Groq>=0.9.0 supported. 
v{module_version} found.") - - if api == "anthropic": - module_version = version(api) - - if module_version is None: - logger.warning("Cannot determine Anthropic version. Only Anthropic>=0.32.0 supported.") - - if Version(module_version) >= parse("0.32.0"): - provider = AnthropicProvider(self.client) - provider.override() - else: - logger.warning(f"Only Anthropic>=0.32.0 supported. v{module_version} found.") - - if api == "mistralai": - module_version = version(api) - - if Version(module_version) >= parse("1.0.1"): - provider = MistralProvider(self.client) - provider.override() - else: - logger.warning(f"Only MistralAI>=1.0.1 supported. v{module_version} found.") - - if api == "ai21": - module_version = version(api) - - if module_version is None: - logger.warning("Cannot determine AI21 version. Only AI21>=2.0.0 supported.") - - if Version(module_version) >= parse("2.0.0"): - provider = AI21Provider(self.client) - provider.override() - else: - logger.warning(f"Only AI21>=2.0.0 supported. v{module_version} found.") - - if api == "llama_stack_client": - module_version = version(api) - - if Version(module_version) >= parse("0.0.53"): - provider = LlamaStackClientProvider(self.client) - provider.override() - else: - logger.warning(f"Only LlamaStackClient>=0.0.53 supported. v{module_version} found.") - - if api == "taskweaver": - module_version = version(api) - - if Version(module_version) >= parse("0.0.1"): - provider = TaskWeaverProvider(self.client) - provider.override() - else: - logger.warning(f"Only TaskWeaver>=0.0.1 supported. v{module_version} found.") - - if api == "google.generativeai": - module_version = version(api) - - if Version(module_version) >= parse("0.1.0"): - provider = GeminiProvider(self.client) - provider.override() - else: - logger.warning(f"Only google.generativeai>=0.1.0 supported. 
v{module_version} found.") - - def stop_instrumenting(self): - OpenAiProvider(self.client).undo_override() - GroqProvider(self.client).undo_override() - CohereProvider(self.client).undo_override() - LiteLLMProvider(self.client).undo_override() - OllamaProvider(self.client).undo_override() - AnthropicProvider(self.client).undo_override() - MistralProvider(self.client).undo_override() - AI21Provider(self.client).undo_override() - LlamaStackClientProvider(self.client).undo_override() - TaskWeaverProvider(self.client).undo_override() - GeminiProvider(self.client).undo_override() From df021ebc983eb991f5e4101a555f1c5e850edab1 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 00:13:36 +0200 Subject: [PATCH 11/45] cleanup tests/ from legacy Signed-off-by: Teo --- .../_test_langchain_handler.py | 104 --- tests/unit/test_agent.py | 252 ------- tests/unit/test_canary.py | 34 - tests/unit/test_patcher.py | 60 -- tests/unit/test_pre_init.py | 51 -- tests/unit/test_record_action.py | 235 ------- tests/unit/test_record_tool.py | 232 ------- tests/unit/test_session.py | 621 ------------------ tests/unit/test_singleton.py | 39 -- tests/unit/test_teardown.py | 12 - tests/unit/test_time_travel.py | 35 - 11 files changed, 1675 deletions(-) delete mode 100644 tests/langchain_handlers/_test_langchain_handler.py delete mode 100644 tests/unit/test_agent.py delete mode 100644 tests/unit/test_canary.py delete mode 100644 tests/unit/test_patcher.py delete mode 100644 tests/unit/test_pre_init.py delete mode 100644 tests/unit/test_record_action.py delete mode 100644 tests/unit/test_record_tool.py delete mode 100644 tests/unit/test_session.py delete mode 100644 tests/unit/test_singleton.py delete mode 100644 tests/unit/test_teardown.py delete mode 100644 tests/unit/test_time_travel.py diff --git a/tests/langchain_handlers/_test_langchain_handler.py b/tests/langchain_handlers/_test_langchain_handler.py deleted file mode 100644 index 53652e7d3..000000000 --- a/tests/langchain_handlers/_test_langchain_handler.py +++ /dev/null @@ -1,104 +0,0 @@ -import asyncio -import os -from langchain_openai import ChatOpenAI -from langchain_core.prompts import ChatPromptTemplate -from langchain.agents import tool, AgentExecutor, create_openai_tools_agent -from dotenv import load_dotenv -from agentops.partners.langchain_callback_handler import ( - LangchainCallbackHandler as AgentOpsLangchainCallbackHandler, - AsyncLangchainCallbackHandler as AgentOpsAsyncLangchainCallbackHandler, -) - -load_dotenv() - -AGENTOPS_API_KEY = os.environ.get("AGENTOPS_API_KEY") -OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY") - - -# Sync test -def run_sync_test(): - agentops_handler = AgentOpsLangchainCallbackHandler( - api_key=AGENTOPS_API_KEY, default_tags=["Langchain", "Sync Handler Test"] - ) - - llm = ChatOpenAI( - openai_api_key=OPENAI_API_KEY, - callbacks=[agentops_handler], - model="gpt-4o-mini", - streaming=False, # Disable streaming for sync handler - ) - - @tool - def find_movie(genre) -> str: - """Find available movies""" - if genre == "drama": - return "Dune 2" - else: - return "Pineapple Express" - - tools = [find_movie] - for t in tools: - t.callbacks = [agentops_handler] - - prompt = ChatPromptTemplate.from_messages( - [ - ("system", "You are a helpful assistant. 
Respond only in Spanish."), - ("user", "{input}"), - ("system", "Here is the current conversation state:\n{agent_scratchpad}"), - ] - ) - - agent = create_openai_tools_agent(llm, tools, prompt) - agent_executor = AgentExecutor(agent=agent, tools=tools, callbacks=[agentops_handler]) - - return agent_executor.invoke({"input": "What comedies are playing?"}) - - -# Async test -async def run_async_test(): - agentops_handler = AgentOpsAsyncLangchainCallbackHandler( - api_key=AGENTOPS_API_KEY, default_tags=["Langchain", "Async Handler Test"] - ) - - llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY, callbacks=[agentops_handler], model="gpt-4o-mini", streaming=True) - - @tool - def find_movie(genre) -> str: - """Find available movies""" - if genre == "drama": - return "Dune 2" - else: - return "Pineapple Express" - - tools = [find_movie] - for t in tools: - t.callbacks = [agentops_handler] - - prompt = ChatPromptTemplate.from_messages( - [ - ("system", "You are a helpful assistant. Respond only in Spanish."), - ("user", "{input}"), - ("system", "Here is the current conversation state:\n{agent_scratchpad}"), - ] - ) - - agent = create_openai_tools_agent(llm, tools, prompt) - agent_executor = AgentExecutor(agent=agent, tools=tools, callbacks=[agentops_handler]) - - return await agent_executor.ainvoke({"input": "What comedies are playing?"}) - - -async def main(): - # Run sync test - print("Running sync test...") - sync_result = run_sync_test() - print(f"Sync test result: {sync_result}\n") - - # Run async test - print("Running async test...") - async_result = await run_async_test() - print(f"Async test result: {async_result}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/tests/unit/test_agent.py b/tests/unit/test_agent.py deleted file mode 100644 index 6691b166a..000000000 --- a/tests/unit/test_agent.py +++ /dev/null @@ -1,252 +0,0 @@ -from unittest import TestCase -from uuid import uuid4 -from typing import Optional - -from agentops import track_agent -from agentops.descriptor import agentops_property - - -class TrackAgentTests(TestCase): - def test_track_agent_with_class(self): - @track_agent(name="agent_name") - class TestAgentClass: - t = "a" - pass - - obj = TestAgentClass() - self.assertTrue(isinstance(obj, TestAgentClass)) - self.assertEqual(getattr(obj, "agentops_agent_name", None), "agent_name") - self.assertIsNotNone(getattr(obj, "agentops_agent_id", None)) - - def test_track_agent_with_class_name(self): - @track_agent(name="agent_name") - class TestAgentClass: - t = "a" - pass - - obj = TestAgentClass(agentops_name="agent1") - self.assertTrue(isinstance(obj, TestAgentClass)) - self.assertEqual(getattr(obj, "agentops_agent_name"), "agent1") - self.assertIsNotNone(getattr(obj, "agentops_agent_id")) - - def test_track_agent_with_post_init_name_assignment(self): - """Test setting agentops_agent_name after initialization""" - - @track_agent() - class TestAgentClass: - def __init__(self): - self.role = "test_role" - # Simulate post_init behavior like in CrewAI - self.agentops_agent_name = self.role - - obj = TestAgentClass() - self.assertEqual(getattr(obj, "agentops_agent_name"), "test_role") - self.assertIsNotNone(getattr(obj, "agentops_agent_id")) - - def test_track_agent_with_property_override(self): - """Test overriding agentops properties after initialization""" - - @track_agent() - class TestAgentClass: - def __init__(self): - self.role = "initial_role" - self.agentops_agent_name = self.role - - @property - def role(self): - return self._role - - @role.setter - def 
role(self, value): - self._role = value - # Update agentops_agent_name when role changes - if hasattr(self, "agentops_agent_name"): - self.agentops_agent_name = value - - # Test initial setting - obj = TestAgentClass() - self.assertEqual(getattr(obj, "agentops_agent_name"), "initial_role") - - # Test property update - obj.role = "updated_role" - self.assertEqual(getattr(obj, "agentops_agent_name"), "updated_role") - self.assertIsNotNone(getattr(obj, "agentops_agent_id")) - - def test_track_agent_with_none_values(self): - """Test handling of None values for agentops properties""" - - @track_agent() - class TestAgentClass: - def __init__(self): - self.role = None - self.agentops_agent_name = None - self._model_validate() - - def _model_validate(self): - # Simulate setting name after validation - if self.role is not None: - self.agentops_agent_name = self.role - - # Test initialization with None - obj = TestAgentClass() - self.assertIsNone(getattr(obj, "agentops_agent_name")) - self.assertIsNotNone(getattr(obj, "agentops_agent_id")) # ID should still be set - - # Test updating from None - obj.role = "new_role" - obj._model_validate() - self.assertEqual(getattr(obj, "agentops_agent_name"), "new_role") - - def test_track_agent_with_pydantic_model(self): - """Test setting agentops_agent_name with actual Pydantic BaseModel""" - try: - from typing import Optional - from pydantic import BaseModel, Field, model_validator - except ImportError: - self.skipTest("Pydantic not installed, skipping Pydantic model test") - - @track_agent() - class TestAgentModel(BaseModel): - role: str = Field(default="test_role") - agentops_agent_name: Optional[str] = None - agentops_agent_id: Optional[str] = None - - @model_validator(mode="after") - def set_agent_name(self): - # Simulate CrewAI's post_init_setup behavior - self.agentops_agent_name = self.role - return self - - # Test basic initialization - obj = TestAgentModel() - self.assertEqual(obj.agentops_agent_name, "test_role") - self.assertIsNotNone(obj.agentops_agent_id) - - # Test with custom role - obj2 = TestAgentModel(role="custom_role") - self.assertEqual(obj2.agentops_agent_name, "custom_role") - self.assertIsNotNone(obj2.agentops_agent_id) - - # Test model update - obj.role = "updated_role" - obj.set_agent_name() - self.assertEqual(obj.agentops_agent_name, "updated_role") - - -class TestAgentOpsDescriptor(TestCase): - def test_agent_property_get_set(self): - """Test basic get/set functionality of agentops_property""" - - class TestAgent: - agent_id = agentops_property() - agent_name = agentops_property() - - agent = TestAgent() - test_id = str(uuid4()) - test_name = "TestAgent" - - # Test setting values - agent.agent_id = test_id - agent.agent_name = test_name - - # Test getting values - self.assertEqual(agent.agent_id, test_id) - self.assertEqual(agent.agent_name, test_name) - - # Test getting non-existent value returns None - self.assertIsNone(TestAgent().agent_id) - - def test_from_stack_direct_call(self): - """Test from_stack when called directly from a method with an agent""" - - @track_agent(name="TestAgent") - class TestAgent: - def get_my_id(self): - return agentops_property.stack_lookup() - - agent = TestAgent() - detected_id = agent.get_my_id() - self.assertEqual(detected_id, agent.agentops_agent_id) - - def test_from_stack_nested_call(self): - """Test from_stack when called through nested function calls""" - - @track_agent(name="TestAgent") - class TestAgent: - def get_my_id(self): - def nested_func(): - return agentops_property.stack_lookup() - 
- return nested_func() - - agent = TestAgent() - detected_id = agent.get_my_id() - self.assertEqual(detected_id, agent.agentops_agent_id) - - def test_from_stack_multiple_agents(self): - """Test from_stack with multiple agents in different stack frames""" - - @track_agent(name="Agent1") - class Agent1: - def get_other_agent_id(self, other_agent): - return other_agent.get_my_id() - - @track_agent(name="Agent2") - class Agent2: - def get_my_id(self): - return agentops_property.stack_lookup() - - agent1 = Agent1() - agent2 = Agent2() - - # Should return agent2's ID since it's the closest in the call stack - detected_id = agent1.get_other_agent_id(agent2) - self.assertEqual(detected_id, agent2.agentops_agent_id) - self.assertNotEqual(detected_id, agent1.agentops_agent_id) - - def test_from_stack_no_agent(self): - """Test from_stack when no agent is in the call stack""" - - class NonAgent: - def get_id(self): - return agentops_property.stack_lookup() - - non_agent = NonAgent() - self.assertIsNone(non_agent.get_id()) - - def test_from_stack_with_exception(self): - """Test from_stack's behavior when exceptions occur during stack inspection""" - - class ProblemAgent: - agentops_agent_id = agentops_property() - - @property - def problematic_attr(self): - raise Exception("Simulated error") - - def get_id(self): - return agentops_property.stack_lookup() - - agent = ProblemAgent() - # Should return None and not raise exception - self.assertIsNone(agent.get_id()) - - def test_from_stack_inheritance(self): - """Test from_stack with inheritance hierarchy""" - - @track_agent(name="BaseAgent") - class BaseAgent: - def get_id_from_base(self): - return agentops_property.stack_lookup() - - @track_agent(name="DerivedAgent") - class DerivedAgent(BaseAgent): - def get_id_from_derived(self): - return agentops_property.stack_lookup() - - derived = DerivedAgent() - base_call_id = derived.get_id_from_base() - derived_call_id = derived.get_id_from_derived() - - self.assertEqual(base_call_id, derived.agentops_agent_id) - self.assertEqual(derived_call_id, derived.agentops_agent_id) diff --git a/tests/unit/test_canary.py b/tests/unit/test_canary.py deleted file mode 100644 index 90fcc65de..000000000 --- a/tests/unit/test_canary.py +++ /dev/null @@ -1,34 +0,0 @@ -import time - -import pytest -import requests_mock - -import agentops -from agentops import ActionEvent - - -class TestCanary: - def setup_method(self): - self.url = "https://api.agentops.ai" - self.api_key = "11111111-1111-4111-8111-111111111111" - agentops.init(api_key=self.api_key, max_wait_time=500, auto_start_session=False) - - def test_agent_ops_record(self, mock_req): - # Arrange - event_type = "test_event_type" - agentops.start_session() - - # Act - agentops.record(ActionEvent(event_type)) - time.sleep(2) - - # Find event requests - event_requests = [r for r in mock_req.request_history if "/v2/create_events" in r.url] - assert len(event_requests) > 0 - last_event_request = event_requests[-1] - - assert last_event_request.headers["X-Agentops-Api-Key"] == self.api_key - request_json = last_event_request.json() - assert request_json["events"][0]["event_type"] == event_type - - agentops.end_session("Success") diff --git a/tests/unit/test_patcher.py b/tests/unit/test_patcher.py deleted file mode 100644 index 5c5e1d8a9..000000000 --- a/tests/unit/test_patcher.py +++ /dev/null @@ -1,60 +0,0 @@ -# import pytest -# from unittest.mock import MagicMock -# from agentops.llm_tracker import LlmTracker -# -# # Mock the openai library -# -# -# @pytest.fixture -# def 
mock_openai(mocker): -# mock = mocker.MagicMock() -# mocker.patch.dict('sys.modules', {'openai': mock}) -# return mock -# -# # Test that the correct methods are overridden for version >= 1.0.0 -# -# -# def test_override_api_version_ge_1(mock_openai): -# mock_openai.__version__ = '1.0.0' # Version is exactly 1.0.0 -# tracker = LlmTracker(client=MagicMock()) -# -# original_method = MagicMock() -# mock_openai.chat = MagicMock(completions=MagicMock(create=original_method)) -# -# tracker.override_api('openai') -# -# # The original method should be replaced with a new method -# assert mock_openai.chat.completions.create != original_method -# assert callable(mock_openai.chat.completions.create) -# -# # Test that the correct methods are overridden for version < 1.0.0 -# -# -# def test_override_api_version_lt_1(mock_openai): -# mock_openai.__version__ = '0.9.9' # Version is less than 1.0.0 -# tracker = LlmTracker(client=MagicMock()) -# -# original_method = MagicMock() -# mock_openai.ChatCompletion = MagicMock(create=original_method) -# -# tracker.override_api('openai') -# -# # The original method should be replaced with a new method -# assert mock_openai.ChatCompletion.create != original_method -# assert callable(mock_openai.ChatCompletion.create) -# -# # Test that the override_api method handles missing __version__ attribute -# -# -# def test_override_api_missing_version_attribute(mocker): -# mock_openai = mocker.MagicMock() -# mocker.patch.dict('sys.modules', {'openai': mock_openai}) -# tracker = LlmTracker(client=MagicMock()) -# -# # This should not raise an error, and should use the methods for version < 1.0.0 -# tracker.override_api('openai') -# -# # Now you need to assert that the correct methods for version < 1.0.0 are overridden -# # Assuming 'ChatCompletion.create' is the method to be overridden for version < 1.0.0 -# assert hasattr(mock_openai, 'ChatCompletion') -# assert callable(mock_openai.ChatCompletion.create) diff --git a/tests/unit/test_pre_init.py b/tests/unit/test_pre_init.py deleted file mode 100644 index 5e8ce0684..000000000 --- a/tests/unit/test_pre_init.py +++ /dev/null @@ -1,51 +0,0 @@ -import contextlib -import time -from datetime import datetime - -import pytest -import requests_mock - -import agentops -from agentops import record_action, track_agent -from agentops.singleton import clear_singletons - - -@track_agent(name="TestAgent") -class BasicAgent: - def __init__(self): - pass - - -class TestPreInit: - def setup_method(self): - self.url = "https://api.agentops.ai" - self.api_key = "11111111-1111-4111-8111-111111111111" - - def test_track_agent(self, mock_req): - agent = BasicAgent() - - assert len(mock_req.request_history) == 0 - - agentops.init(api_key=self.api_key) - time.sleep(1) - - # Find agent creation request - agent_requests = [r for r in mock_req.request_history if "/v2/create_agent" in r.url] - assert len(agent_requests) > 0 - last_agent_request = agent_requests[-1] - - # Assert agent creation - assert last_agent_request.headers["X-Agentops-Api-Key"] == self.api_key - - # End session and wait for flush - agentops.end_session(end_state="Success") - time.sleep(1.5) - - # Find session end request - end_session_requests = [r for r in mock_req.request_history if "/v2/update_session" in r.url] - assert len(end_session_requests) > 0 - last_end_request = end_session_requests[-1] - - assert last_end_request.headers["X-Agentops-Api-Key"] == self.api_key - - mock_req.reset() diff --git a/tests/unit/test_record_action.py b/tests/unit/test_record_action.py deleted file 
mode 100644 index 0e781a578..000000000 --- a/tests/unit/test_record_action.py +++ /dev/null @@ -1,235 +0,0 @@ -import time -from datetime import datetime - -import pytest - -import agentops -from agentops import record_action - - -class TestRecordAction: - def setup_method(self): - self.url = "https://api.agentops.ai" - self.api_key = "11111111-1111-4111-8111-111111111111" - self.event_type = "test_event_type" - agentops.init(self.api_key, max_wait_time=50, auto_start_session=False) - - def test_record_action_decorator(self, mock_req): - agentops.start_session() - - @record_action(event_name=self.event_type) - def add_two(x, y): - return x + y - - # Act - add_two(3, 4) - time.sleep(0.1) - - # Find the record_action request - action_requests = [r for r in mock_req.request_history if "/v2/create_events" in r.url] - assert len(action_requests) > 0 - last_action_request = action_requests[-1] - - assert last_action_request.headers["X-Agentops-Api-Key"] == self.api_key - request_json = last_action_request.json() - assert request_json["events"][0]["action_type"] == self.event_type - assert request_json["events"][0]["params"] == {"x": 3, "y": 4} - assert request_json["events"][0]["returns"] == 7 - - agentops.end_session(end_state="Success") - - def test_record_action_default_name(self, mock_req): - agentops.start_session() - - @record_action() - def add_two(x, y): - return x + y - - # Act - add_two(3, 4) - time.sleep(0.1) - - # Find the record_action request - action_requests = [r for r in mock_req.request_history if "/v2/create_events" in r.url] - assert len(action_requests) > 0 - last_action_request = action_requests[-1] - - assert last_action_request.headers["X-Agentops-Api-Key"] == self.api_key - request_json = last_action_request.json() - assert request_json["events"][0]["action_type"] == "add_two" - assert request_json["events"][0]["params"] == {"x": 3, "y": 4} - assert request_json["events"][0]["returns"] == 7 - - agentops.end_session(end_state="Success") - - def test_record_action_decorator_multiple(self, mock_req): - agentops.start_session() - - # Arrange - @record_action(event_name=self.event_type) - def add_three(x, y, z=3): - return x + y + z - - # Act - add_three(1, 2) - add_three(1, 2, 4) - - time.sleep(1.5) - - # Find the record_action request - action_requests = [r for r in mock_req.request_history if "/v2/create_events" in r.url] - assert len(action_requests) > 0 - last_action_request = action_requests[-1] - - assert last_action_request.headers["X-Agentops-Api-Key"] == self.api_key - request_json = last_action_request.json() - - assert request_json["events"][1]["action_type"] == self.event_type - assert request_json["events"][1]["params"] == {"x": 1, "y": 2, "z": 4} - assert request_json["events"][1]["returns"] == 7 - - assert request_json["events"][0]["action_type"] == self.event_type - assert request_json["events"][0]["params"] == {"x": 1, "y": 2, "z": 3} - assert request_json["events"][0]["returns"] == 6 - - agentops.end_session(end_state="Success") - - @pytest.mark.asyncio - async def test_async_action_call(self, mock_req): - agentops.start_session() - - @record_action(self.event_type) - async def async_add(x, y): - time.sleep(0.1) - return x + y - - # Act - result = await async_add(3, 4) - time.sleep(0.1) - - # Assert - assert result == 7 - - # Find the record_action request - action_requests = [r for r in mock_req.request_history if "/v2/create_events" in r.url] - assert len(action_requests) > 0 - last_action_request = action_requests[-1] - - assert 
last_action_request.headers["X-Agentops-Api-Key"] == self.api_key - request_json = last_action_request.json() - assert request_json["events"][0]["action_type"] == self.event_type - assert request_json["events"][0]["params"] == {"x": 3, "y": 4} - assert request_json["events"][0]["returns"] == 7 - - init = datetime.fromisoformat(request_json["events"][0]["init_timestamp"]) - end = datetime.fromisoformat(request_json["events"][0]["end_timestamp"]) - - assert (end - init).total_seconds() >= 0.1 - - agentops.end_session(end_state="Success") - - def test_multiple_sessions_sync(self, mock_req): - session_1 = agentops.start_session() - session_2 = agentops.start_session() - assert session_1 is not None - assert session_2 is not None - - # Arrange - @record_action(event_name=self.event_type) - def add_three(x, y, z=3): - return x + y + z - - # Act - add_three(1, 2, session=session_1) - time.sleep(0.1) - add_three(1, 2, 3, session=session_2) - time.sleep(0.1) - - # Find action requests - action_requests = [r for r in mock_req.request_history if "/v2/create_events" in r.url] - assert len(action_requests) >= 2 # Should have at least 2 action requests - - # Verify session_2's request (last request) - last_request = action_requests[-1] - assert last_request.headers["X-Agentops-Api-Key"] == self.api_key - assert last_request.headers["Authorization"] == f"Bearer {mock_req.session_jwts[str(session_2.session_id)]}" - request_json = last_request.json() - assert request_json["events"][0]["action_type"] == self.event_type - assert request_json["events"][0]["params"] == {"x": 1, "y": 2, "z": 3} - assert request_json["events"][0]["returns"] == 6 - - # Verify session_1's request (second to last request) - second_last_request = action_requests[-2] - assert second_last_request.headers["X-Agentops-Api-Key"] == self.api_key - assert ( - second_last_request.headers["Authorization"] == f"Bearer {mock_req.session_jwts[str(session_1.session_id)]}" - ) - request_json = second_last_request.json() - assert request_json["events"][0]["action_type"] == self.event_type - assert request_json["events"][0]["params"] == {"x": 1, "y": 2, "z": 3} - assert request_json["events"][0]["returns"] == 6 - - session_1.end_session(end_state="Success") - session_2.end_session(end_state="Success") - - @pytest.mark.asyncio - async def test_multiple_sessions_async(self, mock_req): - session_1 = agentops.start_session() - session_2 = agentops.start_session() - assert session_1 is not None - assert session_2 is not None - - # Arrange - @record_action(self.event_type) - async def async_add(x, y): - time.sleep(0.1) - return x + y - - # Act - await async_add(1, 2, session=session_1) - time.sleep(0.1) - await async_add(1, 2, session=session_2) - time.sleep(0.1) - - # Assert - assert len(mock_req.request_history) == 5 - - request_json = mock_req.last_request.json() - assert mock_req.last_request.headers["X-Agentops-Api-Key"] == self.api_key - assert ( - mock_req.last_request.headers["Authorization"] - == f"Bearer {mock_req.session_jwts[str(session_2.session_id)]}" - ) - assert request_json["events"][0]["action_type"] == self.event_type - assert request_json["events"][0]["params"] == {"x": 1, "y": 2} - assert request_json["events"][0]["returns"] == 3 - - second_last_request_json = mock_req.request_history[-2].json() - assert mock_req.request_history[-2].headers["X-Agentops-Api-Key"] == self.api_key - assert ( - mock_req.request_history[-2].headers["Authorization"] - == f"Bearer {mock_req.session_jwts[str(session_1.session_id)]}" - ) - assert 
second_last_request_json["events"][0]["action_type"] == self.event_type - assert second_last_request_json["events"][0]["params"] == { - "x": 1, - "y": 2, - } - assert second_last_request_json["events"][0]["returns"] == 3 - - session_1.end_session(end_state="Success") - session_2.end_session(end_state="Success") - - def test_require_session_if_multiple(self, mock_req): - session_1 = agentops.start_session() - session_2 = agentops.start_session() - - # Arrange - @record_action(self.event_type) - def add_two(x, y): - time.sleep(0.1) - return x + y - - with pytest.raises(ValueError): - # Act - add_two(1, 2) diff --git a/tests/unit/test_record_tool.py b/tests/unit/test_record_tool.py deleted file mode 100644 index f2fca9023..000000000 --- a/tests/unit/test_record_tool.py +++ /dev/null @@ -1,232 +0,0 @@ -import contextlib -import time -from datetime import datetime - -import pytest -import requests_mock - -import agentops -from agentops import record_tool - -jwts = ["some_jwt", "some_jwt2", "some_jwt3"] - - -class TestRecordTool: - def setup_method(self): - self.url = "https://api.agentops.ai" - self.api_key = "11111111-1111-4111-8111-111111111111" - self.tool_name = "test_tool_name" - agentops.init(self.api_key, max_wait_time=5, auto_start_session=False) - - def test_record_tool_decorator(self, mock_req): - agentops.start_session() - - @record_tool(tool_name=self.tool_name) - def add_two(x, y): - return x + y - - # Act - add_two(3, 4) - time.sleep(0.1) - - # Find the record_tool request - tool_requests = [r for r in mock_req.request_history if "/v2/create_events" in r.url] - assert len(tool_requests) > 0 - last_tool_request = tool_requests[-1] - - assert last_tool_request.headers["X-Agentops-Api-Key"] == self.api_key - request_json = last_tool_request.json() - assert request_json["events"][0]["name"] == self.tool_name - assert request_json["events"][0]["params"] == {"x": 3, "y": 4} - assert request_json["events"][0]["returns"] == 7 - - agentops.end_session(end_state="Success") - - def test_record_tool_default_name(self, mock_req): - agentops.start_session() - - @record_tool() - def add_two(x, y): - return x + y - - # Act - add_two(3, 4) - time.sleep(0.1) - - # Find the record_tool request - tool_requests = [r for r in mock_req.request_history if "/v2/create_events" in r.url] - assert len(tool_requests) > 0 - last_tool_request = tool_requests[-1] - - assert last_tool_request.headers["X-Agentops-Api-Key"] == self.api_key - request_json = last_tool_request.json() - assert request_json["events"][0]["name"] == "add_two" - assert request_json["events"][0]["params"] == {"x": 3, "y": 4} - assert request_json["events"][0]["returns"] == 7 - - agentops.end_session(end_state="Success") - - def test_record_tool_decorator_multiple(self, mock_req): - agentops.start_session() - - # Arrange - @record_tool(tool_name=self.tool_name) - def add_three(x, y, z=3): - return x + y + z - - # Act - add_three(1, 2) - time.sleep(0.1) - add_three(1, 2) - time.sleep(0.1) - - # Find all tool requests - tool_requests = [r for r in mock_req.request_history if "/v2/create_events" in r.url] - assert len(tool_requests) > 0 - last_tool_request = tool_requests[-1] - - assert last_tool_request.headers["X-Agentops-Api-Key"] == self.api_key - request_json = last_tool_request.json() - assert request_json["events"][0]["name"] == self.tool_name - assert request_json["events"][0]["params"] == {"x": 1, "y": 2, "z": 3} - assert request_json["events"][0]["returns"] == 6 - - agentops.end_session(end_state="Success") - - @pytest.mark.asyncio - 
async def test_async_tool_call(self, mock_req): - agentops.start_session() - - @record_tool(self.tool_name) - async def async_add(x, y): - time.sleep(0.1) - return x + y - - # Act - result = await async_add(3, 4) - time.sleep(0.1) - - # Assert - assert result == 7 - - # Find the record_tool request - tool_requests = [r for r in mock_req.request_history if "/v2/create_events" in r.url] - assert len(tool_requests) > 0 - last_tool_request = tool_requests[-1] - - assert last_tool_request.headers["X-Agentops-Api-Key"] == self.api_key - request_json = last_tool_request.json() - assert request_json["events"][0]["name"] == self.tool_name - assert request_json["events"][0]["params"] == {"x": 3, "y": 4} - assert request_json["events"][0]["returns"] == 7 - - init = datetime.fromisoformat(request_json["events"][0]["init_timestamp"]) - end = datetime.fromisoformat(request_json["events"][0]["end_timestamp"]) - - assert (end - init).total_seconds() >= 0.1 - - agentops.end_session(end_state="Success") - - def test_multiple_sessions_sync(self, mock_req): - session_1 = agentops.start_session() - session_2 = agentops.start_session() - assert session_1 is not None - assert session_2 is not None - - # Arrange - @record_tool(tool_name=self.tool_name) - def add_three(x, y, z=3): - return x + y + z - - # Act - add_three(1, 2, session=session_1) - time.sleep(0.1) - add_three(1, 2, session=session_2) - time.sleep(0.1) - - # Find tool requests - tool_requests = [r for r in mock_req.request_history if "/v2/create_events" in r.url] - assert len(tool_requests) >= 2 # Should have at least 2 tool requests - - # Verify session_2's request (last request) - last_request = tool_requests[-1] - assert last_request.headers["X-Agentops-Api-Key"] == self.api_key - assert last_request.headers["Authorization"] == f"Bearer {mock_req.session_jwts[str(session_2.session_id)]}" - request_json = last_request.json() - assert request_json["events"][0]["name"] == self.tool_name - assert request_json["events"][0]["params"] == {"x": 1, "y": 2, "z": 3} - assert request_json["events"][0]["returns"] == 6 - - # Verify session_1's request (second to last request) - second_last_request = tool_requests[-2] - assert second_last_request.headers["X-Agentops-Api-Key"] == self.api_key - assert ( - second_last_request.headers["Authorization"] == f"Bearer {mock_req.session_jwts[str(session_1.session_id)]}" - ) - request_json = second_last_request.json() - assert request_json["events"][0]["name"] == self.tool_name - assert request_json["events"][0]["params"] == {"x": 1, "y": 2, "z": 3} - assert request_json["events"][0]["returns"] == 6 - - session_1.end_session(end_state="Success") - session_2.end_session(end_state="Success") - - @pytest.mark.asyncio - async def test_multiple_sessions_async(self, mock_req): - session_1 = agentops.start_session() - session_2 = agentops.start_session() - assert session_1 is not None - assert session_2 is not None - - # Arrange - @record_tool(tool_name=self.tool_name) - async def async_add(x, y): - time.sleep(0.1) - return x + y - - # Act - await async_add(1, 2, session=session_1) - time.sleep(0.1) - await async_add(1, 2, session=session_2) - time.sleep(0.1) - - # Find tool requests - tool_requests = [r for r in mock_req.request_history if "/v2/create_events" in r.url] - assert len(tool_requests) >= 2 # Should have at least 2 tool requests - - # Verify session_2's request (last request) - last_request = tool_requests[-1] - assert last_request.headers["X-Agentops-Api-Key"] == self.api_key - assert 
last_request.headers["Authorization"] == f"Bearer {mock_req.session_jwts[str(session_2.session_id)]}" - request_json = last_request.json() - assert request_json["events"][0]["name"] == self.tool_name - assert request_json["events"][0]["params"] == {"x": 1, "y": 2} - assert request_json["events"][0]["returns"] == 3 - - # Verify session_1's request (second to last request) - second_last_request = tool_requests[-2] - assert second_last_request.headers["X-Agentops-Api-Key"] == self.api_key - assert ( - second_last_request.headers["Authorization"] == f"Bearer {mock_req.session_jwts[str(session_1.session_id)]}" - ) - request_json = second_last_request.json() - assert request_json["events"][0]["name"] == self.tool_name - assert request_json["events"][0]["params"] == {"x": 1, "y": 2} - assert request_json["events"][0]["returns"] == 3 - - session_1.end_session(end_state="Success") - session_2.end_session(end_state="Success") - - def test_require_session_if_multiple(self, mock_req): - session_1 = agentops.start_session() - session_2 = agentops.start_session() - - # Arrange - @record_tool(tool_name=self.tool_name) - def add_two(x, y): - time.sleep(0.1) - return x + y - - with pytest.raises(ValueError): - # Act - add_two(1, 2) diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py deleted file mode 100644 index c8a1fc909..000000000 --- a/tests/unit/test_session.py +++ /dev/null @@ -1,621 +0,0 @@ -import json -import time -from datetime import datetime, timezone -from typing import Dict, Optional, Sequence -from unittest.mock import MagicMock, Mock, patch -from uuid import UUID - -import pytest -import requests_mock -from opentelemetry import trace -from opentelemetry.sdk.trace import ReadableSpan -from opentelemetry.sdk.trace.export import SpanExportResult -from opentelemetry.trace import SpanContext, SpanKind, Status, StatusCode -from opentelemetry.trace.span import TraceState - -import agentops -from agentops import ActionEvent, Client -from agentops.http_client import HttpClient -from agentops.singleton import clear_singletons - - -class TestNonInitializedSessions: - def setup_method(self): - self.api_key = "11111111-1111-4111-8111-111111111111" - self.event_type = "test_event_type" - - def test_non_initialized_doesnt_start_session(self, mock_req): - agentops.set_api_key(self.api_key) - session = agentops.start_session() - assert session is None - - -class TestSingleSessions: - def setup_method(self): - self.api_key = "11111111-1111-4111-8111-111111111111" - self.event_type = "test_event_type" - agentops.init(api_key=self.api_key, max_wait_time=50, auto_start_session=False) - - def test_session(self, mock_req): - session = agentops.start_session() - - agentops.record(ActionEvent(self.event_type)) - agentops.record(ActionEvent(self.event_type)) - - time.sleep(0.1) - - # Find event requests - event_requests = [r for r in mock_req.request_history if "/v2/create_events" in r.url] - assert len(event_requests) > 0 - last_event_request = event_requests[-1] - - assert last_event_request.headers["Authorization"] == f"Bearer {mock_req.session_jwts[str(session.session_id)]}" - request_json = last_event_request.json() - assert request_json["events"][0]["event_type"] == self.event_type - - end_state = "Success" - agentops.end_session(end_state) - time.sleep(0.15) - - # Find session end request - end_session_requests = [r for r in mock_req.request_history if "/v2/update_session" in r.url] - assert len(end_session_requests) > 0 - last_end_request = end_session_requests[-1] - - assert 
last_end_request.headers["Authorization"] == f"Bearer {mock_req.session_jwts[str(session.session_id)]}" - request_json = last_end_request.json() - assert request_json["session"]["end_state"] == end_state - assert len(request_json["session"]["tags"]) == 0 - - agentops.end_all_sessions() - - def test_add_tags(self, mock_req): - # Arrange - tags = ["GPT-4"] - agentops.start_session(tags=tags) - agentops.add_tags(["test-tag", "dupe-tag"]) - agentops.add_tags(["dupe-tag"]) - - # Act - end_state = "Success" - agentops.end_session(end_state) - time.sleep(0.15) - - # Find session end request - end_session_requests = [r for r in mock_req.request_history if "/v2/update_session" in r.url] - assert len(end_session_requests) > 0 - last_end_request = end_session_requests[-1] - - assert last_end_request.headers["X-Agentops-Api-Key"] == self.api_key - request_json = last_end_request.json() - assert request_json["session"]["end_state"] == end_state - assert request_json["session"]["tags"] == ["GPT-4", "test-tag", "dupe-tag"] - - agentops.end_all_sessions() - - def test_tags(self, mock_req): - # Arrange - tags = ["GPT-4"] - agentops.start_session(tags=tags) - - # Act - agentops.record(ActionEvent(self.event_type)) - - # Act - end_state = "Success" - agentops.end_session(end_state) - time.sleep(0.15) - - # Find session end request - end_session_requests = [r for r in mock_req.request_history if "/v2/update_session" in r.url] - assert len(end_session_requests) > 0 - last_end_request = end_session_requests[-1] - - assert last_end_request.headers["X-Agentops-Api-Key"] == self.api_key - request_json = last_end_request.json() - assert request_json["session"]["end_state"] == end_state - assert request_json["session"]["tags"] == tags - - agentops.end_all_sessions() - - def test_inherit_session_id(self, mock_req): - # Arrange - inherited_id = "4f72e834-ff26-4802-ba2d-62e7613446f1" - agentops.start_session(tags=["test"], inherited_session_id=inherited_id) - - # Find session start request - start_session_requests = [r for r in mock_req.request_history if "/v2/create_session" in r.url] - assert len(start_session_requests) > 0 - last_start_request = start_session_requests[-1] - - # Act - # session_id correct - request_json = last_start_request.json() - assert request_json["session"]["session_id"] == inherited_id - - # Act - end_state = "Success" - agentops.end_session(end_state) - time.sleep(0.15) - - agentops.end_all_sessions() - - def test_add_tags_with_string(self, mock_req): - agentops.start_session() - agentops.add_tags("wrong-type-tags") - - request_json = mock_req.last_request.json() - assert request_json["session"]["tags"] == ["wrong-type-tags"] - - def test_session_add_tags_with_string(self, mock_req): - session = agentops.start_session() - session.add_tags("wrong-type-tags") - - request_json = mock_req.last_request.json() - assert request_json["session"]["tags"] == ["wrong-type-tags"] - - def test_set_tags_with_string(self, mock_req): - agentops.start_session() - agentops.set_tags("wrong-type-tags") - - request_json = mock_req.last_request.json() - assert request_json["session"]["tags"] == ["wrong-type-tags"] - - def test_session_set_tags_with_string(self, mock_req): - session = agentops.start_session() - assert session is not None - - session.set_tags("wrong-type-tags") - - request_json = mock_req.last_request.json() - assert request_json["session"]["tags"] == ["wrong-type-tags"] - - def test_set_tags_before_session(self, mock_req): - agentops.configure(default_tags=["pre-session-tag"]) - 
agentops.start_session() - - request_json = mock_req.last_request.json() - assert request_json["session"]["tags"] == ["pre-session-tag"] - - def test_safe_get_session_no_session(self, mock_req): - session = Client()._safe_get_session() - assert session is None - - def test_safe_get_session_with_session(self, mock_req): - agentops.start_session() - session = Client()._safe_get_session() - assert session is not None - - def test_safe_get_session_with_multiple_sessions(self, mock_req): - agentops.start_session() - agentops.start_session() - - session = Client()._safe_get_session() - assert session is None - - def test_get_analytics(self, mock_req): - # Arrange - session = agentops.start_session() - session.add_tags(["test-session-analytics-tag"]) - assert session is not None - - # Record some events to increment counters - session.record(ActionEvent("llms")) - session.record(ActionEvent("tools")) - session.record(ActionEvent("actions")) - session.record(ActionEvent("errors")) - time.sleep(0.1) - - # Act - analytics = session.get_analytics() - - # Assert - assert isinstance(analytics, dict) - assert all( - key in analytics - for key in [ - "LLM calls", - "Tool calls", - "Actions", - "Errors", - "Duration", - "Cost", - ] - ) - - # Check specific values - assert analytics["LLM calls"] == 1 - assert analytics["Tool calls"] == 1 - assert analytics["Actions"] == 1 - assert analytics["Errors"] == 1 - - # Check duration format - assert isinstance(analytics["Duration"], str) - assert "s" in analytics["Duration"] - - # Check cost format (mock returns token_cost: 5) - assert analytics["Cost"] == "5.000000" - - # End session and cleanup - session.end_session(end_state="Success") - agentops.end_all_sessions() - - -class TestMultiSessions: - def setup_method(self): - self.api_key = "11111111-1111-4111-8111-111111111111" - self.event_type = "test_event_type" - agentops.init(api_key=self.api_key, max_wait_time=500, auto_start_session=False) - - def test_two_sessions(self, mock_req): - session_1 = agentops.start_session() - session_2 = agentops.start_session() - assert session_1 is not None - assert session_2 is not None - - assert len(agentops.Client().current_session_ids) == 2 - assert agentops.Client().current_session_ids == [ - str(session_1.session_id), - str(session_2.session_id), - ] - time.sleep(0.1) - - session_1.record(ActionEvent(self.event_type)) - session_2.record(ActionEvent(self.event_type)) - - time.sleep(1.5) - - # Find event requests - event_requests = [r for r in mock_req.request_history if "/v2/create_events" in r.url] - assert len(event_requests) >= 2 - - # Verify session_1's request - session_1_request = event_requests[-2] - assert ( - session_1_request.headers["Authorization"] == f"Bearer {mock_req.session_jwts[str(session_1.session_id)]}" - ) - assert session_1_request.json()["events"][0]["event_type"] == self.event_type - - # Verify session_2's request - session_2_request = event_requests[-1] - assert ( - session_2_request.headers["Authorization"] == f"Bearer {mock_req.session_jwts[str(session_2.session_id)]}" - ) - assert session_2_request.json()["events"][0]["event_type"] == self.event_type - - end_state = "Success" - - session_1.end_session(end_state) - time.sleep(1.5) - - # Find session end requests - end_session_requests = [r for r in mock_req.request_history if "/v2/update_session" in r.url] - assert len(end_session_requests) > 0 - session_1_end = end_session_requests[-1] - - assert session_1_end.headers["Authorization"] == f"Bearer 
{mock_req.session_jwts[str(session_1.session_id)]}" - request_json = session_1_end.json() - assert request_json["session"]["end_state"] == end_state - assert len(request_json["session"]["tags"]) == 0 - - session_2.end_session(end_state) - time.sleep(0.1) - - # Verify session 2 end request - end_session_requests = [r for r in mock_req.request_history if "/v2/update_session" in r.url] - session_2_end = end_session_requests[-1] - assert session_2_end.headers["Authorization"] == f"Bearer {mock_req.session_jwts[str(session_2.session_id)]}" - request_json = session_2_end.json() - assert request_json["session"]["end_state"] == end_state - assert len(request_json["session"]["tags"]) == 0 - - def test_add_tags(self, mock_req): - # Arrange - session_1_tags = ["session-1"] - session_2_tags = ["session-2"] - - session_1 = agentops.start_session(tags=session_1_tags) - session_2 = agentops.start_session(tags=session_2_tags) - assert session_1 is not None - assert session_2 is not None - - session_1.add_tags(["session-1-added", "session-1-added-2"]) - session_2.add_tags(["session-2-added"]) - - # Act - end_state = "Success" - session_1.end_session(end_state) - session_2.end_session(end_state) - time.sleep(0.15) - - # Assert 3 requests, 1 for session init, 1 for event, 1 for end session - req1 = mock_req.request_history[-1].json() - req2 = mock_req.request_history[-2].json() - - session_1_req = req1 if req1["session"]["session_id"] == session_1.session_id else req2 - session_2_req = req2 if req2["session"]["session_id"] == session_2.session_id else req1 - - assert session_1_req["session"]["end_state"] == end_state - assert session_2_req["session"]["end_state"] == end_state - - assert session_1_req["session"]["tags"] == [ - "session-1", - "session-1-added", - "session-1-added-2", - ] - - assert session_2_req["session"]["tags"] == [ - "session-2", - "session-2-added", - ] - - def test_get_analytics_multiple_sessions(self, mock_req): - session_1 = agentops.start_session() - session_1.add_tags(["session-1", "test-analytics-tag"]) - session_2 = agentops.start_session() - session_2.add_tags(["session-2", "test-analytics-tag"]) - assert session_1 is not None - assert session_2 is not None - - # Record events in the sessions - session_1.record(ActionEvent("llms")) - session_1.record(ActionEvent("tools")) - session_2.record(ActionEvent("actions")) - session_2.record(ActionEvent("errors")) - - time.sleep(1.5) - - # Act - analytics_1 = session_1.get_analytics() - analytics_2 = session_2.get_analytics() - - # Assert 2 record_event requests - 2 for each session - assert analytics_1["LLM calls"] == 1 - assert analytics_1["Tool calls"] == 1 - assert analytics_1["Actions"] == 0 - assert analytics_1["Errors"] == 0 - - assert analytics_2["LLM calls"] == 0 - assert analytics_2["Tool calls"] == 0 - assert analytics_2["Actions"] == 1 - assert analytics_2["Errors"] == 1 - - # Check duration format - assert isinstance(analytics_1["Duration"], str) - assert "s" in analytics_1["Duration"] - assert isinstance(analytics_2["Duration"], str) - assert "s" in analytics_2["Duration"] - - # Check cost format (mock returns token_cost: 5) - assert analytics_1["Cost"] == "5.000000" - assert analytics_2["Cost"] == "5.000000" - - end_state = "Success" - - session_1.end_session(end_state) - session_2.end_session(end_state) - - -class TestSessionExporter: - def setup_method(self): - self.api_key = "11111111-1111-4111-8111-111111111111" - # Initialize agentops first - agentops.init(api_key=self.api_key, max_wait_time=50, 
auto_start_session=False) - self.session = agentops.start_session() - assert self.session is not None # Verify session was created - self.exporter = self.session._otel_exporter - - def teardown_method(self): - """Clean up after each test""" - if self.session: - self.session.end_session("Success") - agentops.end_all_sessions() - clear_singletons() - - def create_test_span(self, name="test_span", attributes=None): - """Helper to create a test span with required attributes""" - if attributes is None: - attributes = {} - - # Ensure required attributes are present - base_attributes = { - "event.id": str(UUID(int=1)), - "event.type": "test_type", - "event.timestamp": datetime.now(timezone.utc).isoformat(), - "event.end_timestamp": datetime.now(timezone.utc).isoformat(), - "event.data": json.dumps({"test": "data"}), - "session.id": str(self.session.session_id), - } - base_attributes.update(attributes) - - context = SpanContext( - trace_id=0x000000000000000000000000DEADBEEF, - span_id=0x00000000DEADBEF0, - is_remote=False, - trace_state=TraceState(), - ) - - return ReadableSpan( - name=name, - context=context, - kind=SpanKind.INTERNAL, - status=Status(StatusCode.OK), - start_time=123, - end_time=456, - attributes=base_attributes, - events=[], - links=[], - resource=self.session._tracer_provider.resource, - ) - - def test_export_basic_span(self, mock_req): - """Test basic span export with all required fields""" - span = self.create_test_span() - result = self.exporter.export([span]) - - assert result == SpanExportResult.SUCCESS - assert len(mock_req.request_history) > 0 - - last_request = mock_req.last_request.json() - assert "events" in last_request - event = last_request["events"][0] - - # Verify required fields - assert "id" in event - assert "event_type" in event - assert "init_timestamp" in event - assert "end_timestamp" in event - assert "session_id" in event - - def test_export_action_event(self, mock_req): - """Test export of action event with specific formatting""" - action_attributes = { - "event.data": json.dumps( - { - "action_type": "test_action", - "params": {"param1": "value1"}, - "returns": "test_return", - } - ) - } - - span = self.create_test_span(name="actions", attributes=action_attributes) - result = self.exporter.export([span]) - - assert result == SpanExportResult.SUCCESS - - last_request = mock_req.request_history[-1].json() - event = last_request["events"][0] - - assert event["action_type"] == "test_action" - assert event["params"] == {"param1": "value1"} - assert event["returns"] == "test_return" - - def test_export_tool_event(self, mock_req): - """Test export of tool event with specific formatting""" - tool_attributes = { - "event.data": json.dumps( - { - "name": "test_tool", - "params": {"param1": "value1"}, - "returns": "test_return", - } - ) - } - - span = self.create_test_span(name="tools", attributes=tool_attributes) - result = self.exporter.export([span]) - - assert result == SpanExportResult.SUCCESS - - last_request = mock_req.request_history[-1].json() - event = last_request["events"][0] - - assert event["name"] == "test_tool" - assert event["params"] == {"param1": "value1"} - assert event["returns"] == "test_return" - - def test_export_with_missing_timestamp(self, mock_req): - """Test handling of missing end_timestamp""" - attributes = {"event.end_timestamp": None} # This should be handled gracefully - - span = self.create_test_span(attributes=attributes) - result = self.exporter.export([span]) - - assert result == SpanExportResult.SUCCESS - - last_request = 
mock_req.request_history[-1].json() - event = last_request["events"][0] - - # Verify end_timestamp is present and valid - assert "end_timestamp" in event - assert event["end_timestamp"] is not None - - def test_export_with_missing_timestamps_advanced(self, mock_req): - """Test handling of missing timestamps""" - attributes = {"event.timestamp": None, "event.end_timestamp": None} - - span = self.create_test_span(attributes=attributes) - result = self.exporter.export([span]) - - assert result == SpanExportResult.SUCCESS - - last_request = mock_req.request_history[-1].json() - event = last_request["events"][0] - - # Verify timestamps are present and valid - assert "init_timestamp" in event - assert "end_timestamp" in event - assert event["init_timestamp"] is not None - assert event["end_timestamp"] is not None - - # Verify timestamps are in ISO format - try: - datetime.fromisoformat(event["init_timestamp"].replace("Z", "+00:00")) - datetime.fromisoformat(event["end_timestamp"].replace("Z", "+00:00")) - except ValueError: - pytest.fail("Timestamps are not in valid ISO format") - - def test_export_with_shutdown(self, mock_req): - """Test export behavior when shutdown""" - self.exporter._shutdown.set() - span = self.create_test_span() - - result = self.exporter.export([span]) - assert result == SpanExportResult.SUCCESS - - # Verify no request was made - assert not any(req.url.endswith("/v2/create_events") for req in mock_req.request_history[-1:]) - - def test_export_llm_event(self, mock_req): - """Test export of LLM event with specific handling of timestamps""" - llm_attributes = { - "event.data": json.dumps( - { - "prompt": "test prompt", - "completion": "test completion", - "model": "test-model", - "tokens": 100, - "cost": 0.002, - } - ) - } - - span = self.create_test_span(name="llms", attributes=llm_attributes) - result = self.exporter.export([span]) - - assert result == SpanExportResult.SUCCESS - - last_request = mock_req.request_history[-1].json() - event = last_request["events"][0] - - # Verify LLM specific fields - assert event["prompt"] == "test prompt" - assert event["completion"] == "test completion" - assert event["model"] == "test-model" - assert event["tokens"] == 100 - assert event["cost"] == 0.002 - - # Verify timestamps - assert event["init_timestamp"] is not None - assert event["end_timestamp"] is not None - - def test_export_with_missing_id(self, mock_req): - """Test handling of missing event ID""" - attributes = {"event.id": None} - - span = self.create_test_span(attributes=attributes) - result = self.exporter.export([span]) - - assert result == SpanExportResult.SUCCESS - - last_request = mock_req.request_history[-1].json() - event = last_request["events"][0] - - # Verify ID is present and valid UUID - assert "id" in event - assert event["id"] is not None - try: - UUID(event["id"]) - except ValueError: - pytest.fail("Event ID is not a valid UUID") diff --git a/tests/unit/test_singleton.py b/tests/unit/test_singleton.py deleted file mode 100644 index a9de287b1..000000000 --- a/tests/unit/test_singleton.py +++ /dev/null @@ -1,39 +0,0 @@ -import uuid - -from agentops.singleton import clear_singletons, conditional_singleton, singleton - - -@singleton -class SingletonClass: - def __init__(self): - self.id = str(uuid.uuid4()) - - -@conditional_singleton -class ConditionalSingletonClass: - def __init__(self): - self.id = str(uuid.uuid4()) - - -class TestSingleton: - def test_singleton(self): - c1 = SingletonClass() - c2 = SingletonClass() - - assert c1.id == c2.id - - def 
test_conditional_singleton(self): - c1 = ConditionalSingletonClass() - c2 = ConditionalSingletonClass() - noSingleton = ConditionalSingletonClass(use_singleton=False) - - assert c1.id == c2.id - assert c1.id != noSingleton.id - assert c2.id != noSingleton.id - - def test_clear_singletons(self): - c1 = SingletonClass() - clear_singletons() - c2 = SingletonClass() - - assert c1.id != c2.id diff --git a/tests/unit/test_teardown.py b/tests/unit/test_teardown.py deleted file mode 100644 index eadb5b549..000000000 --- a/tests/unit/test_teardown.py +++ /dev/null @@ -1,12 +0,0 @@ -import pytest -import requests_mock - -import agentops - - -class TestSessions: - def test_exit(self, mock_req): - url = "https://api.agentops.ai" - api_key = "11111111-1111-4111-8111-111111111111" - tool_name = "test_tool_name" - agentops.init(api_key, max_wait_time=5, auto_start_session=False) diff --git a/tests/unit/test_time_travel.py b/tests/unit/test_time_travel.py deleted file mode 100644 index 7c00930a1..000000000 --- a/tests/unit/test_time_travel.py +++ /dev/null @@ -1,35 +0,0 @@ -import unittest -from unittest.mock import patch, mock_open, Mock - -from agentops.time_travel import ( - TimeTravel, - check_time_travel_active, -) - - -class TestTimeTravel(unittest.TestCase): - @patch("os.path.dirname") - @patch("os.path.abspath") - @patch( - "builtins.open", - new_callable=mock_open, - read_data='{"completion_overrides": {}}', - ) - def test_init(self, mock_open, mock_abspath, mock_dirname): - mock_abspath.return_value = "/path/to/script" - mock_dirname.return_value = "/path/to" - instance = TimeTravel() - self.assertEqual(instance._completion_overrides, {}) - - @patch("os.path.dirname") - @patch("os.path.abspath") - @patch( - "builtins.open", - new_callable=mock_open, - read_data='{"Time_Travel_Debugging_Active": true}', - ) - def test_check_time_travel_active(self, mock_open, mock_abspath, mock_dirname): - mock_abspath.return_value = "/path/to/script" - mock_dirname.return_value = "/path/to" - result = check_time_travel_active() - self.assertTrue(result) From 6822086c73914df2599765b94a7a7a5727223d97 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 00:15:22 +0200 Subject: [PATCH 12/45] cleanup partners/ Signed-off-by: Teo --- agentops/partners/__init__.py | 0 agentops/partners/autogen_logger.py | 119 --- .../partners/langchain_callback_handler.py | 883 ------------------ agentops/partners/taskweaver_event_handler.py | 191 ---- 4 files changed, 1193 deletions(-) delete mode 100644 agentops/partners/__init__.py delete mode 100644 agentops/partners/autogen_logger.py delete mode 100644 agentops/partners/langchain_callback_handler.py delete mode 100755 agentops/partners/taskweaver_event_handler.py diff --git a/agentops/partners/__init__.py b/agentops/partners/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/agentops/partners/autogen_logger.py b/agentops/partners/autogen_logger.py deleted file mode 100644 index 9fc85fb28..000000000 --- a/agentops/partners/autogen_logger.py +++ /dev/null @@ -1,119 +0,0 @@ -from __future__ import annotations - -import logging -import threading -import uuid -from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Union, TypeVar, Callable - -import agentops -from openai import AzureOpenAI, OpenAI -from openai.types.chat import ChatCompletion - -from autogen.logger.base_logger import BaseLogger, LLMConfig - -from agentops.session import EndState -from agentops.helpers import get_ISO_time - -from agentops import LLMEvent, ToolEvent, ActionEvent -from 
uuid import uuid4 - -if TYPE_CHECKING: - from autogen import Agent, ConversableAgent, OpenAIWrapper - -logger = logging.getLogger(__name__) -lock = threading.Lock() - -__all__ = ("AutogenLogger",) - -F = TypeVar("F", bound=Callable[..., Any]) - - -class AutogenLogger(BaseLogger): - agent_store: [{"agentops_id": str, "autogen_id": str}] = [] - - def __init__(self): - pass - - def start(self) -> str: - pass - - def _get_agentops_id_from_agent(self, autogen_id: str) -> str: - for agent in self.agent_store: - if agent["autogen_id"] == autogen_id: - return agent["agentops_id"] - - def log_chat_completion( - self, - invocation_id: uuid.UUID, - client_id: int, - wrapper_id: int, - agent: Union[str, Agent], - request: Dict[str, Union[float, str, List[Dict[str, str]]]], - response: Union[str, ChatCompletion], - is_cached: int, - cost: float, - start_time: str, - ) -> None: - """Records an LLMEvent to AgentOps session""" - - completion = response.choices[len(response.choices) - 1] - - # Note: Autogen tokens are not included in the request and function call tokens are not counted in the completion - llm_event = LLMEvent( - prompt=request["messages"], - completion=completion.message.to_dict(), - model=response.model, - cost=cost, - returns=completion.message.to_json(), - ) - llm_event.init_timestamp = start_time - llm_event.end_timestamp = get_ISO_time() - llm_event.agent_id = self._get_agentops_id_from_agent(str(id(agent))) - agentops.record(llm_event) - - def log_new_agent(self, agent: ConversableAgent, init_args: Dict[str, Any]) -> None: - """Calls agentops.create_agent""" - ao_agent_id = agentops.create_agent(agent.name, str(uuid4())) - self.agent_store.append({"agentops_id": ao_agent_id, "autogen_id": str(id(agent))}) - - def log_event(self, source: Union[str, Agent], name: str, **kwargs: Dict[str, Any]) -> None: - """Records an ActionEvent to AgentOps session""" - event = ActionEvent(action_type=name) - agentops_id = self._get_agentops_id_from_agent(str(id(source))) - event.agent_id = agentops_id - event.params = kwargs - agentops.record(event) - - def log_function_use(self, source: Union[str, Agent], function: F, args: Dict[str, Any], returns: any): - """Records a ToolEvent to AgentOps session""" - event = ToolEvent() - agentops_id = self._get_agentops_id_from_agent(str(id(source))) - event.agent_id = agentops_id - event.function = function # TODO: this is not a parameter - event.params = args - event.returns = returns - event.name = getattr(function, "__name__") - agentops.record(event) - - def log_new_wrapper( - self, - wrapper: OpenAIWrapper, - init_args: Dict[str, Union[LLMConfig, List[LLMConfig]]], - ) -> None: - pass - - def log_new_client( - self, - client: Union[AzureOpenAI, OpenAI], - wrapper: OpenAIWrapper, - init_args: Dict[str, Any], - ) -> None: - pass - - def stop(self) -> None: - """Ends AgentOps session""" - agentops.end_session(end_state=EndState.INDETERMINATE.value) - - def get_connection(self) -> None: - """Method intentionally left blank""" - pass diff --git a/agentops/partners/langchain_callback_handler.py b/agentops/partners/langchain_callback_handler.py deleted file mode 100644 index 768097dcf..000000000 --- a/agentops/partners/langchain_callback_handler.py +++ /dev/null @@ -1,883 +0,0 @@ -from typing import Dict, Any, List, Optional, Sequence, Union -from collections import defaultdict -from uuid import UUID -import logging -import os - -from tenacity import RetryCallState - -from langchain_core.agents import AgentFinish, AgentAction -from langchain_core.documents import 
Document -from langchain_core.outputs import ChatGenerationChunk, GenerationChunk, LLMResult -from langchain_core.callbacks.base import BaseCallbackHandler, AsyncCallbackHandler -from langchain_core.messages import BaseMessage - -from agentops import Client as AOClient -from agentops import ActionEvent, LLMEvent, ToolEvent, ErrorEvent -from agentops.helpers import get_ISO_time, debug_print_function_params -from ..log_config import logger - - -def get_model_from_kwargs(kwargs: any) -> str: - if "model" in kwargs["invocation_params"]: - return kwargs["invocation_params"]["model"] - elif "_type" in kwargs["invocation_params"]: - return kwargs["invocation_params"]["_type"] - else: - return "unknown_model" - - -class Events: - llm: Dict[str, LLMEvent] = {} - tool: Dict[str, ToolEvent] = {} - chain: Dict[str, ActionEvent] = {} - retriever: Dict[str, ActionEvent] = {} - error: Dict[str, ErrorEvent] = {} - - -class LangchainCallbackHandler(BaseCallbackHandler): - """Callback handler for Langchain agents.""" - - def __init__( - self, - api_key: Optional[str] = None, - endpoint: Optional[str] = None, - max_wait_time: Optional[int] = None, - max_queue_size: Optional[int] = None, - default_tags: List[str] = ["langchain", "sync"], - ): - logging_level = os.getenv("AGENTOPS_LOGGING_LEVEL") - log_levels = { - "CRITICAL": logging.CRITICAL, - "ERROR": logging.ERROR, - "INFO": logging.INFO, - "WARNING": logging.WARNING, - "DEBUG": logging.DEBUG, - } - logger.setLevel(log_levels.get(logging_level or "INFO", "INFO")) - - client_params: Dict[str, Any] = { - "api_key": api_key, - "endpoint": endpoint, - "max_wait_time": max_wait_time, - "max_queue_size": max_queue_size, - "default_tags": default_tags, - } - - self.ao_client = AOClient() - if self.ao_client.session_count == 0: - self.ao_client.configure( - **{k: v for k, v in client_params.items() if v is not None}, - instrument_llm_calls=False, - ) - - if not self.ao_client.is_initialized: - self.ao_client.initialize() - - self.agent_actions: Dict[UUID, List[ActionEvent]] = defaultdict(list) - self.events = Events() - - @debug_print_function_params - def on_llm_start( - self, - serialized: Dict[str, Any], - prompts: List[str], - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - tags: Optional[List[str]] = None, - metadata: Optional[Dict[str, Any]] = None, - **kwargs: Any, - ) -> Any: - self.events.llm[str(run_id)] = LLMEvent( - params={ - "serialized": serialized, - "metadata": ({} if metadata is None else metadata), - "kwargs": kwargs, - "run_id": run_id, - "parent_run_id": parent_run_id, - "tags": tags, - }, - model=get_model_from_kwargs(kwargs), - prompt=prompts[0], - ) - - @debug_print_function_params - def on_chat_model_start( - self, - serialized: Dict[str, Any], - messages: List[List[BaseMessage]], - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - tags: Optional[List[str]] = None, - metadata: Optional[Dict[str, Any]] = None, - **kwargs: Any, - ) -> Any: - """Run when a chat model starts running.""" - parsed_messages = [ - {"role": message.type, "content": message.content} - for message in messages[0] - if message.type in ["system", "human"] - ] - - action_event = ActionEvent( - params={ - "serialized": serialized, - "metadata": ({} if metadata is None else metadata), - "kwargs": kwargs, - "run_id": run_id, - "parent_run_id": parent_run_id, - "tags": tags, - "messages": parsed_messages, - }, - action_type="on_chat_model_start", - ) - self.ao_client.record(action_event) - - # Initialize LLMEvent here since on_llm_start isn't called 
for chat models - self.events.llm[str(run_id)] = LLMEvent( - params={ - "serialized": serialized, - "messages": messages, - "run_id": run_id, - "parent_run_id": parent_run_id, - "tags": tags, - "metadata": ({} if metadata is None else metadata), - "kwargs": kwargs, - }, - model=get_model_from_kwargs(kwargs), - prompt=parsed_messages, - completion="", - returns={}, - ) - - @debug_print_function_params - def on_llm_error( - self, - error: BaseException, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - **kwargs: Any, - ) -> Any: - llm_event: LLMEvent = self.events.llm[str(run_id)] - error_event = ErrorEvent( - trigger_event=llm_event, - exception=error, - details={"run_id": run_id, "parent_run_id": parent_run_id, "kwargs": kwargs}, - ) - self.ao_client.record(error_event) - - @debug_print_function_params - def on_llm_end( - self, - response: LLMResult, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - **kwargs: Any, - ) -> Any: - llm_event: LLMEvent = self.events.llm[str(run_id)] - llm_event.returns = response - llm_event.end_timestamp = get_ISO_time() - - if len(response.generations) == 0: - error_event = ErrorEvent( - trigger_event=self.events.llm[str(run_id)], - error_type="NoGenerations", - details={"run_id": run_id, "parent_run_id": parent_run_id, "kwargs": kwargs}, - ) - self.ao_client.record(error_event) - else: - for generation in response.generations[0]: - if ( - generation.message.type == "AIMessage" - and generation.text - and llm_event.completion != generation.text - ): - llm_event.completion = generation.text - elif ( - generation.message.type == "AIMessageChunk" - and generation.message.content - and llm_event.completion != generation.message.content - ): - llm_event.completion += generation.message.content - - if response.llm_output is not None: - llm_event.prompt_tokens = response.llm_output["token_usage"]["prompt_tokens"] - llm_event.completion_tokens = response.llm_output["token_usage"]["completion_tokens"] - self.ao_client.record(llm_event) - - @debug_print_function_params - def on_chain_start( - self, - serialized: Dict[str, Any], - inputs: Dict[str, Any], - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - tags: Optional[List[str]] = None, - metadata: Optional[Dict[str, Any]] = None, - **kwargs: Any, - ) -> Any: - # Initialize with empty dicts if None - serialized = serialized or {} - inputs = inputs or {} - metadata = metadata or {} - - self.events.chain[str(run_id)] = ActionEvent( - params={ - "serialized": serialized, - "inputs": inputs, - "metadata": ({} if metadata is None else metadata), - "kwargs": kwargs, - "run_id": run_id, - "parent_run_id": parent_run_id, - "tags": tags, - **kwargs, - }, - action_type="on_chain_start", - ) - - @debug_print_function_params - def on_chain_end( - self, - outputs: Dict[str, Any], - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - **kwargs: Any, - ) -> Any: - action_event: ActionEvent = self.events.chain[str(run_id)] - action_event.returns = outputs - action_event.end_timestamp = get_ISO_time() - self.ao_client.record(action_event) - - @debug_print_function_params - def on_chain_error( - self, - error: BaseException, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - **kwargs: Any, - ) -> Any: - # Create a new ActionEvent if one doesn't exist for this run_id - if str(run_id) not in self.events.chain: - self.events.chain[str(run_id)] = ActionEvent(params=kwargs, action_type="on_chain_error") - - action_event = self.events.chain[str(run_id)] - error_event = 
ErrorEvent(trigger_event=action_event, exception=error) - self.ao_client.record(error_event) - - @debug_print_function_params - def on_tool_start( - self, - serialized: Dict[str, Any], - input_str: str, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - tags: Optional[List[str]] = None, - metadata: Optional[Dict[str, Any]] = None, - inputs: Optional[Dict[str, Any]] = None, - **kwargs: Any, - ) -> Any: - self.events.tool[str(run_id)] = ToolEvent( - params=inputs, - name=serialized.get("name"), - logs={ - "serialized": serialized, - "input_str": input_str, - "metadata": ({} if metadata is None else metadata), - "kwargs": kwargs, - "run_id": run_id, - "parent_run_id": parent_run_id, - "tags": tags, - }, - ) - - @debug_print_function_params - def on_tool_end( - self, - output: str, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - **kwargs: Any, - ) -> Any: - tool_event: ToolEvent = self.events.tool[str(run_id)] - tool_event.end_timestamp = get_ISO_time() - tool_event.returns = output - - if kwargs.get("name") == "_Exception": - error_event = ErrorEvent( - trigger_event=tool_event, - error_type="LangchainToolException", - details=output, - ) - self.ao_client.record(error_event) - else: - self.ao_client.record(tool_event) - - @debug_print_function_params - def on_tool_error( - self, - error: BaseException, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - **kwargs: Any, - ) -> Any: - tool_event: ToolEvent = self.events.tool[str(run_id)] - error_event = ErrorEvent(trigger_event=tool_event, exception=error) - self.ao_client.record(error_event) - - @debug_print_function_params - def on_retriever_start( - self, - serialized: Dict[str, Any], - query: str, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - tags: Optional[List[str]] = None, - metadata: Optional[Dict[str, Any]] = None, - **kwargs: Any, - ) -> Any: - self.events.retriever[str(run_id)] = ActionEvent( - params={ - "serialized": serialized, - "query": query, - "metadata": ({} if metadata is None else metadata), - "kwargs": kwargs, - "run_id": run_id, - "parent_run_id": parent_run_id, - "tags": tags, - }, - action_type="on_retriever_start", - ) - - @debug_print_function_params - def on_retriever_end( - self, - documents: Sequence[Document], - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - tags: Optional[List[str]] = None, - **kwargs: Any, - ) -> Any: - action_event: ActionEvent = self.events.retriever[str(run_id)] - action_event.returns = documents - action_event.end_timestamp = get_ISO_time() - self.ao_client.record(action_event) - - @debug_print_function_params - def on_retriever_error( - self, - error: BaseException, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - tags: Optional[List[str]] = None, - **kwargs: Any, - ) -> Any: - action_event: ActionEvent = self.events.retriever[str(run_id)] - error_event = ErrorEvent(trigger_event=action_event, exception=error) - self.ao_client.record(error_event) - - @debug_print_function_params - def on_agent_action( - self, - action: AgentAction, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - **kwargs: Any, - ) -> Any: - self.agent_actions[run_id].append( - ActionEvent(params={"action": action, **kwargs}, action_type="on_agent_action") - ) - - @debug_print_function_params - def on_agent_finish( - self, - finish: AgentFinish, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - **kwargs: Any, - ) -> Any: - self.agent_actions[run_id][-1].returns = finish.to_json() - for agentAction in 
self.agent_actions[run_id]: - self.ao_client.record(agentAction) - - @debug_print_function_params - def on_retry( - self, - retry_state: RetryCallState, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - **kwargs: Any, - ) -> Any: - action_event = ActionEvent( - params={ - "retry_state": retry_state, - "run_id": run_id, - "parent_run_id": parent_run_id, - "kwargs": kwargs, - }, - action_type="on_retry", - ) - self.ao_client.record(action_event) - - @debug_print_function_params - def on_llm_new_token( - self, - token: str, - *, - chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - tags: Optional[List[str]] = None, - **kwargs: Any, - ) -> Any: - """Run on new LLM token. Only available when streaming is enabled.""" - if str(run_id) not in self.events.llm: - self.events.llm[str(run_id)] = LLMEvent(params=kwargs) - self.events.llm[str(run_id)].completion = "" - - llm_event = self.events.llm[str(run_id)] - # Always append the new token to the existing completion - llm_event.completion += token - - @property - def current_session_ids(self): - return self.ao_client.current_session_ids - - -class AsyncLangchainCallbackHandler(AsyncCallbackHandler): - """Callback handler for Langchain agents.""" - - def __init__( - self, - api_key: Optional[str] = None, - endpoint: Optional[str] = None, - max_wait_time: Optional[int] = None, - max_queue_size: Optional[int] = None, - default_tags: List[str] = ["langchain", "async"], - ): - logging_level = os.getenv("AGENTOPS_LOGGING_LEVEL") - log_levels = { - "CRITICAL": logging.CRITICAL, - "ERROR": logging.ERROR, - "INFO": logging.INFO, - "WARNING": logging.WARNING, - "DEBUG": logging.DEBUG, - } - logger.setLevel(log_levels.get(logging_level or "INFO", "INFO")) - - client_params: Dict[str, Any] = { - "api_key": api_key, - "endpoint": endpoint, - "max_wait_time": max_wait_time, - "max_queue_size": max_queue_size, - "default_tags": default_tags, - } - - self.ao_client = AOClient() - if self.ao_client.session_count == 0: - self.ao_client.configure( - **{k: v for k, v in client_params.items() if v is not None}, - instrument_llm_calls=False, - default_tags=["langchain"], - ) - - if not self.ao_client.is_initialized: - self.ao_client.initialize() - - self.agent_actions: Dict[UUID, List[ActionEvent]] = defaultdict(list) - self.events = Events() - - @debug_print_function_params - async def on_llm_start( - self, - serialized: Dict[str, Any], - prompts: List[str], - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - tags: Optional[List[str]] = None, - metadata: Optional[Dict[str, Any]] = None, - **kwargs: Any, - ) -> None: - self.events.llm[str(run_id)] = LLMEvent( - params={ - "serialized": serialized, - "metadata": ({} if metadata is None else metadata), - "kwargs": kwargs, - "run_id": run_id, - "parent_run_id": parent_run_id, - "tags": tags, - }, - model=get_model_from_kwargs(kwargs), - prompt=prompts[0], - ) - - @debug_print_function_params - async def on_chat_model_start( - self, - serialized: Dict[str, Any], - messages: List[List[BaseMessage]], - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - tags: Optional[List[str]] = None, - metadata: Optional[Dict[str, Any]] = None, - **kwargs: Any, - ) -> None: - """Run when a chat model starts running.""" - parsed_messages = [ - {"role": message.type, "content": message.content} - for message in messages[0] - if message.type in ["system", "human"] - ] - - action_event = ActionEvent( - params={ - "serialized": serialized, 
- "metadata": ({} if metadata is None else metadata), - "kwargs": kwargs, - "run_id": run_id, - "parent_run_id": parent_run_id, - "tags": tags, - "messages": parsed_messages, - }, - action_type="on_chat_model_start", - ) - self.ao_client.record(action_event) - - # Initialize LLMEvent here since on_llm_start isn't called for chat models - self.events.llm[str(run_id)] = LLMEvent( - params={ - "serialized": serialized, - "messages": messages, - "run_id": run_id, - "parent_run_id": parent_run_id, - "tags": tags, - "metadata": ({} if metadata is None else metadata), - "kwargs": kwargs, - }, - model=get_model_from_kwargs(kwargs), - prompt=parsed_messages, - completion="", - returns={}, - ) - - @debug_print_function_params - async def on_llm_new_token( - self, - token: str, - *, - chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - tags: Optional[List[str]] = None, - **kwargs: Any, - ) -> None: - """Run on new LLM token. Only available when streaming is enabled.""" - if str(run_id) not in self.events.llm: - self.events.llm[str(run_id)] = LLMEvent(params=kwargs) - self.events.llm[str(run_id)].completion = "" - - llm_event = self.events.llm[str(run_id)] - # Always append the new token to the existing completion - llm_event.completion += token - - @debug_print_function_params - async def on_llm_error( - self, - error: BaseException, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - **kwargs: Any, - ) -> None: - llm_event: LLMEvent = self.events.llm[str(run_id)] - error_event = ErrorEvent( - trigger_event=llm_event, - exception=error, - details={"run_id": run_id, "parent_run_id": parent_run_id, "kwargs": kwargs}, - ) - self.ao_client.record(error_event) - - @debug_print_function_params - async def on_llm_end( - self, - response: LLMResult, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - **kwargs: Any, - ) -> None: - llm_event: LLMEvent = self.events.llm[str(run_id)] - llm_event.returns = response - llm_event.end_timestamp = get_ISO_time() - - if len(response.generations) == 0: - error_event = ErrorEvent( - trigger_event=self.events.llm[str(run_id)], - error_type="NoGenerations", - details={"run_id": run_id, "parent_run_id": parent_run_id, "kwargs": kwargs}, - ) - self.ao_client.record(error_event) - else: - for generation in response.generations[0]: - if ( - generation.message.type == "AIMessage" - and generation.text - and llm_event.completion != generation.text - ): - llm_event.completion = generation.text - elif ( - generation.message.type == "AIMessageChunk" - and generation.message.content - and llm_event.completion != generation.message.content - ): - llm_event.completion += generation.message.content - - if response.llm_output is not None: - llm_event.prompt_tokens = response.llm_output["token_usage"]["prompt_tokens"] - llm_event.completion_tokens = response.llm_output["token_usage"]["completion_tokens"] - self.ao_client.record(llm_event) - - @debug_print_function_params - async def on_chain_start( - self, - serialized: Dict[str, Any], - inputs: Dict[str, Any], - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - tags: Optional[List[str]] = None, - metadata: Optional[Dict[str, Any]] = None, - **kwargs: Any, - ) -> None: - # Initialize with empty dicts if None - serialized = serialized or {} - inputs = inputs or {} - metadata = metadata or {} - - self.events.chain[str(run_id)] = ActionEvent( - params={ - "serialized": serialized, - "inputs": inputs, - "metadata": ({} if metadata is 
None else metadata), - "kwargs": kwargs, - "run_id": run_id, - "parent_run_id": parent_run_id, - "tags": tags, - }, - action_type="on_chain_start", - ) - - @debug_print_function_params - async def on_chain_end( - self, - outputs: Dict[str, Any], - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - **kwargs: Any, - ) -> None: - action_event: ActionEvent = self.events.chain[str(run_id)] - action_event.returns = outputs - action_event.end_timestamp = get_ISO_time() - self.ao_client.record(action_event) - - @debug_print_function_params - async def on_chain_error( - self, - error: BaseException, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - **kwargs: Any, - ) -> None: - # Create a new ActionEvent if one doesn't exist for this run_id - if str(run_id) not in self.events.chain: - self.events.chain[str(run_id)] = ActionEvent(params=kwargs, action_type="on_chain_error") - - action_event = self.events.chain[str(run_id)] - error_event = ErrorEvent(trigger_event=action_event, exception=error) - self.ao_client.record(error_event) - - @debug_print_function_params - async def on_tool_start( - self, - serialized: Dict[str, Any], - input_str: str, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - tags: Optional[List[str]] = None, - metadata: Optional[Dict[str, Any]] = None, - inputs: Optional[Dict[str, Any]] = None, - **kwargs: Any, - ) -> None: - self.events.tool[str(run_id)] = ToolEvent( - params=inputs, - name=serialized.get("name"), - logs={ - "serialized": serialized, - "input_str": input_str, - "metadata": ({} if metadata is None else metadata), - "kwargs": kwargs, - "run_id": run_id, - "parent_run_id": parent_run_id, - "tags": tags, - }, - ) - - @debug_print_function_params - async def on_tool_end( - self, - output: str, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - **kwargs: Any, - ) -> None: - tool_event: ToolEvent = self.events.tool[str(run_id)] - tool_event.end_timestamp = get_ISO_time() - tool_event.returns = output - - if kwargs.get("name") == "_Exception": - error_event = ErrorEvent( - trigger_event=tool_event, - error_type="LangchainToolException", - details=output, - ) - self.ao_client.record(error_event) - else: - self.ao_client.record(tool_event) - - @debug_print_function_params - async def on_tool_error( - self, - error: BaseException, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - **kwargs: Any, - ) -> None: - tool_event: ToolEvent = self.events.tool[str(run_id)] - error_event = ErrorEvent(trigger_event=tool_event, exception=error) - self.ao_client.record(error_event) - - @debug_print_function_params - async def on_retriever_start( - self, - serialized: Dict[str, Any], - query: str, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - tags: Optional[List[str]] = None, - metadata: Optional[Dict[str, Any]] = None, - **kwargs: Any, - ) -> None: - self.events.retriever[str(run_id)] = ActionEvent( - params={ - "serialized": serialized, - "query": query, - "metadata": ({} if metadata is None else metadata), - "kwargs": kwargs, - "run_id": run_id, - "parent_run_id": parent_run_id, - "tags": tags, - }, - action_type="on_retriever_start", - ) - - @debug_print_function_params - async def on_retriever_end( - self, - documents: Sequence[Document], - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - tags: Optional[List[str]] = None, - **kwargs: Any, - ) -> None: - action_event: ActionEvent = self.events.retriever[str(run_id)] - action_event.returns = documents - action_event.end_timestamp = get_ISO_time() - 
self.ao_client.record(action_event) - - @debug_print_function_params - async def on_retriever_error( - self, - error: BaseException, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - tags: Optional[List[str]] = None, - **kwargs: Any, - ) -> None: - action_event: ActionEvent = self.events.retriever[str(run_id)] - error_event = ErrorEvent(trigger_event=action_event, exception=error) - self.ao_client.record(error_event) - - @debug_print_function_params - async def on_agent_action( - self, - action: AgentAction, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - **kwargs: Any, - ) -> None: - self.agent_actions[run_id].append( - ActionEvent(params={"action": action, **kwargs}, action_type="on_agent_action") - ) - - @debug_print_function_params - async def on_agent_finish( - self, - finish: AgentFinish, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - **kwargs: Any, - ) -> None: - self.agent_actions[run_id][-1].returns = finish.to_json() - for agentAction in self.agent_actions[run_id]: - self.ao_client.record(agentAction) - - @debug_print_function_params - async def on_retry( - self, - retry_state: RetryCallState, - *, - run_id: UUID, - parent_run_id: Optional[UUID] = None, - **kwargs: Any, - ) -> None: - action_event = ActionEvent( - params={ - "retry_state": retry_state, - "run_id": run_id, - "parent_run_id": parent_run_id, - "kwargs": kwargs, - }, - action_type="on_retry", - ) - self.ao_client.record(action_event) - - @property - def current_session_ids(self): - return self.ao_client.current_session_ids diff --git a/agentops/partners/taskweaver_event_handler.py b/agentops/partners/taskweaver_event_handler.py deleted file mode 100755 index 18a179d7c..000000000 --- a/agentops/partners/taskweaver_event_handler.py +++ /dev/null @@ -1,191 +0,0 @@ -from taskweaver.module.event_emitter import ( - SessionEventHandlerBase, - SessionEventType, - RoundEventType, - PostEventType, -) -import agentops -from agentops.event import ActionEvent, ErrorEvent, ToolEvent -from datetime import datetime, timezone -from typing import Dict, Any -from agentops.log_config import logger - -ATTACHMENT_TOOLS = [ - "thought", - "reply_type", - "reply_content", - "verification", - "code_error", - "execution_status", - "execution_result", - "artifact_paths", - "revise_message", - "function", - "web_exploring_plan", - "web_exploring_screenshot", - "web_exploring_link", -] - - -class TaskWeaverEventHandler(SessionEventHandlerBase): - def __init__(self): - super().__init__() - self._message_buffer: Dict[str, Dict[str, Any]] = {} - self._attachment_buffer: Dict[str, Dict[str, Any]] = {} - self._active_agents: Dict[str, str] = {} - - def _get_or_create_agent(self, role: str): - """Get existing agent ID or create new agent for role+round combination""" - if role not in self._active_agents: - agent_id = agentops.create_agent(name=role) - if agent_id: - self._active_agents[role] = agent_id - return self._active_agents.get(role) - - def handle_session(self, type: SessionEventType, msg: str, extra: Any, **kwargs: Any): - agentops.record(ActionEvent(action_type=type.value, params={"extra": extra, "message": msg})) - - def handle_round(self, type: RoundEventType, msg: str, extra: Any, round_id: str, **kwargs: Any): - if type == RoundEventType.round_error: - agentops.record( - ErrorEvent(error_type=type.value, details={"round_id": round_id, "message": msg, "extra": extra}) - ) - logger.error(f"Could not record the Round event: {msg}") - self.cleanup_round() - else: - agentops.record( - ActionEvent( - 
action_type=type.value, - params={"round_id": round_id, "extra": extra}, - returns=msg, - ) - ) - if type == RoundEventType.round_end: - self.cleanup_round() - - def handle_post(self, type: PostEventType, msg: str, extra: Any, post_id: str, round_id: str, **kwargs: Any): - role = extra.get("role", "Planner") - agent_id = self._get_or_create_agent(role=role) - - if type == PostEventType.post_error: - agentops.record( - ErrorEvent( - error_type=type.value, - details={"post_id": post_id, "round_id": round_id, "message": msg, "extra": extra}, - ) - ) - logger.error(f"Could not record the Post event: {msg}") - - elif type == PostEventType.post_start or type == PostEventType.post_end: - agentops.record( - ActionEvent( - action_type=type.value, - params={"post_id": post_id, "round_id": round_id, "extra": extra}, - returns=msg, - agent_id=agent_id, - ) - ) - - elif type == PostEventType.post_status_update: - agentops.record( - ActionEvent( - action_type=type.value, - params={"post_id": post_id, "round_id": round_id, "extra": extra}, - returns=msg, - agent_id=agent_id, - ) - ) - - elif type == PostEventType.post_attachment_update: - attachment_id = extra["id"] - attachment_type = extra["type"].value - is_end = extra["is_end"] - - if attachment_id not in self._attachment_buffer: - self._attachment_buffer[attachment_id] = { - "role": attachment_type, - "content": [], - "init_timestamp": datetime.now(timezone.utc).isoformat(), - "end_timestamp": None, - } - - self._attachment_buffer[attachment_id]["content"].append(str(msg)) - - if is_end: - self._attachment_buffer[attachment_id]["end_timestamp"] = datetime.now(timezone.utc).isoformat() - complete_message = "".join(self._attachment_buffer[attachment_id]["content"]) - - if attachment_type in ATTACHMENT_TOOLS: - agentops.record( - ToolEvent( - name=type.value, - init_timestamp=self._attachment_buffer[attachment_id]["init_timestamp"], - end_timestamp=self._attachment_buffer[attachment_id]["end_timestamp"], - params={ - "post_id": post_id, - "round_id": round_id, - "attachment_id": attachment_id, - "attachment_type": self._attachment_buffer[attachment_id]["role"], - "extra": extra, - }, - returns=complete_message, - agent_id=agent_id, - ) - ) - else: - agentops.record( - ActionEvent( - action_type=type.value, - init_timestamp=self._attachment_buffer[attachment_id]["init_timestamp"], - end_timestamp=self._attachment_buffer[attachment_id]["end_timestamp"], - params={ - "post_id": post_id, - "round_id": round_id, - "attachment_id": attachment_id, - "attachment_type": self._attachment_buffer[attachment_id]["role"], - "extra": extra, - }, - returns=complete_message, - agent_id=agent_id, - ) - ) - - self._attachment_buffer.pop(attachment_id, None) - - elif type == PostEventType.post_message_update: - is_end = extra["is_end"] - - if post_id not in self._message_buffer: - self._message_buffer[post_id] = { - "content": [], - "init_timestamp": datetime.now(timezone.utc).isoformat(), - "end_timestamp": None, - } - - self._message_buffer[post_id]["content"].append(str(msg)) - - if is_end: - self._message_buffer[post_id]["end_timestamp"] = datetime.now(timezone.utc).isoformat() - complete_message = "".join(self._message_buffer[post_id]["content"]) - agentops.record( - ActionEvent( - action_type=type.value, - init_timestamp=self._message_buffer[post_id]["init_timestamp"], - end_timestamp=self._message_buffer[post_id]["end_timestamp"], - params={ - "post_id": post_id, - "round_id": round_id, - "extra": extra, - }, - returns=complete_message, - agent_id=agent_id, - ) - ) 
- - self._message_buffer.pop(post_id, None) - - def cleanup_round(self): - """Cleanup agents and buffers for a completed round""" - self._active_agents.clear() - self._message_buffer.clear() - self._attachment_buffer.clear() From 6d0175e127d815529a81845cbbba852b885829d7 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 00:18:16 +0200 Subject: [PATCH 13/45] Move sdk examples to examples/sdk/ Signed-off-by: Teo --- examples/{ => sdk}/basic.py | 0 examples/{ => sdk}/basic_session_example.py | 0 examples/{ => sdk}/session_commands_example.py | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename examples/{ => sdk}/basic.py (100%) rename examples/{ => sdk}/basic_session_example.py (100%) rename examples/{ => sdk}/session_commands_example.py (100%) diff --git a/examples/basic.py b/examples/sdk/basic.py similarity index 100% rename from examples/basic.py rename to examples/sdk/basic.py diff --git a/examples/basic_session_example.py b/examples/sdk/basic_session_example.py similarity index 100% rename from examples/basic_session_example.py rename to examples/sdk/basic_session_example.py diff --git a/examples/session_commands_example.py b/examples/sdk/session_commands_example.py similarity index 100% rename from examples/session_commands_example.py rename to examples/sdk/session_commands_example.py From 2a41d1b559c484e7a5639b9fa3ad1d02931e8524 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 00:19:54 +0200 Subject: [PATCH 14/45] fix: tracing utility span_kind closure nonlocal reference Signed-off-by: Teo --- agentops/sdk/tracing/utility.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/agentops/sdk/tracing/utility.py b/agentops/sdk/tracing/utility.py index d570a5048..30b2dbf66 100644 --- a/agentops/sdk/tracing/utility.py +++ b/agentops/sdk/tracing/utility.py @@ -224,7 +224,7 @@ def _finalize_span(span: trace.Span, token: Any) -> None: def instrument_operation( - span_kind: Optional[str] = SpanKind.OPERATION, + span_kind: str = SpanKind.OPERATION, name: Optional[str] = None, version: Optional[int] = None, ): @@ -242,6 +242,7 @@ def decorator(fn): is_async = _is_coroutine_or_generator(fn) operation_name = name or fn.__name__ # Use default span_kind if None is provided + nonlocal span_kind span_kind = span_kind or SpanKind.OPERATION # noqa: F823 if is_async: From a095fac8e30a79fd4c5d7902aff22ccfb3198db5 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 00:22:34 +0200 Subject: [PATCH 15/45] examples/sdk/basic: add a second nest level Signed-off-by: Teo --- examples/sdk/basic.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/examples/sdk/basic.py b/examples/sdk/basic.py index bbbce33ed..412466b32 100644 --- a/examples/sdk/basic.py +++ b/examples/sdk/basic.py @@ -1,15 +1,20 @@ -from agentops.sdk.decorators.agentops import session, agent, operation, record import agentops - +from agentops.sdk.decorators.agentops import agent, operation, record, session agentops.init() @agent class Agent: + + @operation + def nested_operation(self): + print("Hello, world!") + @operation def my_operation(self): print("Hello, world!") + self.nested_operation() @session From dcc79ffc65a39e5b2575a75b28fc3e5195fa3a98 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 00:42:03 +0200 Subject: [PATCH 16/45] cleanup tests/factory Signed-off-by: Teo --- tests/unit/sdk/test_factory.py | 172 --------------------------------- 1 file changed, 172 deletions(-) delete mode 100644 tests/unit/sdk/test_factory.py diff --git a/tests/unit/sdk/test_factory.py 
b/tests/unit/sdk/test_factory.py deleted file mode 100644 index f942ea3cb..000000000 --- a/tests/unit/sdk/test_factory.py +++ /dev/null @@ -1,172 +0,0 @@ -import pytest -from unittest.mock import MagicMock, patch -from uuid import UUID - -from agentops.sdk.factory import SpanFactory -from agentops.sdk.traced import TracedObject - - -# Create concrete span classes for testing -class TestSessionSpan(TracedObject): - """Test session span class.""" - - pass - - -class TestAgentSpan(TracedObject): - """Test agent span class.""" - - pass - - -class TestToolSpan(TracedObject): - """Test tool span class.""" - - pass - - -@pytest.fixture -def setup_span_factory(): - """Set up the test by registering test span types.""" - # Register test span types - SpanFactory._span_types = {} # Clear existing registrations - SpanFactory.register_span_type("session", TestSessionSpan) - SpanFactory.register_span_type("agent", TestAgentSpan) - SpanFactory.register_span_type("tool", TestToolSpan) - yield - # Clean up after tests - SpanFactory._span_types = {} - - -def test_register_span_type(setup_span_factory): - """Test registering a span type.""" - - # Test registering a new span type - class CustomSpan(TracedObject): - pass - - SpanFactory.register_span_type("custom", CustomSpan) - assert SpanFactory._span_types["custom"] == CustomSpan - - # Test overriding an existing span type - class NewSessionSpan(TracedObject): - pass - - SpanFactory.register_span_type("session", NewSessionSpan) - assert SpanFactory._span_types["session"] == NewSessionSpan - - -def test_create_span(setup_span_factory): - """Test creating a span.""" - # Test creating a session span - span = SpanFactory.create_span(kind="session", name="test_session", auto_start=False) - assert isinstance(span, TestSessionSpan) - assert span.name == "test_session" - assert span.kind == "session" - assert not span.is_started - - # Test creating a span with auto_start=True - with patch.object(TestAgentSpan, "start") as mock_start: - span = SpanFactory.create_span(kind="agent", name="test_agent", auto_start=True) - mock_start.assert_called_once() - - # Test creating a span with unknown kind - with pytest.raises(ValueError): - SpanFactory.create_span(kind="unknown", name="test_unknown") - - -def test_create_session_span(setup_span_factory): - """Test creating a session span.""" - with patch.object(SpanFactory, "create_span") as mock_create_span: - SpanFactory.create_session_span( - name="test_session", attributes={"key": "value"}, auto_start=True, immediate_export=True - ) - mock_create_span.assert_called_once_with( - kind="session", - name="test_session", - parent=None, - attributes={"key": "value"}, - auto_start=True, - immediate_export=True, - ) - - -def test_create_agent_span(setup_span_factory): - """Test creating an agent span.""" - with patch.object(SpanFactory, "create_span") as mock_create_span: - parent = MagicMock() - SpanFactory.create_agent_span( - name="test_agent", parent=parent, attributes={"key": "value"}, auto_start=True, immediate_export=True - ) - mock_create_span.assert_called_once_with( - kind="agent", - name="test_agent", - parent=parent, - attributes={"key": "value"}, - auto_start=True, - immediate_export=True, - ) - - -def test_create_tool_span(setup_span_factory): - """Test creating a tool span.""" - with patch.object(SpanFactory, "create_span") as mock_create_span: - parent = MagicMock() - SpanFactory.create_tool_span( - name="test_tool", parent=parent, attributes={"key": "value"}, auto_start=True, immediate_export=False - ) - 
mock_create_span.assert_called_once_with( - kind="tool", - name="test_tool", - parent=parent, - attributes={"key": "value"}, - auto_start=True, - immediate_export=False, - ) - - -def test_create_custom_span(setup_span_factory): - """Test creating a custom span.""" - with patch.object(SpanFactory, "create_span") as mock_create_span: - parent = MagicMock() - SpanFactory.create_custom_span( - kind="custom", - name="test_custom", - parent=parent, - attributes={"key": "value"}, - auto_start=True, - immediate_export=False, - ) - mock_create_span.assert_called_once_with( - kind="custom", - name="test_custom", - parent=parent, - attributes={"key": "value"}, - auto_start=True, - immediate_export=False, - ) - - -def test_auto_register_span_types(): - """Test that the SpanFactory can auto-register span types.""" - # Clear existing registrations - SpanFactory._span_types = {} - SpanFactory._initialized = False - - # Call auto-register method - SpanFactory.auto_register_span_types() - - # Verify that standard span types are registered - from agentops.sdk.spans import SessionSpan, AgentSpan, ToolSpan, CustomSpan - - assert "session" in SpanFactory._span_types - assert SpanFactory._span_types["session"] == SessionSpan - - assert "agent" in SpanFactory._span_types - assert SpanFactory._span_types["agent"] == AgentSpan - - assert "tool" in SpanFactory._span_types - assert SpanFactory._span_types["tool"] == ToolSpan - - assert "custom" in SpanFactory._span_types - assert SpanFactory._span_types["custom"] == CustomSpan From 6ca3cf4d15cbda12d751058ac4a8e5a2d38f1475 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 00:42:57 +0200 Subject: [PATCH 17/45] fix instrumentation_tester Signed-off-by: Teo --- tests/unit/sdk/instrumentation_tester.py | 89 +++++++++--------------- 1 file changed, 32 insertions(+), 57 deletions(-) diff --git a/tests/unit/sdk/instrumentation_tester.py b/tests/unit/sdk/instrumentation_tester.py index d6d0456b0..48e7fd63c 100644 --- a/tests/unit/sdk/instrumentation_tester.py +++ b/tests/unit/sdk/instrumentation_tester.py @@ -1,9 +1,11 @@ from typing import Any, Dict, List, Optional, Protocol, Tuple, Union +import importlib from opentelemetry import trace as trace_api from opentelemetry.sdk.trace import ReadableSpan, Span, TracerProvider from opentelemetry.sdk.trace.export import SimpleSpanProcessor -from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter +from opentelemetry.sdk.trace.export.in_memory_span_exporter import \ + InMemorySpanExporter from opentelemetry.util.types import Attributes import agentops @@ -13,31 +15,34 @@ def create_tracer_provider( **kwargs, -) -> Tuple[TracerProvider, InMemorySpanExporter, LiveSpanProcessor, SimpleSpanProcessor]: +) -> Tuple[TracerProvider, InMemorySpanExporter, SimpleSpanProcessor]: """Helper to create a configured tracer provider. Creates and configures a `TracerProvider` with a - `LiveSpanProcessor` and a `InMemorySpanExporter`. + `SimpleSpanProcessor` and a `InMemorySpanExporter`. All the parameters passed are forwarded to the TracerProvider constructor. Returns: - A tuple with the tracer provider in the first element and the - in-memory span exporter in the second. + A tuple with the tracer provider, memory exporter, and span processor. 
""" tracer_provider = TracerProvider(**kwargs) memory_exporter = InMemorySpanExporter() - # Create a processor for the exporter - # Use a shorter interval for testing - span_processor = LiveSpanProcessor(memory_exporter, schedule_delay_millis=100) + # Use SimpleSpanProcessor instead of both processors to avoid duplication + span_processor = SimpleSpanProcessor(memory_exporter) tracer_provider.add_span_processor(span_processor) - # Also add a SimpleSpanProcessor as a backup to ensure spans are exported - simple_processor = SimpleSpanProcessor(memory_exporter) - tracer_provider.add_span_processor(simple_processor) + return tracer_provider, memory_exporter, span_processor - return tracer_provider, memory_exporter, span_processor, simple_processor + +def reset_trace_globals(): + """Reset the global trace state to avoid conflicts.""" + # Reset tracer provider + trace_api._TRACER_PROVIDER = None + + # Reload the trace module to clear warning state + importlib.reload(trace_api) class HasAttributesViaProperty(Protocol): @@ -63,26 +68,26 @@ class InstrumentationTester: tracer_provider: TracerProvider memory_exporter: InMemorySpanExporter - span_processor: LiveSpanProcessor - simple_processor: SimpleSpanProcessor + span_processor: SimpleSpanProcessor def __init__(self): """Initialize the instrumentation tester.""" - # Create a new tracer provider and memory exporter with both processors + # Reset any global state first + reset_trace_globals() + + # Shut down any existing tracing core + self._shutdown_core() + + # Create a new tracer provider and memory exporter ( self.tracer_provider, self.memory_exporter, self.span_processor, - self.simple_processor, ) = create_tracer_provider() - # Reset the global tracer provider and set the new one - trace_api._TRACER_PROVIDER = None + # Set the tracer provider trace_api.set_tracer_provider(self.tracer_provider) - # Shut down any existing tracing core - self._shutdown_core() - # Get a fresh instance of the tracing core core = TracingCore.get_instance() @@ -90,16 +95,6 @@ def __init__(self): core._provider = self.tracer_provider core._initialized = True - # Reset the factory - from agentops.sdk.factory import SpanFactory - - SpanFactory._span_types = {} - SpanFactory._initialized = False - - # Auto-register span types - SpanFactory.auto_register_span_types() - - # Clear any existing spans self.clear_spans() def _shutdown_core(self): @@ -111,12 +106,8 @@ def _shutdown_core(self): def clear_spans(self): """Clear all spans from the memory exporter.""" - # First export any in-flight spans - self.span_processor.export_in_flight_spans() - # Force flush spans self.span_processor.force_flush() - self.simple_processor.force_flush() # Then clear the memory self.memory_exporter.clear() @@ -124,20 +115,17 @@ def clear_spans(self): def reset(self): """Reset the instrumentation tester.""" - # Export any in-flight spans before clearing - self.span_processor.export_in_flight_spans() - # Force flush any pending spans self.span_processor.force_flush() - self.simple_processor.force_flush() # Clear any existing spans self.clear_spans() - - # Reset the global tracer provider if needed - if trace_api._TRACER_PROVIDER != self.tracer_provider: - trace_api._TRACER_PROVIDER = None - trace_api.set_tracer_provider(self.tracer_provider) + + # Reset global trace state + reset_trace_globals() + + # Set our tracer provider again + trace_api.set_tracer_provider(self.tracer_provider) # Shut down and re-initialize the tracing core self._shutdown_core() @@ -149,23 +137,10 @@ def reset(self): 
core._provider = self.tracer_provider core._initialized = True - # Reset the factory - from agentops.sdk.factory import SpanFactory - - SpanFactory._span_types = {} - SpanFactory._initialized = False - - # Auto-register span types - SpanFactory.auto_register_span_types() - def get_finished_spans(self) -> List[ReadableSpan]: """Get all finished spans.""" - # First, export any in-flight spans to make sure they're captured - self.span_processor.export_in_flight_spans() - # Force flush any pending spans self.span_processor.force_flush() - self.simple_processor.force_flush() # Get the spans spans = list(self.memory_exporter.get_finished_spans()) From b5f473e514c873a099604d31a6e313316f42177a Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 00:48:01 +0200 Subject: [PATCH 18/45] cleanup tests/integration/test_time_factory Signed-off-by: Teo --- tests/integration/test_time_travel.py | 40 --------------------------- 1 file changed, 40 deletions(-) delete mode 100644 tests/integration/test_time_travel.py diff --git a/tests/integration/test_time_travel.py b/tests/integration/test_time_travel.py deleted file mode 100644 index 133c256a7..000000000 --- a/tests/integration/test_time_travel.py +++ /dev/null @@ -1,40 +0,0 @@ -import pytest -from openai import OpenAI - - -@pytest.fixture -def openai_client(): - return OpenAI() - - -@pytest.mark.vcr() -def test_time_travel_story_generation(openai_client): - """Test the complete time travel story generation flow.""" - # Step 1: Get superpower - response1 = openai_client.chat.completions.create( - messages=[ - { - "content": "Come up with a random superpower that isn't time travel. Just return the superpower in the format: 'Superpower: [superpower]'", - "role": "user", - } - ], - model="gpt-3.5-turbo-0125", - ) - superpower = response1.choices[0].message.content.split("Superpower:")[1].strip() - assert superpower - - # Step 2: Get superhero name - response2 = openai_client.chat.completions.create( - messages=[ - { - "content": f"Come up with a superhero name given this superpower: {superpower}. 
Just return the superhero name in this format: 'Superhero: [superhero name]'", - "role": "user", - } - ], - model="gpt-3.5-turbo-0125", - ) - superhero = response2.choices[0].message.content.split("Superhero:")[1].strip() - assert superhero - - # We can continue with more steps, but this shows the pattern - # The test verifies the complete story generation flow works From 2c1a19ab8c0df8bf3043d285f429624ae4e0457e Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 00:51:15 +0200 Subject: [PATCH 19/45] tests/unit/sdk/test_decorators.py Signed-off-by: Teo --- tests/unit/sdk/test_decorators.py | 459 ++++++++++++++---------------- 1 file changed, 212 insertions(+), 247 deletions(-) diff --git a/tests/unit/sdk/test_decorators.py b/tests/unit/sdk/test_decorators.py index 49c3659c4..3d413f02f 100644 --- a/tests/unit/sdk/test_decorators.py +++ b/tests/unit/sdk/test_decorators.py @@ -1,250 +1,215 @@ import pytest -from unittest.mock import patch, MagicMock, ANY - from opentelemetry import trace -from opentelemetry.trace import Span, SpanContext, TraceFlags - -from agentops.sdk.decorators.session import session -from agentops.sdk.decorators.agent import agent -from agentops.sdk.decorators.tool import tool -from agentops.sdk.spans.session import SessionSpan -from agentops.sdk.spans.agent import AgentSpan -from agentops.sdk.spans.tool import ToolSpan - - -# Session Decorator Tests -@patch("agentops.sdk.decorators.session.TracingCore") -def test_session_class_decoration(mock_tracing_core): - """Test decorating a class with session.""" - # Setup mock - mock_span = MagicMock(spec=SessionSpan) - mock_span.span = MagicMock(spec=Span) - mock_instance = mock_tracing_core.get_instance.return_value - mock_instance.create_span.return_value = mock_span - - # Create a decorated class - @session(name="test_session", tags=["tag1", "tag2"]) - class TestClass: - def __init__(self, arg1, arg2=None): - self.arg1 = arg1 - self.arg2 = arg2 - - def method(self): - return f"{self.arg1}:{self.arg2}" - - # Instantiate and test - test = TestClass("test1", "test2") - assert test.arg1 == "test1" - assert test.arg2 == "test2" - assert test._session_span == mock_span - - # Verify that TracingCore was called correctly - mock_instance.create_span.assert_called_once_with( - kind="session", name="test_session", attributes={}, immediate_export=True, config=ANY, tags=["tag1", "tag2"] - ) - - # Verify the span was started - mock_span.start.assert_called_once() - - -@patch("agentops.sdk.decorators.session.TracingCore") -def test_session_function_decoration(mock_tracing_core): - """Test decorating a function with session.""" - # Setup mock - mock_span = MagicMock(spec=SessionSpan) - mock_span.span = MagicMock(spec=Span) - mock_instance = mock_tracing_core.get_instance.return_value - mock_instance.create_span.return_value = mock_span - - # Create a decorated function - @session(name="test_session", tags=["tag1", "tag2"]) - def test_function(arg1, arg2=None): - current_span = trace.get_current_span() - return f"{arg1}:{arg2}:{current_span}" - - # Mock trace.get_current_span to return our mock span - with patch("opentelemetry.trace.get_current_span", return_value=mock_span.span): - # Call and test - result = test_function("test1", "test2") - - # Verify that TracingCore was called correctly - mock_instance.create_span.assert_called_once_with( - kind="session", name="test_session", attributes={}, immediate_export=True, config=ANY, tags=["tag1", "tag2"] - ) - - # Verify the span was started and ended - mock_span.start.assert_called_once() 
- mock_span.end.assert_called_once_with("SUCCEEDED") - - # Result should include the mock_span - assert "test1:test2:" in result - assert str(mock_span.span) in result - - -# Agent Decorator Tests -@patch("agentops.sdk.decorators.agent.trace.get_current_span") -@patch("agentops.sdk.decorators.agent.TracingCore") -def test_agent_class_decoration(mock_tracing_core, mock_get_current_span): - """Test decorating a class with agent.""" - # Setup mocks - mock_parent_span = MagicMock(spec=Span) - mock_parent_span.is_recording.return_value = True - mock_parent_context = SpanContext( - trace_id=0x12345678901234567890123456789012, - span_id=0x1234567890123456, - trace_flags=TraceFlags(TraceFlags.SAMPLED), - is_remote=False, - ) - mock_parent_span.get_span_context.return_value = mock_parent_context - mock_get_current_span.return_value = mock_parent_span - - mock_agent_span = MagicMock(spec=AgentSpan) - mock_agent_span.span = MagicMock(spec=Span) - mock_instance = mock_tracing_core.get_instance.return_value - mock_instance.create_span.return_value = mock_agent_span - - # Create a decorated class - @agent(name="test_agent", agent_type="assistant") - class TestAgent: - def __init__(self, arg1, arg2=None): - self.arg1 = arg1 - self.arg2 = arg2 - - def method(self): - return f"{self.arg1}:{self.arg2}" - - # Instantiate and test - test = TestAgent("test1", "test2") - assert test.arg1 == "test1" - assert test.arg2 == "test2" - assert test._agent_span == mock_agent_span - - # Verify that trace.get_current_span was called - mock_get_current_span.assert_called() - - # Verify that TracingCore was called correctly - mock_instance.create_span.assert_called_once_with( - kind="agent", - name="test_agent", - parent=mock_parent_span, - attributes={}, - immediate_export=True, - agent_type="assistant", - ) - - # Verify the span was started - mock_agent_span.start.assert_called_once() - - # Test a method call - result = test.method() - assert result == "test1:test2" - - -@patch("agentops.sdk.decorators.agent.trace.get_current_span") -@patch("agentops.sdk.decorators.agent.TracingCore") -def test_agent_function_decoration(mock_tracing_core, mock_get_current_span): - """Test decorating a function with agent.""" - # Setup mocks - mock_parent_span = MagicMock(spec=Span) - mock_parent_span.is_recording.return_value = True - mock_parent_context = SpanContext( - trace_id=0x12345678901234567890123456789012, - span_id=0x1234567890123456, - trace_flags=TraceFlags(TraceFlags.SAMPLED), - is_remote=False, - ) - mock_parent_span.get_span_context.return_value = mock_parent_context - mock_get_current_span.return_value = mock_parent_span - - mock_agent_span = MagicMock(spec=AgentSpan) - mock_agent_span.span = MagicMock(spec=Span) - mock_instance = mock_tracing_core.get_instance.return_value - mock_instance.create_span.return_value = mock_agent_span - - # Create a decorated function that uses trace.get_current_span() - @agent(name="test_agent", agent_type="assistant") - def test_function(arg1, arg2=None): - current_span = trace.get_current_span() - return f"{arg1}:{arg2}:{current_span}" - - # Mock trace.get_current_span inside the function to return our agent span - with patch("opentelemetry.trace.get_current_span", side_effect=[mock_parent_span, mock_agent_span.span]): - # Call and test - result = test_function("test1", "test2") - - # Verify that TracingCore was called correctly - mock_instance.create_span.assert_called_once_with( - kind="agent", - name="test_agent", - parent=mock_parent_span, - attributes={}, - immediate_export=True, - 
agent_type="assistant", - ) - - # Verify the span was started - mock_agent_span.start.assert_called_once() - - # Result should include the mock_span - assert "test1:test2:" in result - assert str(mock_agent_span.span) in result - - # Test when no parent span is found - mock_get_current_span.return_value = None - result = test_function("test1", "test2") - assert result == "test1:test2:None" - - -# Tool Decorator Tests -@patch("agentops.sdk.decorators.tool.trace.get_current_span") -@patch("agentops.sdk.decorators.tool.TracingCore") -def test_tool_function_decoration(mock_tracing_core, mock_get_current_span): - """Test decorating a function with tool.""" - # Setup mocks - mock_parent_span = MagicMock(spec=Span) - mock_parent_span.is_recording.return_value = True - mock_parent_context = SpanContext( - trace_id=0x12345678901234567890123456789012, - span_id=0x1234567890123456, - trace_flags=TraceFlags(TraceFlags.SAMPLED), - is_remote=False, - ) - mock_parent_span.get_span_context.return_value = mock_parent_context - mock_get_current_span.return_value = mock_parent_span - - mock_tool_span = MagicMock(spec=ToolSpan) - mock_tool_span.span = MagicMock(spec=Span) - mock_instance = mock_tracing_core.get_instance.return_value - mock_instance.create_span.return_value = mock_tool_span - - # Create a decorated function that uses trace.get_current_span() - @tool(name="test_tool", tool_type="search") - def test_function(arg1, arg2=None): - current_span = trace.get_current_span() - return f"{arg1}:{arg2}:{current_span}" - - # Mock trace.get_current_span inside the function to return our tool span - with patch("opentelemetry.trace.get_current_span", side_effect=[mock_parent_span, mock_tool_span.span]): - # Call and test - result = test_function("test1", "test2") - - # Verify that TracingCore was called correctly - mock_instance.create_span.assert_called_once_with( - kind="tool", name="test_tool", parent=mock_parent_span, attributes={}, immediate_export=True, tool_type="search" - ) - - # Verify the span was started - mock_tool_span.start.assert_called_once() - - # Result should include the mock_span - assert "test1:test2:" in result - assert str(mock_tool_span.span) in result - - # Test set_input and set_output - mock_tool_span.set_input.assert_called_once() - mock_tool_span.set_output.assert_called_once() - # Test when no parent span is found - mock_get_current_span.return_value = None - result = test_function("test1", "test2") - assert result == "test1:test2:None" +from agentops.sdk.decorators.agentops import agent, operation, session +from agentops.semconv import SpanKind + + +class TestSpanNesting: + """Tests for proper nesting of spans in the tracing hierarchy.""" + + def test_operation_nests_under_agent(self, instrumentation): + """Test that operation spans are properly nested under their agent spans.""" + + # Define the test agent with nested operations + @agent + class NestedAgent: + def __init__(self): + pass # No logic needed + + @operation + def nested_operation(self, message): + """Nested operation that should appear as a child of the agent""" + return f"Processed: {message}" + + @operation + def main_operation(self): + """Main operation that calls the nested operation""" + # Call the nested operation + result = self.nested_operation("test message") + return result + + # Test session with the agent + @session + def test_session(): + agent = NestedAgent() + return agent.main_operation() + + # Run the test with our instrumentor + result = test_session() + + # Verify the result + assert result == 
"Processed: test message" + + # Get all spans from the instrumentation + spans = instrumentation.get_finished_spans() + + # Verify we have the expected number of spans (1 session + 1 agent + 2 operations) + assert len(spans) == 4 + + # Find the spans by their names + session_span = None + agent_span = None + main_op_span = None + nested_op_span = None + + for span in spans: + if span.name.endswith(f".{SpanKind.SESSION}"): + session_span = span + elif span.name.endswith(f".{SpanKind.AGENT}"): + agent_span = span + elif span.name == "main_operation.operation": + main_op_span = span + elif span.name == "nested_operation.operation": + nested_op_span = span + + # Verify all spans were found + assert session_span is not None, "Session span not found" + assert agent_span is not None, "Agent span not found" + assert main_op_span is not None, "Main operation span not found" + assert nested_op_span is not None, "Nested operation span not found" + + # Verify the hierarchy: + # 1. Session span is the root + # 2. Agent span is a child of the session span + # 3. Main operation span is a child of the agent span + # 4. Nested operation span is a child of the agent span + + # Check parent-child relationships using span's context + # ReadableSpan doesn't have parent_span_id attribute directly, + # use parent_span_id from span context or attributes + assert session_span.parent is None, "Session span should not have a parent" + + # Agent should be a child of session + assert agent_span.parent is not None + assert agent_span.parent.span_id == session_span.context.span_id + + # Main operation should be a child of agent + assert main_op_span.parent is not None + assert main_op_span.parent.span_id == agent_span.context.span_id + + # Nested operation should be a child of its immediate caller (main operation) + assert nested_op_span.parent is not None + assert nested_op_span.parent.span_id == main_op_span.context.span_id + + # All spans should have the same trace ID + trace_id = session_span.context.trace_id + assert agent_span.context.trace_id == trace_id + assert main_op_span.context.trace_id == trace_id + assert nested_op_span.context.trace_id == trace_id + + # Check proper span nesting timing (a parent's time range should contain its children) + assert session_span.start_time <= agent_span.start_time + assert agent_span.end_time <= session_span.end_time + + assert agent_span.start_time <= main_op_span.start_time + assert main_op_span.end_time <= agent_span.end_time + + assert main_op_span.start_time <= nested_op_span.start_time + assert nested_op_span.end_time <= main_op_span.end_time + + # Verify span attributes for proper classification + assert session_span.attributes.get("agentops.span.kind") == SpanKind.SESSION + assert agent_span.attributes.get("agentops.span.kind") == SpanKind.AGENT + assert main_op_span.attributes.get("agentops.span.kind") == SpanKind.OPERATION + assert nested_op_span.attributes.get("agentops.span.kind") == SpanKind.OPERATION + + + def test_nested_operations_maintain_proper_hierarchy(self, instrumentation): + """Test that deeply nested operations maintain the correct parent-child hierarchy.""" + + @agent + class DeepNestedAgent: + def __init__(self): + pass + + @operation + def level3_operation(self, message): + """Deepest level operation (level 3)""" + return f"L3: {message}" + + @operation + def level2_operation(self, message): + """Level 2 operation that calls level 3""" + result = self.level3_operation(f"{message} → L3") + return f"L2: {result}" + + @operation + def 
level1_operation(self, message): + """Level 1 operation that calls level 2""" + result = self.level2_operation(f"{message} → L2") + return f"L1: {result}" + + @operation + def root_operation(self): + """Root operation that starts the chain""" + result = self.level1_operation("start") + return result + + # Run the test + @session + def deep_test_session(): + agent = DeepNestedAgent() + return agent.root_operation() + + result = deep_test_session() + + # Verify result + assert result == "L1: L2: L3: start → L2 → L3" + + # Get spans + spans = instrumentation.get_finished_spans() + + # Expect 6 spans (session + agent + 4 operations) + assert len(spans) == 6 + + # Extract spans by name + spans_by_name = {} + + # We need to find the spans more carefully since they might have different formats + session_span = None + agent_span = None + root_op_span = None + l1_op_span = None + l2_op_span = None + l3_op_span = None + + for span in spans: + if span.attributes.get("agentops.span.kind") == SpanKind.SESSION: + session_span = span + elif span.attributes.get("agentops.span.kind") == SpanKind.AGENT: + agent_span = span + elif span.name == "root_operation.operation": + root_op_span = span + elif span.name == "level1_operation.operation": + l1_op_span = span + elif span.name == "level2_operation.operation": + l2_op_span = span + elif span.name == "level3_operation.operation": + l3_op_span = span + + assert session_span is not None, "Session span not found" + assert agent_span is not None, "Agent span not found" + assert root_op_span is not None, "Root operation span not found" + assert l1_op_span is not None, "Level 1 operation span not found" + assert l2_op_span is not None, "Level 2 operation span not found" + assert l3_op_span is not None, "Level 3 operation span not found" + + # Check the chain of parents + assert session_span.parent is None + assert agent_span.parent is not None + assert agent_span.parent.span_id == session_span.context.span_id + assert root_op_span.parent is not None + assert root_op_span.parent.span_id == agent_span.context.span_id + assert l1_op_span.parent is not None + assert l1_op_span.parent.span_id == root_op_span.context.span_id + assert l2_op_span.parent is not None + assert l2_op_span.parent.span_id == l1_op_span.context.span_id + assert l3_op_span.parent is not None + assert l3_op_span.parent.span_id == l2_op_span.context.span_id + + # Same trace ID for all spans + trace_id = session_span.context.trace_id + all_spans = [session_span, agent_span, root_op_span, l1_op_span, l2_op_span, l3_op_span] + for span in all_spans: + assert span.context.trace_id == trace_id From c5963aa0a80870461cfadd2e6aa6049c50cdd2cc Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 00:52:58 +0200 Subject: [PATCH 20/45] cleanup tests/unit/test_decorators.py (old) Signed-off-by: Teo --- tests/unit/test_decorators.py | 135 ---------------------------------- 1 file changed, 135 deletions(-) delete mode 100644 tests/unit/test_decorators.py diff --git a/tests/unit/test_decorators.py b/tests/unit/test_decorators.py deleted file mode 100644 index e9f7942d7..000000000 --- a/tests/unit/test_decorators.py +++ /dev/null @@ -1,135 +0,0 @@ -import pytest -from collections import namedtuple -from typing import Tuple -import json - -from agentops.decorators import record_action, record_tool -from agentops.client import Client -from agentops.session import Session -from agentops.event import ActionEvent -from agentops.helpers import filter_unjsonable - - -class TestDecorators: - # Test data - Point = 
namedtuple("Point", ["x", "y"]) - Person = namedtuple("Person", ["name", "age"]) - # Custom namedtuple to test specific subclass behavior mentioned in PR - CustomTuple = namedtuple("CustomTuple", ["data"]) - - @pytest.fixture(autouse=True) - def setup(self): - """Reset client state before each test""" - Client._instance = None - Client().configure(api_key="test_key") - - @staticmethod - @record_action("test_regular_tuple") - def function_with_regular_tuple() -> Tuple[int, str]: - return (1, "test") - - @staticmethod - @record_action("test_named_tuple") - def function_with_named_tuple() -> Point: - return TestDecorators.Point(x=1, y=2) - - @staticmethod - @record_action("test_multiple_named_tuples") - def function_with_multiple_named_tuples() -> Tuple[Point, Person]: - return (TestDecorators.Point(x=1, y=2), TestDecorators.Person(name="John", age=30)) - - @staticmethod - @record_action("test_custom_tuple") - def function_with_custom_tuple() -> CustomTuple: - """Test case for PR #608 where code checks for specific tuple subclass""" - return TestDecorators.CustomTuple(data={"key": "value"}) - - @staticmethod - @record_tool("test_tool_tuple") - def tool_with_tuple() -> Tuple[int, str]: - return (1, "test") - - @staticmethod - @record_tool("test_tool_named_tuple") - def tool_with_named_tuple() -> Point: - return TestDecorators.Point(x=1, y=2) - - def test_type_preservation(self): - """Test that tuple types are preserved after PR #608 changes. - These tests verify that the decorator no longer modifies return types.""" - # Regular tuple - result = self.function_with_regular_tuple() - assert isinstance(result, tuple), "Regular tuples should be preserved" - assert result == (1, "test") - - # Named tuple - result = self.function_with_named_tuple() - assert isinstance(result, self.Point), "Named tuples should be preserved" - assert result.x == 1 - assert result.y == 2 - - # Multiple named tuples - result = self.function_with_multiple_named_tuples() - assert isinstance(result, tuple), "Tuple of named tuples should be preserved" - assert isinstance(result[0], self.Point) - assert isinstance(result[1], self.Person) - assert result[0].x == 1 - assert result[1].name == "John" - - # Custom tuple subclass (specific to PR #608 issue) - result = self.function_with_custom_tuple() - assert isinstance(result, self.CustomTuple), "Custom tuple subclass should be preserved" - assert result.data == {"key": "value"} - - # Tool returns - tool_result = self.tool_with_tuple() - assert isinstance(tool_result, tuple), "Tool tuples should be preserved" - assert tool_result == (1, "test") - - tool_named_result = self.tool_with_named_tuple() - assert isinstance(tool_named_result, self.Point), "Tool named tuples should be preserved" - assert tool_named_result.x == 1 - assert tool_named_result.y == 2 - - def test_json_serialization(self): - """Test that events can be properly serialized with tuples. 
- This demonstrates @teocns's point that JSON serialization works fine with tuples, - as they are naturally converted to lists during JSON serialization.""" - config = Client()._config - session = Session(session_id="test_session", config=config) - - # Test with regular tuple - direct_tuple = (1, "test") - event1 = ActionEvent(action_type="test_action", params={"test": "params"}, returns=direct_tuple) - event1_dict = filter_unjsonable(event1.__dict__) - event1_json = json.dumps(event1_dict) - assert event1_json, "Event with tuple returns should be JSON serializable" - - # Verify the serialized data structure - event1_data = json.loads(event1_json) - assert isinstance(event1_data["returns"], list), "JSON naturally converts tuples to lists" - assert event1_data["returns"] == [1, "test"], "Tuple data should be preserved in JSON" - - # Test with named tuple - named_tuple = self.Point(x=1, y=2) - event2 = ActionEvent(action_type="test_action", params={"test": "params"}, returns=named_tuple) - event2_dict = filter_unjsonable(event2.__dict__) - event2_json = json.dumps(event2_dict) - assert event2_json, "Event with named tuple returns should be JSON serializable" - - # Verify the serialized data structure - event2_data = json.loads(event2_json) - assert isinstance(event2_data["returns"], list), "JSON naturally converts named tuples to lists" - assert event2_data["returns"] == [1, 2], "Named tuple data should be preserved in JSON" - - # Test with custom tuple subclass - custom_tuple = self.CustomTuple(data={"key": "value"}) - event3 = ActionEvent(action_type="test_action", params={"test": "params"}, returns=custom_tuple) - event3_dict = filter_unjsonable(event3.__dict__) - event3_json = json.dumps(event3_dict) - assert event3_json, "Event with custom tuple subclass should be JSON serializable" - - # Verify the serialized data structure - event3_data = json.loads(event3_json) - assert isinstance(event3_data["returns"], list), "JSON naturally converts custom tuples to lists" - assert event3_data["returns"] == [{"key": "value"}], "Custom tuple data should be preserved in JSON" From 024d347251fba84e76387a23d9579b113b8ae3ee Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 01:13:08 +0200 Subject: [PATCH 21/45] test_decorators: more precise testing Signed-off-by: Teo --- tests/unit/sdk/test_decorators.py | 214 ++++++------------------------ 1 file changed, 38 insertions(+), 176 deletions(-) diff --git a/tests/unit/sdk/test_decorators.py b/tests/unit/sdk/test_decorators.py index 3d413f02f..953d9ecda 100644 --- a/tests/unit/sdk/test_decorators.py +++ b/tests/unit/sdk/test_decorators.py @@ -1,215 +1,77 @@ +from typing import TYPE_CHECKING + import pytest from opentelemetry import trace from agentops.sdk.decorators.agentops import agent, operation, session from agentops.semconv import SpanKind +from agentops.semconv.span_attributes import SpanAttributes +from tests.unit.sdk.instrumentation_tester import InstrumentationTester class TestSpanNesting: """Tests for proper nesting of spans in the tracing hierarchy.""" - def test_operation_nests_under_agent(self, instrumentation): + def test_operation_nests_under_agent(self, instrumentation: InstrumentationTester): """Test that operation spans are properly nested under their agent spans.""" - + # Define the test agent with nested operations @agent class NestedAgent: def __init__(self): pass # No logic needed - + @operation def nested_operation(self, message): """Nested operation that should appear as a child of the agent""" return f"Processed: {message}" - 
+ @operation def main_operation(self): """Main operation that calls the nested operation""" # Call the nested operation result = self.nested_operation("test message") return result - + # Test session with the agent @session def test_session(): agent = NestedAgent() return agent.main_operation() - + # Run the test with our instrumentor result = test_session() - + + instrumentation.get_finished_spans() + # Verify the result assert result == "Processed: test message" - - # Get all spans from the instrumentation + + # Get all spans captured during the test spans = instrumentation.get_finished_spans() - - # Verify we have the expected number of spans (1 session + 1 agent + 2 operations) + + # We should have 3 spans: session, agent, and two operations assert len(spans) == 4 - - # Find the spans by their names - session_span = None - agent_span = None - main_op_span = None - nested_op_span = None - - for span in spans: - if span.name.endswith(f".{SpanKind.SESSION}"): - session_span = span - elif span.name.endswith(f".{SpanKind.AGENT}"): - agent_span = span - elif span.name == "main_operation.operation": - main_op_span = span - elif span.name == "nested_operation.operation": - nested_op_span = span - - # Verify all spans were found - assert session_span is not None, "Session span not found" - assert agent_span is not None, "Agent span not found" - assert main_op_span is not None, "Main operation span not found" - assert nested_op_span is not None, "Nested operation span not found" - - # Verify the hierarchy: - # 1. Session span is the root - # 2. Agent span is a child of the session span - # 3. Main operation span is a child of the agent span - # 4. Nested operation span is a child of the agent span - - # Check parent-child relationships using span's context - # ReadableSpan doesn't have parent_span_id attribute directly, - # use parent_span_id from span context or attributes - assert session_span.parent is None, "Session span should not have a parent" - - # Agent should be a child of session - assert agent_span.parent is not None - assert agent_span.parent.span_id == session_span.context.span_id - - # Main operation should be a child of agent - assert main_op_span.parent is not None - assert main_op_span.parent.span_id == agent_span.context.span_id - - # Nested operation should be a child of its immediate caller (main operation) - assert nested_op_span.parent is not None - assert nested_op_span.parent.span_id == main_op_span.context.span_id - - # All spans should have the same trace ID - trace_id = session_span.context.trace_id - assert agent_span.context.trace_id == trace_id - assert main_op_span.context.trace_id == trace_id - assert nested_op_span.context.trace_id == trace_id - - # Check proper span nesting timing (a parent's time range should contain its children) - assert session_span.start_time <= agent_span.start_time - assert agent_span.end_time <= session_span.end_time - - assert agent_span.start_time <= main_op_span.start_time - assert main_op_span.end_time <= agent_span.end_time - - assert main_op_span.start_time <= nested_op_span.start_time - assert nested_op_span.end_time <= main_op_span.end_time - - # Verify span attributes for proper classification - assert session_span.attributes.get("agentops.span.kind") == SpanKind.SESSION - assert agent_span.attributes.get("agentops.span.kind") == SpanKind.AGENT - assert main_op_span.attributes.get("agentops.span.kind") == SpanKind.OPERATION - assert nested_op_span.attributes.get("agentops.span.kind") == SpanKind.OPERATION - - - def 
test_nested_operations_maintain_proper_hierarchy(self, instrumentation): - """Test that deeply nested operations maintain the correct parent-child hierarchy.""" - - @agent - class DeepNestedAgent: - def __init__(self): - pass - - @operation - def level3_operation(self, message): - """Deepest level operation (level 3)""" - return f"L3: {message}" - - @operation - def level2_operation(self, message): - """Level 2 operation that calls level 3""" - result = self.level3_operation(f"{message} → L3") - return f"L2: {result}" - - @operation - def level1_operation(self, message): - """Level 1 operation that calls level 2""" - result = self.level2_operation(f"{message} → L2") - return f"L1: {result}" - - @operation - def root_operation(self): - """Root operation that starts the chain""" - result = self.level1_operation("start") - return result - - # Run the test - @session - def deep_test_session(): - agent = DeepNestedAgent() - return agent.root_operation() - - result = deep_test_session() - - # Verify result - assert result == "L1: L2: L3: start → L2 → L3" - - # Get spans - spans = instrumentation.get_finished_spans() - - # Expect 6 spans (session + agent + 4 operations) - assert len(spans) == 6 - - # Extract spans by name - spans_by_name = {} - - # We need to find the spans more carefully since they might have different formats - session_span = None - agent_span = None - root_op_span = None - l1_op_span = None - l2_op_span = None - l3_op_span = None - - for span in spans: - if span.attributes.get("agentops.span.kind") == SpanKind.SESSION: - session_span = span - elif span.attributes.get("agentops.span.kind") == SpanKind.AGENT: - agent_span = span - elif span.name == "root_operation.operation": - root_op_span = span - elif span.name == "level1_operation.operation": - l1_op_span = span - elif span.name == "level2_operation.operation": - l2_op_span = span - elif span.name == "level3_operation.operation": - l3_op_span = span - - assert session_span is not None, "Session span not found" - assert agent_span is not None, "Agent span not found" - assert root_op_span is not None, "Root operation span not found" - assert l1_op_span is not None, "Level 1 operation span not found" - assert l2_op_span is not None, "Level 2 operation span not found" - assert l3_op_span is not None, "Level 3 operation span not found" - - # Check the chain of parents + + # Verify span kinds + session_spans = [s for s in spans if s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.SESSION] + agent_spans = [s for s in spans if s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.AGENT] + operation_spans = [s for s in spans if s.attributes.get( + SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.OPERATION] + + assert len(session_spans) == 1 + assert len(agent_spans) == 1 + assert len(operation_spans) == 2 + + # Verify span hierarchy by checking parent-child relationships + # The session span should be the root + session_span = session_spans[0] assert session_span.parent is None - assert agent_span.parent is not None + + # The agent span should be a child of the session span + agent_span = agent_spans[0] assert agent_span.parent.span_id == session_span.context.span_id - assert root_op_span.parent is not None - assert root_op_span.parent.span_id == agent_span.context.span_id - assert l1_op_span.parent is not None - assert l1_op_span.parent.span_id == root_op_span.context.span_id - assert l2_op_span.parent is not None - assert l2_op_span.parent.span_id == l1_op_span.context.span_id - assert l3_op_span.parent is not 
None - assert l3_op_span.parent.span_id == l2_op_span.context.span_id - - # Same trace ID for all spans - trace_id = session_span.context.trace_id - all_spans = [session_span, agent_span, root_op_span, l1_op_span, l2_op_span, l3_op_span] - for span in all_spans: - assert span.context.trace_id == trace_id + + # The operation spans should be children of the agent span + for op_span in operation_spans: + assert op_span.parent.span_id == agent_span.context.span_id From d2de1b90977a04a31c532fafbea7c0fa9d99957a Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 01:25:45 +0200 Subject: [PATCH 22/45] refactor decorators Signed-off-by: Teo --- agentops/helpers/validation.py | 7 + agentops/legacy/__init__.py | 1 - agentops/sdk/__init__.py | 8 +- agentops/sdk/commands.py | 2 +- agentops/sdk/converters.py | 13 +- agentops/sdk/decorators/__init__.py | 102 +++++++- agentops/sdk/decorators/agentops.py | 221 ------------------ agentops/sdk/decorators/context.py | 37 --- agentops/sdk/decorators/utility.py | 155 +++++++++++++ agentops/sdk/decorators/wrappers.py | 144 ++++++++++++ agentops/sdk/tracing/utility.py | 346 ---------------------------- examples/sdk/basic.py | 2 +- 12 files changed, 421 insertions(+), 617 deletions(-) create mode 100644 agentops/helpers/validation.py delete mode 100644 agentops/sdk/decorators/agentops.py delete mode 100644 agentops/sdk/decorators/context.py create mode 100644 agentops/sdk/decorators/utility.py create mode 100644 agentops/sdk/decorators/wrappers.py delete mode 100644 agentops/sdk/tracing/utility.py diff --git a/agentops/helpers/validation.py b/agentops/helpers/validation.py new file mode 100644 index 000000000..2a0c219cf --- /dev/null +++ b/agentops/helpers/validation.py @@ -0,0 +1,7 @@ +from typing import Any + + +def is_coroutine_or_generator(fn: Any) -> bool: + """Check if a function is asynchronous (coroutine or async generator)""" + import inspect + return inspect.iscoroutinefunction(fn) or inspect.isasyncgenfunction(fn) diff --git a/agentops/legacy/__init__.py b/agentops/legacy/__init__.py index 58a505d01..aadbf204e 100644 --- a/agentops/legacy/__init__.py +++ b/agentops/legacy/__init__.py @@ -54,7 +54,6 @@ def end_session(span, token) -> None: Args: span: The span returned by start_session - token: The token returned by start_session """ end_span(span, token) diff --git a/agentops/sdk/__init__.py b/agentops/sdk/__init__.py index c22970b08..024f1109f 100644 --- a/agentops/sdk/__init__.py +++ b/agentops/sdk/__init__.py @@ -7,13 +7,11 @@ # Import command functions from agentops.sdk.commands import end_span, record, start_span - # Import core components from agentops.sdk.core import TracingCore - # Import decorators -from agentops.sdk.decorators.agentops import agent, operation, record as record_decorator, session - +from agentops.sdk.decorators import agent +from agentops.sdk.decorators import task as operation # from agentops.sdk.traced import TracedObject # Merged into TracedObject from agentops.sdk.types import TracingConfig @@ -25,9 +23,7 @@ "TracingCore", "TracingConfig", # Decorators - "session", "operation", - "record_decorator", "agent", # Command functions "start_span", diff --git a/agentops/sdk/commands.py b/agentops/sdk/commands.py index e0542f4a4..9d9d263e0 100644 --- a/agentops/sdk/commands.py +++ b/agentops/sdk/commands.py @@ -15,7 +15,7 @@ from agentops.exceptions import AgentOpsClientNotInitializedException from agentops.sdk.core import TracingCore -from agentops.sdk.tracing.utility import _finalize_span, _make_span +from 
agentops.sdk.decorators.utility import _finalize_span, _make_span from agentops.semconv.span_attributes import SpanAttributes from agentops.semconv.span_kinds import SpanKind diff --git a/agentops/sdk/converters.py b/agentops/sdk/converters.py index 867e55ac3..fee21e257 100644 --- a/agentops/sdk/converters.py +++ b/agentops/sdk/converters.py @@ -2,11 +2,12 @@ Legacy helpers that were being used throughout the SDK """ -from opentelemetry.util.types import Attributes, AttributeValue +import uuid from datetime import datetime, timezone from typing import Optional from uuid import UUID -import uuid + +from opentelemetry.util.types import Attributes, AttributeValue def ns_to_iso(ns_time: Optional[int]) -> Optional[str]: @@ -115,3 +116,11 @@ def int_to_uuid(integer): # Return as UUID object return uuid.UUID(uuid_str) + + +def camel_to_snake(text: str) -> str: + """Convert CamelCase class names to snake_case format""" + import re + + text = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", text) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", text).lower() diff --git a/agentops/sdk/decorators/__init__.py b/agentops/sdk/decorators/__init__.py index 63f392938..0adf485e9 100644 --- a/agentops/sdk/decorators/__init__.py +++ b/agentops/sdk/decorators/__init__.py @@ -1,3 +1,101 @@ -from .agentops import session, agent, operation, record -__all__ = ["session", "agent", "operation", "record"] +""" +Decorators for instrumenting code with AgentOps. + +This module provides a simplified set of decorators for instrumenting functions +and methods with appropriate span kinds. Decorators can be used with or without parentheses. +""" +import inspect +from typing import (Any, Callable, Optional, Type, TypeVar, Union, cast, + overload) + +import wrapt + +from agentops.sdk.decorators.wrappers import wrap_class, wrap_method +from agentops.semconv.span_kinds import SpanKind + +# Type variables for better type hinting +F = TypeVar("F", bound=Callable[..., Any]) +C = TypeVar("C", bound=Type) + + +def task( + name: Optional[str] = None, + version: Optional[int] = None, + method_name: Optional[str] = None, + entity_kind=SpanKind.TASK, +): + if method_name is None: + return wrap_method(name=name, version=version, entity_kind=entity_kind) + else: + return wrap_class( + name=name, + version=version, + method_name=method_name, + entity_kind=entity_kind, + ) + + +def workflow( + name: Optional[str] = None, + version: Optional[int] = None, + method_name: Optional[str] = None, + entity_kind=SpanKind.WORKFLOW, +): + if method_name is None: + return wrap_method(name=name, version=version, entity_kind=entity_kind) + else: + return wrap_class( + name=name, + version=version, + method_name=method_name, + entity_kind=entity_kind, + ) + + +def agent( + name: Optional[str] = None, + version: Optional[int] = None, + method_name: Optional[str] = None, +): + if method_name is None: + return wrap_method(name=name, version=version, entity_kind=SpanKind.AGENT) + else: + return wrap_class( + name=name, + version=version, + method_name=method_name, + entity_kind=SpanKind.AGENT, + ) + + +def tool( + name: Optional[str] = None, + version: Optional[int] = None, + method_name: Optional[str] = None, +): + return task( + name=name, + version=version, + method_name=method_name, + entity_kind=SpanKind.TOOL, + ) + + +def session( + name: Optional[str] = None, + version: Optional[int] = None, + method_name: Optional[str] = None, +): + if method_name is None: + return wrap_method(name=name, version=version, entity_kind=SpanKind.SESSION) + else: + return wrap_class( + 
name=name, + version=version, + method_name=method_name, + entity_kind=SpanKind.SESSION, + ) + + +operation = task \ No newline at end of file diff --git a/agentops/sdk/decorators/agentops.py b/agentops/sdk/decorators/agentops.py deleted file mode 100644 index 93c5a2e36..000000000 --- a/agentops/sdk/decorators/agentops.py +++ /dev/null @@ -1,221 +0,0 @@ -""" -Decorators for instrumenting code with AgentOps. - -This module provides a simplified set of decorators for instrumenting functions -and methods with appropriate span kinds. Decorators can be used with or without parentheses. -""" - -import inspect -from typing import (Any, Callable, Optional, Type, TypeVar, Union, cast, - overload) - -import wrapt - -from agentops.sdk.tracing.utility import instrument_class, instrument_operation -from agentops.semconv.span_kinds import SpanKind - -# Type variables for better type hinting -F = TypeVar("F", bound=Callable[..., Any]) -C = TypeVar("C", bound=Type) - - -def _create_decorator(span_kind: str): - """ - Factory function that creates a universal decorator that can be applied to - both functions and class methods. - - Args: - span_kind: The span kind to use for the decorator - - Returns: - A universal decorator function - """ - - @wrapt.decorator - def universal_wrapper(wrapped, instance, args, kwargs): - # First parameter might be the method name if called as decorator factory - if len(args) > 0 and isinstance(args[0], str) and instance is None and inspect.isclass(wrapped): - # Being used as a class decorator with the first argument as method_name - method_name = args[0] - name = kwargs.get("name") - version = kwargs.get("version") - - # Create and return a class decorator - return instrument_class(method_name=method_name, name=name, version=version, span_kind=span_kind)(wrapped) - else: - # Being used as a normal function/method decorator - return wrapped(*args, **kwargs) - - # We need to handle optional parameters for the decorator - def decorator_factory(*args, **kwargs): - name = kwargs.pop("name", None) - version = kwargs.pop("version", None) - - if len(args) == 1 and callable(args[0]) and not kwargs: - # Called as @decorator without parentheses - return instrument_operation(span_kind=span_kind)(args[0]) - else: - # Called as @decorator() or @decorator(name="name") - return lambda wrapped: instrument_operation(span_kind=span_kind, name=name, version=version)(wrapped) - - return decorator_factory - - -def _create_decorator_specifiable(default_span_kind: Optional[str] = None): - """ - Factory function that creates a universal decorator that allows specifying the span kind. 
- - Args: - default_span_kind: The default span kind to use if none is specified - - Returns: - A universal decorator function that accepts span_kind - """ - - def decorator_factory(*args, **kwargs): - span_kind = kwargs.pop("span_kind", default_span_kind) - name = kwargs.pop("name", None) - version = kwargs.pop("version", None) - - if len(args) == 1 and callable(args[0]) and not kwargs: - # Called as @decorator without parentheses - return instrument_operation(span_kind=span_kind)(args[0]) - elif len(args) == 1 and isinstance(args[0], str) and "method_name" not in kwargs: - # Handle the class decorator case where the first arg is method_name - method_name = args[0] - - def class_decorator(cls): - return instrument_class(method_name=method_name, name=name, version=version, span_kind=span_kind)(cls) - - return class_decorator - else: - # Called as @decorator() or @decorator(name="name") - return lambda wrapped: instrument_operation(span_kind=span_kind, name=name, version=version)(wrapped) - - return decorator_factory - - -# Create the universal decorators -session = _create_decorator(SpanKind.SESSION) -session.__doc__ = """ - Universal decorator for instrumenting functions or class methods as a session operation. - - Can be used in multiple ways: - - 1. On a function: - @session - def function(): ... - - @session(name="custom_name") - def function(): ... - - 2. On a class to instrument a specific method: - @session("method_name") - class MyClass: ... - - @session("method_name", name="custom_name") - class MyClass: ... - - Args: - method_name: When decorating a class, the name of the method to instrument - name: Optional custom name for the operation (defaults to function name) - version: Optional version identifier for the operation - - Returns: - Decorated function or class -""" - -agent = _create_decorator(SpanKind.AGENT) -agent.__doc__ = """ - Universal decorator for instrumenting functions or class methods as an agent operation. - - Can be used in multiple ways: - - 1. On a function: - @agent - def function(): ... - - @agent(name="custom_name") - def function(): ... - - 2. On a class to instrument a specific method: - @agent("method_name") - class MyClass: ... - - @agent("method_name", name="custom_name") - class MyClass: ... - - Args: - method_name: When decorating a class, the name of the method to instrument - name: Optional custom name for the operation (defaults to function name) - version: Optional version identifier for the operation - - Returns: - Decorated function or class -""" - -operation = _create_decorator(SpanKind.OPERATION) -operation.__doc__ = """ - Universal decorator for instrumenting functions or class methods as an operation. - - This is a general-purpose decorator for tracking operations that don't fit - into the specific categories of session or agent. - - Can be used in multiple ways: - - 1. On a function: - @operation - def function(): ... - - @operation(name="custom_name") - def function(): ... - - 2. On a class to instrument a specific method: - @operation("method_name") - class MyClass: ... - - @operation("method_name", name="custom_name") - class MyClass: ... - - By default, this uses the OPERATION span kind. 
- - Args: - method_name: When decorating a class, the name of the method to instrument - name: Optional custom name for the operation (defaults to function name) - version: Optional version identifier for the operation - - Returns: - Decorated function or class -""" - -record = _create_decorator_specifiable() -record.__doc__ = """ - Universal decorator for instrumenting functions or class methods with a specific span kind. - - Use this when you need control over which specific span kind to use. - - Can be used in multiple ways: - - 1. On a function: - @record(span_kind=SpanKind.TOOL) - def function(): ... - - @record(span_kind=SpanKind.LLM_CALL, name="custom_name") - def function(): ... - - 2. On a class to instrument a specific method: - @record("method_name", span_kind=SpanKind.TOOL) - class MyClass: ... - - @record("method_name", span_kind=SpanKind.LLM_CALL, name="custom_name") - class MyClass: ... - - Args: - method_name: When decorating a class, the name of the method to instrument - span_kind: The specific SpanKind to use for this operation - name: Optional custom name for the operation (defaults to function name) - version: Optional version identifier for the operation - - Returns: - Decorated function or class -""" diff --git a/agentops/sdk/decorators/context.py b/agentops/sdk/decorators/context.py deleted file mode 100644 index 9e2d8e154..000000000 --- a/agentops/sdk/decorators/context.py +++ /dev/null @@ -1,37 +0,0 @@ -# TODO: Move me or find better module name - -import contextlib -from typing import Any, Dict, Optional - -from agentops.sdk.commands import end_session, start_session - - -@contextlib.contextmanager -def session_context( - name: str = "session_context", attributes: Optional[Dict[str, Any]] = None, version: Optional[int] = None -): - """ - Context manager for an AgentOps session. - - This provides a convenient way to create a session span that automatically - ends when the context exits. - - Args: - name: Name of the session - attributes: Optional attributes to set on the session span - version: Optional version identifier for the session - - Example: - ```python - # Use as a context manager - with agentops.session_context("my_session"): - # Operations within this block will be part of the session - # ... - # Session automatically ends when the context exits - ``` - """ - span, token = start_session(name, attributes, version) - try: - yield - finally: - end_session(span, token) diff --git a/agentops/sdk/decorators/utility.py b/agentops/sdk/decorators/utility.py new file mode 100644 index 000000000..0ed4fe136 --- /dev/null +++ b/agentops/sdk/decorators/utility.py @@ -0,0 +1,155 @@ +import inspect +import os +import types +import warnings +from functools import wraps +from typing import Any, Dict, Optional + +from opentelemetry import context as context_api +from opentelemetry import trace +from opentelemetry.context import attach, set_value + +from agentops.helpers.serialization import safe_serialize +from agentops.logging import logger +from agentops.sdk.core import TracingCore +from agentops.semconv import SpanKind +from agentops.semconv.span_attributes import SpanAttributes + +""" +!! NOTE !! +References to SpanKind, span_kind, etc. 
are NOT destined towards `span.kind`, +but instead used as an `agentops.semconv.span_attributes.AGENTOPS_SPAN_KIND` +""" + + +def set_workflow_name(workflow_name: str) -> None: + attach(set_value("workflow_name", workflow_name)) + + +def set_entity_path(entity_path: str) -> None: + attach(set_value("entity_path", entity_path)) + +# Helper functions for content management +def _check_content_size(content_json: str) -> bool: + """Verify that a JSON string is within acceptable size limits (1MB)""" + return len(content_json) < 1_000_000 + +def _process_sync_generator(span: trace.Span, generator: types.GeneratorType): + """Process a synchronous generator and manage its span lifecycle""" + # Ensure span context is attached to the generator context + context_api.attach(trace.set_span_in_context(span)) + + # Yield from the generator while maintaining span context + yield from generator + + # End the span when generator is exhausted + span.end() + # No detach because of OpenTelemetry issue #2606 + # Context will be detached during garbage collection + + +async def _process_async_generator(span: trace.Span, context_token: Any, generator: types.AsyncGeneratorType): + """Process an asynchronous generator and manage its span lifecycle""" + try: + async for item in generator: + yield item + finally: + # Always ensure span is ended and context detached + span.end() + context_api.detach(context_token) + +def _make_span( + operation_name: str, span_kind: str, version: Optional[int] = None, attributes: Dict[str, Any] = {} +) -> tuple: + """ + Create and initialize a new instrumentation span with proper context. + + This function: + - Creates a span with proper naming convention ({operation_name}.{span_kind}) + - Gets the current context to establish parent-child relationships + - Creates the span with the current context + - Sets up a new context with the span + - Attaches the context + - Adds standard attributes to the span + + Args: + operation_name: Name of the operation being traced + span_kind: Type of operation (from SpanKind) + version: Optional version identifier for the operation + attributes: Optional dictionary of attributes to set on the span + + Returns: + A tuple of (span, context, token) for span management + """ + # Set session-level information for specified operation types + if span_kind in [SpanKind.SESSION, SpanKind.AGENT]: + # Nesting logic would go here + pass + + # Create span with proper naming convention + span_name = f"{operation_name}.{span_kind}" + + # Get tracer and create span + tracer = TracingCore.get_instance().get_tracer() + + # Get current context to establish parent-child relationship + current_context = context_api.get_current() + + attributes.update( + { + SpanAttributes.AGENTOPS_SPAN_KIND: span_kind, + } + ) + + # Create span with current context to maintain parent-child relationship + span = tracer.start_span(span_name, context=current_context, attributes=attributes) + + # Set up context + context = trace.set_span_in_context(span) + token = context_api.attach(context) + + # Add standard attributes + # FIXME: Use SpanAttributes + span.set_attribute("agentops.operation.name", operation_name) + if version is not None: + span.set_attribute("agentops.operation.version", version) + + # Set attributes during creation + if attributes: + for key, value in attributes.items(): + span.set_attribute(key, value) + + return span, context, token + + +def _record_entity_input(span: trace.Span, args: tuple, kwargs: Dict[str, Any]) -> None: + """Record operation input parameters to span 
if content tracing is enabled""" + try: + input_data = {"args": args, "kwargs": kwargs} + json_data = safe_serialize(input_data) + + if _check_content_size(json_data): + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_INPUT, json_data) + else: + logger.debug("Operation input exceeds size limit, not recording") + except Exception as err: + logger.warning(f"Failed to serialize operation input: {err}") + + +def _record_entity_output(span: trace.Span, result: Any) -> None: + """Record operation output value to span if content tracing is enabled""" + try: + json_data = safe_serialize(result) + + if _check_content_size(json_data): + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_OUTPUT, json_data) + else: + logger.debug("Operation output exceeds size limit, not recording") + except Exception as err: + logger.warning(f"Failed to serialize operation output: {err}") + + +def _finalize_span(span: trace.Span, token: Any) -> None: + """End the span and detach the context token""" + span.end() + context_api.detach(token) diff --git a/agentops/sdk/decorators/wrappers.py b/agentops/sdk/decorators/wrappers.py new file mode 100644 index 000000000..95f97d1aa --- /dev/null +++ b/agentops/sdk/decorators/wrappers.py @@ -0,0 +1,144 @@ + +import inspect +import os +import types +import warnings +from functools import wraps +from typing import Any, Dict, Optional + +from opentelemetry import context as context_api +from opentelemetry import trace + +from agentops.helpers.serialization import safe_serialize +from agentops.helpers.validation import is_coroutine_or_generator +from agentops.logging import logger +from agentops.sdk.converters import camel_to_snake +from agentops.sdk.core import TracingCore +from agentops.semconv import SpanKind +from agentops.semconv.span_attributes import SpanAttributes + +from .utility import (_finalize_span, _make_span, _process_async_generator, + _process_sync_generator, _record_entity_input, + _record_entity_output) + + +def wrap_method( + entity_kind: str = SpanKind.OPERATION, + name: Optional[str] = None, + version: Optional[int] = None, +): + """ + Decorator to instrument a function or method with OpenTelemetry tracing. + Works with both synchronous and asynchronous functions. 
+ + Args: + entity_kind: The type of operation being performed + name: Custom name for the operation (defaults to function name) + version: Optional version identifier for the operation + """ + + def decorator(fn): + is_async = is_coroutine_or_generator(fn) + operation_name = name or fn.__name__ + # Use default entity_kind if None is provided + nonlocal entity_kind + entity_kind = entity_kind or SpanKind.OPERATION # noqa: F823 + + if is_async: + + @wraps(fn) + async def async_wrapper(*args, **kwargs): + # Skip instrumentation if tracer not initialized + if not TracingCore.get_instance()._initialized: + return await fn(*args, **kwargs) + + # Create and configure span + span, ctx, token = _make_span(operation_name, entity_kind, version) + + # Record function inputs + _record_entity_input(span, args, kwargs) + + # Execute the function + result = fn(*args, **kwargs) + + # Handle async generators + if isinstance(result, types.AsyncGeneratorType): + return _process_async_generator(span, token, result) + + # Handle coroutines + result = await result + + # Record function outputs + _record_entity_output(span, result) + + # Clean up + _finalize_span(span, token) + return result + + return async_wrapper + else: + + @wraps(fn) + def sync_wrapper(*args, **kwargs): + # Skip instrumentation if tracer not initialized + if not TracingCore.get_instance()._initialized: + return fn(*args, **kwargs) + + # Create and configure span + span, ctx, token = _make_span(operation_name, entity_kind, version) + + # Record function inputs + _record_entity_input(span, args, kwargs) + + # Execute the function + result = fn(*args, **kwargs) + + # Handle generators + if isinstance(result, types.GeneratorType): + return _process_sync_generator(span, result) + + # Record function outputs + _record_entity_output(span, result) + + # Clean up + _finalize_span(span, token) + return result + + return sync_wrapper + + return decorator + +def wrap_class( + method_name: str, + name: Optional[str] = None, + version: Optional[int] = None, + entity_kind: Optional[str] = SpanKind.OPERATION, +): + """ + Decorator to instrument a specific method on a class. 
+ + Args: + method_name: The name of the method to instrument + name: Custom name for the operation (defaults to snake_case class name) + version: Optional version identifier + entity_kind: The type of operation being performed + """ + + def decorator(cls): + # Derive operation name from class name if not provided + operation_name = name if name else camel_to_snake(cls.__name__) + + # Get the target method from the class + target_method = getattr(cls, method_name) + + # Create an instrumented version of the method + instrumented_method = wrap_method(entity_kind=entity_kind, name=operation_name, version=version)( + target_method + ) + + # Replace the original method with the instrumented version + setattr(cls, method_name, instrumented_method) + + return cls + + return decorator diff --git a/agentops/sdk/tracing/utility.py b/agentops/sdk/tracing/utility.py deleted file mode 100644 index 30b2dbf66..000000000 --- a/agentops/sdk/tracing/utility.py +++ /dev/null @@ -1,346 +0,0 @@ -import inspect -import os -import types -import warnings -from functools import wraps -from typing import Any, Dict, Optional - -from opentelemetry import context as context_api -from opentelemetry import trace - -from agentops.helpers.serialization import safe_serialize -from agentops.logging import logger -from agentops.sdk.core import TracingCore -from agentops.semconv import SpanKind -from agentops.semconv.span_attributes import SpanAttributes - -""" -!! NOTE !! -References to SpanKind, span_kind, etc. are NOT destined towards `span.kind`, -but instead used as an `agentops.semconv.span_attributes.AGENTOPS_SPAN_KIND` -""" - - -# Helper functions for content management -def _check_content_size(content_json: str) -> bool: - """Verify that a JSON string is within acceptable size limits (1MB)""" - return len(content_json) < 1_000_000 - - -def _should_trace_content() -> bool: - """Determine if content tracing is enabled based on environment or context""" - env_setting = os.getenv("AGENTOPS_TRACE_CONTENT", "true").lower() == "true" - context_override = bool(context_api.get_value("override_enable_content_tracing")) - return env_setting or context_override - - -# Legacy async decorators - Marked for deprecation - - -def aentity_method( - span_kind: Optional[str] = SpanKind.OPERATION, - name: Optional[str] = None, - version: Optional[int] = None, -): - warnings.warn( - "DeprecationWarning: The @aentity_method decorator is deprecated. " - "Please use @instrument_operation for both sync and async methods.", - DeprecationWarning, - stacklevel=2, - ) - - return instrument_operation( - span_kind=span_kind, - name=name, - version=version, - ) - - -def aentity_class( - method_name: str, - name: Optional[str] = None, - version: Optional[int] = None, - span_kind: Optional[str] = SpanKind.OPERATION, -): - warnings.warn( - "DeprecationWarning: The @aentity_class decorator is deprecated. 
" - "Please use @instrument_class for both sync and async classes.", - DeprecationWarning, - stacklevel=2, - ) - - return instrument_class( - method_name=method_name, - name=name, - version=version, - span_kind=span_kind, - ) - - -# Function analysis helpers - - -def _is_coroutine_or_generator(fn: Any) -> bool: - """Check if a function is asynchronous (coroutine or async generator)""" - return inspect.iscoroutinefunction(fn) or inspect.isasyncgenfunction(fn) - - -def _convert_camel_to_snake(text: str) -> str: - """Convert CamelCase class names to snake_case format""" - import re - - text = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", text) - return re.sub("([a-z0-9])([A-Z])", r"\1_\2", text).lower() - - -# Generator handling - - -def _process_sync_generator(span: trace.Span, generator: types.GeneratorType): - """Process a synchronous generator and manage its span lifecycle""" - # Ensure span context is attached to the generator context - context_api.attach(trace.set_span_in_context(span)) - - # Yield from the generator while maintaining span context - yield from generator - - # End the span when generator is exhausted - span.end() - # No detach because of OpenTelemetry issue #2606 - # Context will be detached during garbage collection - - -async def _process_async_generator(span: trace.Span, context_token: Any, generator: types.AsyncGeneratorType): - """Process an asynchronous generator and manage its span lifecycle""" - try: - async for item in generator: - yield item - finally: - # Always ensure span is ended and context detached - span.end() - context_api.detach(context_token) - - -# Span creation and management - - -def _make_span( - operation_name: str, span_kind: str, version: Optional[int] = None, attributes: Dict[str, Any] = {} -) -> tuple: - """ - Create and initialize a new instrumentation span with proper context. 
- - This function: - - Creates a span with proper naming convention ({operation_name}.{span_kind}) - - Gets the current context to establish parent-child relationships - - Creates the span with the current context - - Sets up a new context with the span - - Attaches the context - - Adds standard attributes to the span - - Args: - operation_name: Name of the operation being traced - span_kind: Type of operation (from SpanKind) - version: Optional version identifier for the operation - attributes: Optional dictionary of attributes to set on the span - - Returns: - A tuple of (span, context, token) for span management - """ - # Set session-level information for specified operation types - if span_kind in [SpanKind.SESSION, SpanKind.AGENT]: - # Session tracking logic would go here - pass - - # Create span with proper naming convention - span_name = f"{operation_name}.{span_kind}" - - # Get tracer and create span - tracer = TracingCore.get_instance().get_tracer() - - # Get current context to establish parent-child relationship - current_context = context_api.get_current() - - attributes.update( - { - SpanAttributes.AGENTOPS_SPAN_KIND: span_kind, - } - ) - - # Create span with current context to maintain parent-child relationship - span = tracer.start_span(span_name, context=current_context, attributes=attributes) - - # Set up context - context = trace.set_span_in_context(span) - token = context_api.attach(context) - - # Add standard attributes - # FIXME: Use SpanAttributes - span.set_attribute("agentops.operation.name", operation_name) - if version is not None: - span.set_attribute("agentops.operation.version", version) - - # Set attributes during creation - if attributes: - for key, value in attributes.items(): - span.set_attribute(key, value) - - return span, context, token - - -def _record_operation_input(span: trace.Span, args: tuple, kwargs: Dict[str, Any]) -> None: - """Record operation input parameters to span if content tracing is enabled""" - try: - if _should_trace_content(): - input_data = {"args": args, "kwargs": kwargs} - json_data = safe_serialize(input_data) - - if _check_content_size(json_data): - span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_INPUT, json_data) - else: - logger.debug("Operation input exceeds size limit, not recording") - except Exception as err: - logger.warning(f"Failed to serialize operation input: {err}") - - -def _record_operation_output(span: trace.Span, result: Any) -> None: - """Record operation output value to span if content tracing is enabled""" - try: - if _should_trace_content(): - json_data = safe_serialize(result) - - if _check_content_size(json_data): - span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_OUTPUT, json_data) - else: - logger.debug("Operation output exceeds size limit, not recording") - except Exception as err: - logger.warning(f"Failed to serialize operation output: {err}") - - -def _finalize_span(span: trace.Span, token: Any) -> None: - """End the span and detach the context token""" - span.end() - context_api.detach(token) - - -def instrument_operation( - span_kind: str = SpanKind.OPERATION, - name: Optional[str] = None, - version: Optional[int] = None, -): - """ - Decorator to instrument a function or method with OpenTelemetry tracing. - Works with both synchronous and asynchronous functions. 
- - Args: - span_kind: The type of operation being performed - name: Custom name for the operation (defaults to function name) - version: Optional version identifier for the operation - """ - - def decorator(fn): - is_async = _is_coroutine_or_generator(fn) - operation_name = name or fn.__name__ - # Use default span_kind if None is provided - nonlocal span_kind - span_kind = span_kind or SpanKind.OPERATION # noqa: F823 - - if is_async: - - @wraps(fn) - async def async_wrapper(*args, **kwargs): - # Skip instrumentation if tracer not initialized - if not TracingCore.get_instance()._initialized: - return await fn(*args, **kwargs) - - # Create and configure span - span, ctx, token = _make_span(operation_name, span_kind, version) - - # Record function inputs - _record_operation_input(span, args, kwargs) - - # Execute the function - result = fn(*args, **kwargs) - - # Handle async generators - if isinstance(result, types.AsyncGeneratorType): - return _process_async_generator(span, token, result) - - # Handle coroutines - result = await result - - # Record function outputs - _record_operation_output(span, result) - - # Clean up - _finalize_span(span, token) - return result - - return async_wrapper - else: - - @wraps(fn) - def sync_wrapper(*args, **kwargs): - # Skip instrumentation if tracer not initialized - if not TracingCore.get_instance()._initialized: - return fn(*args, **kwargs) - - # Create and configure span - span, ctx, token = _make_span(operation_name, span_kind, version) - - # Record function inputs - _record_operation_input(span, args, kwargs) - - # Execute the function - result = fn(*args, **kwargs) - - # Handle generators - if isinstance(result, types.GeneratorType): - return _process_sync_generator(span, result) - - # Record function outputs - _record_operation_output(span, result) - - # Clean up - _finalize_span(span, token) - return result - - return sync_wrapper - - return decorator - - -def instrument_class( - method_name: str, - name: Optional[str] = None, - version: Optional[int] = None, - span_kind: Optional[str] = SpanKind.OPERATION, -): - """ - Decorator to instrument a specific method on a class. 
- - Args: - method_name: The name of the method to instrument - name: Custom name for the operation (defaults to snake_case class name) - version: Optional version identifier - span_kind: The type of operation being performed - """ - - def decorator(cls): - # Derive operation name from class name if not provided - operation_name = name if name else _convert_camel_to_snake(cls.__name__) - - # Get the target method from the class - target_method = getattr(cls, method_name) - - # Create an instrumented version of the method - instrumented_method = instrument_operation(span_kind=span_kind, name=operation_name, version=version)( - target_method - ) - - # Replace the original method with the instrumented version - setattr(cls, method_name, instrumented_method) - - return cls - - return decorator diff --git a/examples/sdk/basic.py b/examples/sdk/basic.py index 412466b32..c8dcb879b 100644 --- a/examples/sdk/basic.py +++ b/examples/sdk/basic.py @@ -1,5 +1,5 @@ import agentops -from agentops.sdk.decorators.agentops import agent, operation, record, session +from agentops.sdk.decorators import agent, operation, session agentops.init() From afc7a8db386b65624410d632d131cc56a1c0d8bb Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 01:37:52 +0200 Subject: [PATCH 23/45] semconv.span_kinds: +workflow Signed-off-by: Teo --- agentops/sdk/decorators/__init__.py | 108 ++++----------------- agentops/sdk/decorators/factory.py | 89 +++++++++++++++++ agentops/sdk/decorators/wrappers.py | 144 ---------------------------- agentops/semconv/span_kinds.py | 1 + 4 files changed, 106 insertions(+), 236 deletions(-) create mode 100644 agentops/sdk/decorators/factory.py delete mode 100644 agentops/sdk/decorators/wrappers.py diff --git a/agentops/sdk/decorators/__init__.py b/agentops/sdk/decorators/__init__.py index 0adf485e9..4aa1d6e96 100644 --- a/agentops/sdk/decorators/__init__.py +++ b/agentops/sdk/decorators/__init__.py @@ -1,101 +1,25 @@ - """ Decorators for instrumenting code with AgentOps. This module provides a simplified set of decorators for instrumenting functions and methods with appropriate span kinds. Decorators can be used with or without parentheses. 
""" -import inspect -from typing import (Any, Callable, Optional, Type, TypeVar, Union, cast, - overload) - -import wrapt -from agentops.sdk.decorators.wrappers import wrap_class, wrap_method +from agentops.sdk.decorators.factory import create_entity_decorator from agentops.semconv.span_kinds import SpanKind -# Type variables for better type hinting -F = TypeVar("F", bound=Callable[..., Any]) -C = TypeVar("C", bound=Type) - - -def task( - name: Optional[str] = None, - version: Optional[int] = None, - method_name: Optional[str] = None, - entity_kind=SpanKind.TASK, -): - if method_name is None: - return wrap_method(name=name, version=version, entity_kind=entity_kind) - else: - return wrap_class( - name=name, - version=version, - method_name=method_name, - entity_kind=entity_kind, - ) - - -def workflow( - name: Optional[str] = None, - version: Optional[int] = None, - method_name: Optional[str] = None, - entity_kind=SpanKind.WORKFLOW, -): - if method_name is None: - return wrap_method(name=name, version=version, entity_kind=entity_kind) - else: - return wrap_class( - name=name, - version=version, - method_name=method_name, - entity_kind=entity_kind, - ) - - -def agent( - name: Optional[str] = None, - version: Optional[int] = None, - method_name: Optional[str] = None, -): - if method_name is None: - return wrap_method(name=name, version=version, entity_kind=SpanKind.AGENT) - else: - return wrap_class( - name=name, - version=version, - method_name=method_name, - entity_kind=SpanKind.AGENT, - ) - - -def tool( - name: Optional[str] = None, - version: Optional[int] = None, - method_name: Optional[str] = None, -): - return task( - name=name, - version=version, - method_name=method_name, - entity_kind=SpanKind.TOOL, - ) - - -def session( - name: Optional[str] = None, - version: Optional[int] = None, - method_name: Optional[str] = None, -): - if method_name is None: - return wrap_method(name=name, version=version, entity_kind=SpanKind.SESSION) - else: - return wrap_class( - name=name, - version=version, - method_name=method_name, - entity_kind=SpanKind.SESSION, - ) - - -operation = task \ No newline at end of file +# Create decorators for specific entity types using the factory +agent = create_entity_decorator(SpanKind.AGENT) +task = create_entity_decorator(SpanKind.TASK) +workflow = create_entity_decorator(SpanKind.WORKFLOW) +session = create_entity_decorator(SpanKind.SESSION) +operation = task + +__all__ = [ + 'agent', + 'task', + 'workflow', + 'session', +] + +# Create decorators task, workflow, session, agent diff --git a/agentops/sdk/decorators/factory.py b/agentops/sdk/decorators/factory.py new file mode 100644 index 000000000..752e2da1d --- /dev/null +++ b/agentops/sdk/decorators/factory.py @@ -0,0 +1,89 @@ +import inspect +import types +import functools + +import wrapt + +from agentops.logging import logger +from agentops.sdk.core import TracingCore + +from .utility import (_finalize_span, _make_span, _process_async_generator, + _process_sync_generator, _record_entity_input, + _record_entity_output) + + +def create_entity_decorator(entity_kind: str): + """ + Factory function that creates decorators for specific entity kinds. 
+ + Args: + entity_kind: The type of operation being performed (SpanKind.*) + + Returns: + A decorator with optional arguments for name and version + """ + def decorator(wrapped=None, *, name=None, version=None): + # Handle case where decorator is called with parameters + if wrapped is None: + return functools.partial(decorator, name=name, version=version) + + # Create the actual decorator wrapper function + @wrapt.decorator + def wrapper(wrapped, instance, args, kwargs): + # Skip instrumentation if tracer not initialized + if not TracingCore.get_instance()._initialized: + return wrapped(*args, **kwargs) + + # Use provided name or function name + operation_name = name or wrapped.__name__ + + # Create and configure span + span, ctx, token = _make_span(operation_name, entity_kind, version) + + try: + # Record function inputs - safely handle potential serialization issues + try: + _record_entity_input(span, args, kwargs) + except Exception as e: + logger.warning(f"Failed to record entity input: {e}") + + # Execute the function and handle result based on its type + result = wrapped(*args, **kwargs) + + if isinstance(result, types.GeneratorType): + return _process_sync_generator(span, result) + elif isinstance(result, types.AsyncGeneratorType): + return _process_async_generator(span, token, result) + elif inspect.iscoroutine(result): + # For async functions, we need to create a wrapper that awaits the result + async def _awaited_result(): + try: + awaited_result = await result + try: + _record_entity_output(span, awaited_result) + except Exception as e: + logger.warning(f"Failed to record entity output: {e}") + return awaited_result + finally: + _finalize_span(span, token) + + return _awaited_result() + else: + # Handle regular return values + try: + _record_entity_output(span, result) + except Exception as e: + logger.warning(f"Failed to record entity output: {e}") + _finalize_span(span, token) + return result + except Exception as e: + # Ensure span is properly ended even if there's an exception + span.record_exception(e) + _finalize_span(span, token) + raise + + return wrapper(wrapped) + + return decorator + + diff --git a/agentops/sdk/decorators/wrappers.py b/agentops/sdk/decorators/wrappers.py deleted file mode 100644 index 95f97d1aa..000000000 --- a/agentops/sdk/decorators/wrappers.py +++ /dev/null @@ -1,144 +0,0 @@ - -import inspect -import os -import types -import warnings -from functools import wraps -from typing import Any, Dict, Optional - -from opentelemetry import context as context_api -from opentelemetry import trace - -from agentops.helpers.serialization import safe_serialize -from agentops.helpers.validation import is_coroutine_or_generator -from agentops.logging import logger -from agentops.sdk.converters import camel_to_snake -from agentops.sdk.core import TracingCore -from agentops.semconv import SpanKind -from agentops.semconv.span_attributes import SpanAttributes - -from .utility import (_finalize_span, _make_span, _process_async_generator, - _process_sync_generator, _record_entity_input, - _record_entity_output) - - -def wrap_method( - entity_kind: str = SpanKind.OPERATION, - name: Optional[str] = None, - version: Optional[int] = None, -): - """ - Decorator to instrument a function or method with OpenTelemetry tracing. - Works with both synchronous and asynchronous functions. 
- - Args: - entity_kind: The type of operation being performed - name: Custom name for the operation (defaults to function name) - version: Optional version identifier for the operation - """ - - def decorator(fn): - is_async = is_coroutine_or_generator(fn) - operation_name = name or fn.__name__ - # Use default entity_kind if None is provided - nonlocal entity_kind - entity_kind = entity_kind or SpanKind.OPERATION # noqa: F823 - - if is_async: - - @wraps(fn) - async def async_wrapper(*args, **kwargs): - # Skip instrumentation if tracer not initialized - if not TracingCore.get_instance()._initialized: - return await fn(*args, **kwargs) - - # Create and configure span - span, ctx, token = _make_span(operation_name, entity_kind, version) - - # Record function inputs - _record_entity_input(span, args, kwargs) - - # Execute the function - result = fn(*args, **kwargs) - - # Handle async generators - if isinstance(result, types.AsyncGeneratorType): - return _process_async_generator(span, token, result) - - # Handle coroutines - result = await result - - # Record function outputs - _record_entity_output(span, result) - - # Clean up - _finalize_span(span, token) - return result - - return async_wrapper - else: - - @wraps(fn) - def sync_wrapper(*args, **kwargs): - # Skip instrumentation if tracer not initialized - if not TracingCore.get_instance()._initialized: - return fn(*args, **kwargs) - - # Create and configure span - span, ctx, token = _make_span(operation_name, entity_kind, version) - - # Record function inputs - _record_entity_input(span, args, kwargs) - - # Execute the function - result = fn(*args, **kwargs) - - # Handle generators - if isinstance(result, types.GeneratorType): - return _process_sync_generator(span, result) - - # Record function outputs - _record_entity_output(span, result) - - # Clean up - _finalize_span(span, token) - return result - - return sync_wrapper - - return decorator - -def wrap_class( - method_name: str, - name: Optional[str] = None, - version: Optional[int] = None, - entity_kind: Optional[str] = SpanKind.OPERATION, -): - """ - Decorator to instrument a specific method on a class. 
- - Args: - method_name: The name of the method to instrument - name: Custom name for the operation (defaults to snake_case class name) - version: Optional version identifier - entity_kind: The type of operation being performed - """ - - def decorator(cls): - # Derive operation name from class name if not provided - operation_name = name if name else camel_to_snake(cls.__name__) - - # Get the target method from the class - target_method = getattr(cls, method_name) - - # Create an instrumented version of the method - instrumented_method = wrap_method(entity_kind=entity_kind, name=operation_name, version=version)( - target_method - ) - - # Replace the original method with the instrumented version - setattr(cls, method_name, instrumented_method) - - return cls - - return decorator diff --git a/agentops/semconv/span_kinds.py b/agentops/semconv/span_kinds.py index 9b3e89753..190afacbe 100644 --- a/agentops/semconv/span_kinds.py +++ b/agentops/semconv/span_kinds.py @@ -16,6 +16,7 @@ class SpanKind: # Workflow kinds WORKFLOW_STEP = "workflow.step" # Step in a workflow + WORKFLOW = 'workflow' SESSION = "session" TASK = "task" OPERATION = "operation" From 2bf8cf3653df616ff08a21663d377453287fa08a Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 02:15:29 +0200 Subject: [PATCH 24/45] suppress wrapper linter Signed-off-by: Teo --- agentops/sdk/decorators/factory.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agentops/sdk/decorators/factory.py b/agentops/sdk/decorators/factory.py index 752e2da1d..b88eb181e 100644 --- a/agentops/sdk/decorators/factory.py +++ b/agentops/sdk/decorators/factory.py @@ -82,7 +82,7 @@ async def _awaited_result(): _finalize_span(span, token) raise - return wrapper(wrapped) + return wrapper(wrapped) # type: ignore return decorator From 07d28ea27f29a2cbac714b31125f5f4276335845 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 02:20:12 +0200 Subject: [PATCH 25/45] test_decorators: adapt to refactor Signed-off-by: Teo --- tests/unit/sdk/test_decorators.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/sdk/test_decorators.py b/tests/unit/sdk/test_decorators.py index 953d9ecda..11aa07d8d 100644 --- a/tests/unit/sdk/test_decorators.py +++ b/tests/unit/sdk/test_decorators.py @@ -3,7 +3,7 @@ import pytest from opentelemetry import trace -from agentops.sdk.decorators.agentops import agent, operation, session +from agentops.sdk.decorators import agent, operation, session from agentops.semconv import SpanKind from agentops.semconv.span_attributes import SpanAttributes from tests.unit.sdk.instrumentation_tester import InstrumentationTester From 7c573741b222a3178ae1c18b445e06e830bc55a4 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 02:22:07 +0200 Subject: [PATCH 26/45] operation decorator Signed-off-by: Teo --- agentops/sdk/decorators/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/agentops/sdk/decorators/__init__.py b/agentops/sdk/decorators/__init__.py index 4aa1d6e96..a7ffad8a9 100644 --- a/agentops/sdk/decorators/__init__.py +++ b/agentops/sdk/decorators/__init__.py @@ -11,6 +11,7 @@ # Create decorators for specific entity types using the factory agent = create_entity_decorator(SpanKind.AGENT) task = create_entity_decorator(SpanKind.TASK) +operation = create_entity_decorator(SpanKind.OPERATION) workflow = create_entity_decorator(SpanKind.WORKFLOW) session = create_entity_decorator(SpanKind.SESSION) operation = task @@ -20,6 +21,7 @@ 'task', 'workflow', 'session', + 'operation' ] # Create 
decorators task, workflow, session, agent From 75dfd685d3031dfd7a8cf66aa650025913b96ba3 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 02:22:36 +0200 Subject: [PATCH 27/45] utility-rebase Signed-off-by: Teo --- agentops/sdk/decorators/utility.py | 32 ++++++++++-------------------- 1 file changed, 10 insertions(+), 22 deletions(-) diff --git a/agentops/sdk/decorators/utility.py b/agentops/sdk/decorators/utility.py index 0ed4fe136..ad383f1de 100644 --- a/agentops/sdk/decorators/utility.py +++ b/agentops/sdk/decorators/utility.py @@ -64,13 +64,8 @@ def _make_span( """ Create and initialize a new instrumentation span with proper context. - This function: - - Creates a span with proper naming convention ({operation_name}.{span_kind}) - - Gets the current context to establish parent-child relationships - - Creates the span with the current context - - Sets up a new context with the span - - Attaches the context - - Adds standard attributes to the span + This function creates a span that will automatically be nested properly + within any parent span based on the current execution context. Args: operation_name: Name of the operation being traced @@ -81,40 +76,33 @@ def _make_span( Returns: A tuple of (span, context, token) for span management """ - # Set session-level information for specified operation types - if span_kind in [SpanKind.SESSION, SpanKind.AGENT]: - # Nesting logic would go here - pass - # Create span with proper naming convention span_name = f"{operation_name}.{span_kind}" - # Get tracer and create span + # Get tracer tracer = TracingCore.get_instance().get_tracer() - # Get current context to establish parent-child relationship + # Get current context - this automatically maintains the parent-child relationship current_context = context_api.get_current() - attributes.update( - { - SpanAttributes.AGENTOPS_SPAN_KIND: span_kind, - } - ) + # Add span kind to attributes + attributes.update({ + SpanAttributes.AGENTOPS_SPAN_KIND: span_kind, + }) # Create span with current context to maintain parent-child relationship span = tracer.start_span(span_name, context=current_context, attributes=attributes) - # Set up context + # Create a new context with this span and attach it context = trace.set_span_in_context(span) token = context_api.attach(context) # Add standard attributes - # FIXME: Use SpanAttributes span.set_attribute("agentops.operation.name", operation_name) if version is not None: span.set_attribute("agentops.operation.version", version) - # Set attributes during creation + # Set additional attributes if attributes: for key, value in attributes.items(): span.set_attribute(key, value) From 363ae37755045cdd388674bcc462da2b6fe8c972 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 02:58:38 +0200 Subject: [PATCH 28/45] threadlocal context mgmt Signed-off-by: Teo --- agentops/sdk/decorators/factory.py | 125 ++++++++++++++++++++++++++++- agentops/sdk/decorators/utility.py | 24 +++--- 2 files changed, 133 insertions(+), 16 deletions(-) diff --git a/agentops/sdk/decorators/factory.py b/agentops/sdk/decorators/factory.py index b88eb181e..804e45a9b 100644 --- a/agentops/sdk/decorators/factory.py +++ b/agentops/sdk/decorators/factory.py @@ -1,6 +1,8 @@ import inspect import types import functools +import threading +from typing import Dict, Any, Optional import wrapt @@ -12,6 +14,17 @@ _record_entity_output) +# Thread-local storage for active spans by entity kind +class SpanContext(threading.local): + def __init__(self): + # Initialize thread-local storage + 
self.active_spans: Dict[str, Any] = {} + self.agent_instances: Dict[int, Any] = {} + +# Thread-local span context store +_span_ctx = SpanContext() + + def create_entity_decorator(entity_kind: str): """ Factory function that creates decorators for specific entity kinds. @@ -27,6 +40,76 @@ def decorator(wrapped=None, *, name=None, version=None): if wrapped is None: return functools.partial(decorator, name=name, version=version) + # Special handling for class decorator + if inspect.isclass(wrapped): + # Store original __init__ method + original_init = wrapped.__init__ + + # Define a new __init__ that sets up the agent span + @functools.wraps(original_init) + def init_wrapper(self, *args, **kwargs): + # Skip instrumentation if tracer not initialized + if not TracingCore.get_instance()._initialized: + return original_init(self, *args, **kwargs) + + # Use class name as operation name if not provided + operation_name = name or wrapped.__name__ + + # Get current context (likely session context if in a session) + current_ctx = None + + # Create span + span, ctx, token = _make_span(operation_name, entity_kind, version, parent_context=current_ctx) + + # Store agent context for this instance + instance_id = id(self) + _span_ctx.agent_instances[instance_id] = { + 'span': span, + 'ctx': ctx, + 'token': token + } + + # Record input + try: + _record_entity_input(span, args, kwargs) + except Exception as e: + logger.warning(f"Failed to record entity input: {e}") + + # Call original __init__ + result = original_init(self, *args, **kwargs) + + # Record output (instance) + try: + _record_entity_output(span, self) + except Exception as e: + logger.warning(f"Failed to record entity output: {e}") + + return result + + # Replace __init__ with our wrapper + wrapped.__init__ = init_wrapper + + # Store original __del__ or create one if it doesn't exist + original_del = getattr(wrapped, '__del__', None) + + def del_wrapper(self): + # Clean up agent span when instance is deleted + instance_id = id(self) + + if instance_id in _span_ctx.agent_instances: + agent_ctx = _span_ctx.agent_instances[instance_id] + _finalize_span(agent_ctx['span'], agent_ctx['token']) + del _span_ctx.agent_instances[instance_id] + + # Call original __del__ if it exists + if original_del is not None: + original_del(self) + + # Set the __del__ method + wrapped.__del__ = del_wrapper + + return wrapped + # Create the actual decorator wrapper function @wrapt.decorator def wrapper(wrapped, instance, args, kwargs): @@ -36,12 +119,27 @@ def wrapper(wrapped, instance, args, kwargs): # Use provided name or function name operation_name = name or wrapped.__name__ - + + # Find parent context - look for agent instance if this is a method + parent_ctx = None + if instance is not None and entity_kind != "agent": + instance_id = id(instance) + if instance_id in _span_ctx.agent_instances: + parent_ctx = _span_ctx.agent_instances[instance_id]['ctx'] + # Create and configure span - span, ctx, token = _make_span(operation_name, entity_kind, version) + span, ctx, token = _make_span(operation_name, entity_kind, version, parent_context=parent_ctx) + + # Store in current active spans + prev_span = _span_ctx.active_spans.get(entity_kind) + _span_ctx.active_spans[entity_kind] = { + 'span': span, + 'ctx': ctx, + 'token': token + } try: - # Record function inputs - safely handle potential serialization issues + # Record function inputs try: _record_entity_input(span, args, kwargs) except Exception as e: @@ -66,6 +164,11 @@ async def _awaited_result(): return awaited_result 
finally: _finalize_span(span, token) + # Restore previous span + if prev_span: + _span_ctx.active_spans[entity_kind] = prev_span + else: + _span_ctx.active_spans.pop(entity_kind, None) return _awaited_result() else: @@ -75,14 +178,28 @@ async def _awaited_result(): except Exception as e: logger.warning(f"Failed to record entity output: {e}") _finalize_span(span, token) + + # Restore previous span + if prev_span: + _span_ctx.active_spans[entity_kind] = prev_span + else: + _span_ctx.active_spans.pop(entity_kind, None) + return result except Exception as e: # Ensure span is properly ended even if there's an exception span.record_exception(e) _finalize_span(span, token) + + # Restore previous span + if prev_span: + _span_ctx.active_spans[entity_kind] = prev_span + else: + _span_ctx.active_spans.pop(entity_kind, None) + raise - return wrapper(wrapped) # type: ignore + return wrapper(wrapped) if not inspect.isclass(wrapped) else wrapped return decorator diff --git a/agentops/sdk/decorators/utility.py b/agentops/sdk/decorators/utility.py index ad383f1de..2c7b1c109 100644 --- a/agentops/sdk/decorators/utility.py +++ b/agentops/sdk/decorators/utility.py @@ -59,7 +59,8 @@ async def _process_async_generator(span: trace.Span, context_token: Any, generat context_api.detach(context_token) def _make_span( - operation_name: str, span_kind: str, version: Optional[int] = None, attributes: Dict[str, Any] = {} + operation_name: str, span_kind: str, version: Optional[int] = None, attributes: Dict[str, Any] = {}, + parent_context: Optional[Any] = None ) -> tuple: """ Create and initialize a new instrumentation span with proper context. @@ -72,6 +73,7 @@ def _make_span( span_kind: Type of operation (from SpanKind) version: Optional version identifier for the operation attributes: Optional dictionary of attributes to set on the span + parent_context: Optional explicit parent context to use instead of current context Returns: A tuple of (span, context, token) for span management @@ -82,16 +84,17 @@ def _make_span( # Get tracer tracer = TracingCore.get_instance().get_tracer() - # Get current context - this automatically maintains the parent-child relationship - current_context = context_api.get_current() + # Use provided parent context or get current context + current_context = parent_context if parent_context is not None else context_api.get_current() - # Add span kind to attributes - attributes.update({ - SpanAttributes.AGENTOPS_SPAN_KIND: span_kind, - }) + # Create a new attributes dictionary to avoid modifying the input + span_attributes = attributes.copy() + + # Add span kind to attributes using the correct attribute name + span_attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = span_kind # Create span with current context to maintain parent-child relationship - span = tracer.start_span(span_name, context=current_context, attributes=attributes) + span = tracer.start_span(span_name, context=current_context, attributes=span_attributes) # Create a new context with this span and attach it context = trace.set_span_in_context(span) @@ -102,10 +105,7 @@ def _make_span( if version is not None: span.set_attribute("agentops.operation.version", version) - # Set additional attributes - if attributes: - for key, value in attributes.items(): - span.set_attribute(key, value) + # No need to set attributes again, as we've already included them when creating the span return span, context, token From fe99181fa3256b1c575ecdbbe3daa531a675b400 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 03:01:42 +0200 Subject: [PATCH 
29/45] test_decorators correction Signed-off-by: Teo --- tests/unit/sdk/test_decorators.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/sdk/test_decorators.py b/tests/unit/sdk/test_decorators.py index 11aa07d8d..3c853de86 100644 --- a/tests/unit/sdk/test_decorators.py +++ b/tests/unit/sdk/test_decorators.py @@ -57,7 +57,7 @@ def test_session(): session_spans = [s for s in spans if s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.SESSION] agent_spans = [s for s in spans if s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.AGENT] operation_spans = [s for s in spans if s.attributes.get( - SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.OPERATION] + SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.TASK] assert len(session_spans) == 1 assert len(agent_spans) == 1 From 24f3b9b894cd0566c9f4e7b3ea7d94f018048cee Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 03:37:46 +0200 Subject: [PATCH 30/45] test multiple levels of nesting Signed-off-by: Teo --- tests/unit/sdk/test_decorators.py | 110 +++++++++++++++++++++++++++++- 1 file changed, 109 insertions(+), 1 deletion(-) diff --git a/tests/unit/sdk/test_decorators.py b/tests/unit/sdk/test_decorators.py index 3c853de86..3cdaf03b0 100644 --- a/tests/unit/sdk/test_decorators.py +++ b/tests/unit/sdk/test_decorators.py @@ -3,7 +3,7 @@ import pytest from opentelemetry import trace -from agentops.sdk.decorators import agent, operation, session +from agentops.sdk.decorators import agent, operation, session, workflow from agentops.semconv import SpanKind from agentops.semconv.span_attributes import SpanAttributes from tests.unit.sdk.instrumentation_tester import InstrumentationTester @@ -75,3 +75,111 @@ def test_session(): # The operation spans should be children of the agent span for op_span in operation_spans: assert op_span.parent.span_id == agent_span.context.span_id + + def test_multiple_nesting_levels(self, instrumentation: InstrumentationTester): + """Test multiple levels of span nesting with different decorator types.""" + + # Define a helper operation that will be the deepest in the chain + @operation + def helper_operation(value): + return f"Helper: {value}" + + # Define a deeply nested agent structure + @agent + class NestedAgent: + def __init__(self): + pass + + @operation + def level1_operation(self, value): + # First level of operation nesting + return self.level2_operation(value) + + @operation + def level2_operation(self, value): + # Second level of operation nesting + return self.level3_operation(value) + + @operation + def level3_operation(self, value): + # Third level of operation nesting that calls a standalone function + return helper_operation(value) + + # Create a workflow that uses the agent + @workflow + def test_workflow(): + agent = NestedAgent() + return agent.level1_operation("test_value") + + # Create a session that runs the workflow + @session + def test_session(): + return test_workflow() + + # Run the test + result = test_session() + + # Verify the result + assert result == "Helper: test_value" + + # Get all spans + spans = instrumentation.get_finished_spans() + + # Group spans by kind + session_spans = [s for s in spans if s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.SESSION] + workflow_spans = [s for s in spans if s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.WORKFLOW] + agent_spans = [s for s in spans if s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.AGENT] + operation_spans = [s for s in spans if 
s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.TASK] + + # Verify we have the correct number of spans for each type + assert len(session_spans) == 1, "Should have exactly one session span" + assert len(workflow_spans) == 1, "Should have exactly one workflow span" + assert len(agent_spans) == 1, "Should have exactly one agent span" + # One standalone helper operation + three operations in the agent + assert len(operation_spans) == 4, "Should have four operation spans" + + # Identify the spans by name for verification + session_span = session_spans[0] + workflow_span = workflow_spans[0] + agent_span = agent_spans[0] + + # Find operation spans by name + level1_span = next((s for s in operation_spans if s.name == "level1_operation.task"), None) + level2_span = next((s for s in operation_spans if s.name == "level2_operation.task"), None) + level3_span = next((s for s in operation_spans if s.name == "level3_operation.task"), None) + helper_span = next((s for s in operation_spans if s.name == "helper_operation.task"), None) + + assert level1_span is not None, "level1_operation span not found" + assert level2_span is not None, "level2_operation span not found" + assert level3_span is not None, "level3_operation span not found" + assert helper_span is not None, "helper_operation span not found" + + # Debug: Print spans and their parent IDs + print("\nVerifying span hierarchy:") + for span, name in [(session_span, "session"), (workflow_span, "workflow"), + (agent_span, "agent"), (level1_span, "level1"), + (level2_span, "level2"), (level3_span, "level3"), + (helper_span, "helper")]: + parent_id = span.parent.span_id if span.parent else "None" + span_id = span.context.span_id + print(f"{name:<8} - ID: {span_id}, Parent ID: {parent_id}") + + # Verify the current nesting behavior (will need to change later): + # session -> workflow -> agent -> (level1, level2, level3) + # level3 -> helper + + # Session is the root + assert session_span.parent is None + + # Workflow is a child of session + assert workflow_span.parent.span_id == session_span.context.span_id + + # Agent is a child of workflow + assert agent_span.parent.span_id == workflow_span.context.span_id + + # All agent operations are direct children of the agent + for op_span in [level1_span, level2_span, level3_span]: + assert op_span.parent.span_id == agent_span.context.span_id + + # Helper operation is a child of level3 since it's called from level3 + assert helper_span.parent.span_id == level3_span.context.span_id From 8f17cb8a011aed9e8bd05549be5f5367a7c9604e Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 03:38:09 +0200 Subject: [PATCH 31/45] Revert "threadlocal context mgmt" This reverts commit 363ae37755045cdd388674bcc462da2b6fe8c972. 
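
The thread-local span registry duplicated bookkeeping that OpenTelemetry's own
context propagation already provides, so it is reverted here and replaced in the
follow-up patch by a context-manager based helper built on
tracer.start_as_current_span. A minimal sketch of that pattern, assuming only the
standard OpenTelemetry API (the names below are illustrative, not the final
AgentOps helper):

    from opentelemetry import trace

    tracer = trace.get_tracer("agentops.sketch")

    def run_agent_and_task():
        # start_as_current_span manages the ambient context, so the inner
        # span is parented to the outer one without any manual token or
        # thread-local bookkeeping.
        with tracer.start_as_current_span("example_agent.agent"):
            with tracer.start_as_current_span("example_task.task") as task_span:
                task_span.set_attribute("agentops.operation.name", "example_task")
                return "done"

This is the shape adopted by _create_as_current_span in the next commit.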
--- agentops/sdk/decorators/factory.py | 125 +---------------------------- agentops/sdk/decorators/utility.py | 24 +++--- 2 files changed, 16 insertions(+), 133 deletions(-) diff --git a/agentops/sdk/decorators/factory.py b/agentops/sdk/decorators/factory.py index 804e45a9b..b88eb181e 100644 --- a/agentops/sdk/decorators/factory.py +++ b/agentops/sdk/decorators/factory.py @@ -1,8 +1,6 @@ import inspect import types import functools -import threading -from typing import Dict, Any, Optional import wrapt @@ -14,17 +12,6 @@ _record_entity_output) -# Thread-local storage for active spans by entity kind -class SpanContext(threading.local): - def __init__(self): - # Initialize thread-local storage - self.active_spans: Dict[str, Any] = {} - self.agent_instances: Dict[int, Any] = {} - -# Thread-local span context store -_span_ctx = SpanContext() - - def create_entity_decorator(entity_kind: str): """ Factory function that creates decorators for specific entity kinds. @@ -40,76 +27,6 @@ def decorator(wrapped=None, *, name=None, version=None): if wrapped is None: return functools.partial(decorator, name=name, version=version) - # Special handling for class decorator - if inspect.isclass(wrapped): - # Store original __init__ method - original_init = wrapped.__init__ - - # Define a new __init__ that sets up the agent span - @functools.wraps(original_init) - def init_wrapper(self, *args, **kwargs): - # Skip instrumentation if tracer not initialized - if not TracingCore.get_instance()._initialized: - return original_init(self, *args, **kwargs) - - # Use class name as operation name if not provided - operation_name = name or wrapped.__name__ - - # Get current context (likely session context if in a session) - current_ctx = None - - # Create span - span, ctx, token = _make_span(operation_name, entity_kind, version, parent_context=current_ctx) - - # Store agent context for this instance - instance_id = id(self) - _span_ctx.agent_instances[instance_id] = { - 'span': span, - 'ctx': ctx, - 'token': token - } - - # Record input - try: - _record_entity_input(span, args, kwargs) - except Exception as e: - logger.warning(f"Failed to record entity input: {e}") - - # Call original __init__ - result = original_init(self, *args, **kwargs) - - # Record output (instance) - try: - _record_entity_output(span, self) - except Exception as e: - logger.warning(f"Failed to record entity output: {e}") - - return result - - # Replace __init__ with our wrapper - wrapped.__init__ = init_wrapper - - # Store original __del__ or create one if it doesn't exist - original_del = getattr(wrapped, '__del__', None) - - def del_wrapper(self): - # Clean up agent span when instance is deleted - instance_id = id(self) - - if instance_id in _span_ctx.agent_instances: - agent_ctx = _span_ctx.agent_instances[instance_id] - _finalize_span(agent_ctx['span'], agent_ctx['token']) - del _span_ctx.agent_instances[instance_id] - - # Call original __del__ if it exists - if original_del is not None: - original_del(self) - - # Set the __del__ method - wrapped.__del__ = del_wrapper - - return wrapped - # Create the actual decorator wrapper function @wrapt.decorator def wrapper(wrapped, instance, args, kwargs): @@ -119,27 +36,12 @@ def wrapper(wrapped, instance, args, kwargs): # Use provided name or function name operation_name = name or wrapped.__name__ - - # Find parent context - look for agent instance if this is a method - parent_ctx = None - if instance is not None and entity_kind != "agent": - instance_id = id(instance) - if instance_id in 
_span_ctx.agent_instances: - parent_ctx = _span_ctx.agent_instances[instance_id]['ctx'] - + # Create and configure span - span, ctx, token = _make_span(operation_name, entity_kind, version, parent_context=parent_ctx) - - # Store in current active spans - prev_span = _span_ctx.active_spans.get(entity_kind) - _span_ctx.active_spans[entity_kind] = { - 'span': span, - 'ctx': ctx, - 'token': token - } + span, ctx, token = _make_span(operation_name, entity_kind, version) try: - # Record function inputs + # Record function inputs - safely handle potential serialization issues try: _record_entity_input(span, args, kwargs) except Exception as e: @@ -164,11 +66,6 @@ async def _awaited_result(): return awaited_result finally: _finalize_span(span, token) - # Restore previous span - if prev_span: - _span_ctx.active_spans[entity_kind] = prev_span - else: - _span_ctx.active_spans.pop(entity_kind, None) return _awaited_result() else: @@ -178,28 +75,14 @@ async def _awaited_result(): except Exception as e: logger.warning(f"Failed to record entity output: {e}") _finalize_span(span, token) - - # Restore previous span - if prev_span: - _span_ctx.active_spans[entity_kind] = prev_span - else: - _span_ctx.active_spans.pop(entity_kind, None) - return result except Exception as e: # Ensure span is properly ended even if there's an exception span.record_exception(e) _finalize_span(span, token) - - # Restore previous span - if prev_span: - _span_ctx.active_spans[entity_kind] = prev_span - else: - _span_ctx.active_spans.pop(entity_kind, None) - raise - return wrapper(wrapped) if not inspect.isclass(wrapped) else wrapped + return wrapper(wrapped) # type: ignore return decorator diff --git a/agentops/sdk/decorators/utility.py b/agentops/sdk/decorators/utility.py index 2c7b1c109..ad383f1de 100644 --- a/agentops/sdk/decorators/utility.py +++ b/agentops/sdk/decorators/utility.py @@ -59,8 +59,7 @@ async def _process_async_generator(span: trace.Span, context_token: Any, generat context_api.detach(context_token) def _make_span( - operation_name: str, span_kind: str, version: Optional[int] = None, attributes: Dict[str, Any] = {}, - parent_context: Optional[Any] = None + operation_name: str, span_kind: str, version: Optional[int] = None, attributes: Dict[str, Any] = {} ) -> tuple: """ Create and initialize a new instrumentation span with proper context. 
@@ -73,7 +72,6 @@ def _make_span( span_kind: Type of operation (from SpanKind) version: Optional version identifier for the operation attributes: Optional dictionary of attributes to set on the span - parent_context: Optional explicit parent context to use instead of current context Returns: A tuple of (span, context, token) for span management @@ -84,17 +82,16 @@ def _make_span( # Get tracer tracer = TracingCore.get_instance().get_tracer() - # Use provided parent context or get current context - current_context = parent_context if parent_context is not None else context_api.get_current() + # Get current context - this automatically maintains the parent-child relationship + current_context = context_api.get_current() - # Create a new attributes dictionary to avoid modifying the input - span_attributes = attributes.copy() - - # Add span kind to attributes using the correct attribute name - span_attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = span_kind + # Add span kind to attributes + attributes.update({ + SpanAttributes.AGENTOPS_SPAN_KIND: span_kind, + }) # Create span with current context to maintain parent-child relationship - span = tracer.start_span(span_name, context=current_context, attributes=span_attributes) + span = tracer.start_span(span_name, context=current_context, attributes=attributes) # Create a new context with this span and attach it context = trace.set_span_in_context(span) @@ -105,7 +102,10 @@ def _make_span( if version is not None: span.set_attribute("agentops.operation.version", version) - # No need to set attributes again, as we've already included them when creating the span + # Set additional attributes + if attributes: + for key, value in attributes.items(): + span.set_attribute(key, value) return span, context, token From fbfdb99b3ae7336d5332014dcf353d20395b0c90 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 03:42:44 +0200 Subject: [PATCH 32/45] Implement create_as_current_span Signed-off-by: Teo --- agentops/sdk/decorators/factory.py | 96 +++++++++++++++++---------- agentops/sdk/decorators/utility.py | 102 ++++++++++++++++++++++------- 2 files changed, 138 insertions(+), 60 deletions(-) diff --git a/agentops/sdk/decorators/factory.py b/agentops/sdk/decorators/factory.py index b88eb181e..c06e81850 100644 --- a/agentops/sdk/decorators/factory.py +++ b/agentops/sdk/decorators/factory.py @@ -1,15 +1,16 @@ import inspect import types import functools +import asyncio import wrapt from agentops.logging import logger from agentops.sdk.core import TracingCore -from .utility import (_finalize_span, _make_span, _process_async_generator, - _process_sync_generator, _record_entity_input, - _record_entity_output) +from .utility import (_create_as_current_span, _finalize_span, _make_span, + _process_async_generator, _process_sync_generator, + _record_entity_input, _record_entity_output) def create_entity_decorator(entity_kind: str): @@ -37,50 +38,75 @@ def wrapper(wrapped, instance, args, kwargs): # Use provided name or function name operation_name = name or wrapped.__name__ - # Create and configure span - span, ctx, token = _make_span(operation_name, entity_kind, version) + # Handle different types of functions (sync, async, generators) + is_async = asyncio.iscoroutinefunction(wrapped) or inspect.iscoroutinefunction(wrapped) + is_generator = inspect.isgeneratorfunction(wrapped) + is_async_generator = inspect.isasyncgenfunction(wrapped) - try: - # Record function inputs - safely handle potential serialization issues + # Handle generator functions + if is_generator: + 
# Use the old approach for generators + span, ctx, token = _make_span(operation_name, entity_kind, version) try: _record_entity_input(span, args, kwargs) except Exception as e: logger.warning(f"Failed to record entity input: {e}") - - # Execute the function and handle result based on its type + result = wrapped(*args, **kwargs) - - if isinstance(result, types.GeneratorType): - return _process_sync_generator(span, result) - elif isinstance(result, types.AsyncGeneratorType): - return _process_async_generator(span, token, result) - elif inspect.iscoroutine(result): - # For async functions, we need to create a wrapper that awaits the result - async def _awaited_result(): + return _process_sync_generator(span, result) + + # Handle async generator functions + elif is_async_generator: + # Use the old approach for async generators + span, ctx, token = _make_span(operation_name, entity_kind, version) + try: + _record_entity_input(span, args, kwargs) + except Exception as e: + logger.warning(f"Failed to record entity input: {e}") + + result = wrapped(*args, **kwargs) + return _process_async_generator(span, token, result) + + # Handle async functions + elif is_async: + async def _wrapped_async(): + with _create_as_current_span(operation_name, entity_kind, version) as span: + try: + _record_entity_input(span, args, kwargs) + except Exception as e: + logger.warning(f"Failed to record entity input: {e}") + try: - awaited_result = await result + result = await wrapped(*args, **kwargs) try: - _record_entity_output(span, awaited_result) + _record_entity_output(span, result) except Exception as e: logger.warning(f"Failed to record entity output: {e}") - return awaited_result - finally: - _finalize_span(span, token) - - return _awaited_result() - else: - # Handle regular return values + return result + except Exception as e: + span.record_exception(e) + raise + + return _wrapped_async() + + # Handle sync functions + else: + with _create_as_current_span(operation_name, entity_kind, version) as span: try: - _record_entity_output(span, result) + _record_entity_input(span, args, kwargs) + except Exception as e: + logger.warning(f"Failed to record entity input: {e}") + + try: + result = wrapped(*args, **kwargs) + try: + _record_entity_output(span, result) + except Exception as e: + logger.warning(f"Failed to record entity output: {e}") + return result except Exception as e: - logger.warning(f"Failed to record entity output: {e}") - _finalize_span(span, token) - return result - except Exception as e: - # Ensure span is properly ended even if there's an exception - span.record_exception(e) - _finalize_span(span, token) - raise + span.record_exception(e) + raise return wrapper(wrapped) # type: ignore diff --git a/agentops/sdk/decorators/utility.py b/agentops/sdk/decorators/utility.py index ad383f1de..66ea09ea6 100644 --- a/agentops/sdk/decorators/utility.py +++ b/agentops/sdk/decorators/utility.py @@ -2,12 +2,14 @@ import os import types import warnings +from contextlib import contextmanager from functools import wraps -from typing import Any, Dict, Optional +from typing import Any, Callable, ContextManager, Dict, Generator, Optional from opentelemetry import context as context_api from opentelemetry import trace from opentelemetry.context import attach, set_value +from opentelemetry.trace import Span from agentops.helpers.serialization import safe_serialize from agentops.logging import logger @@ -30,10 +32,13 @@ def set_entity_path(entity_path: str) -> None: attach(set_value("entity_path", entity_path)) # Helper 
functions for content management + + def _check_content_size(content_json: str) -> bool: """Verify that a JSON string is within acceptable size limits (1MB)""" return len(content_json) < 1_000_000 + def _process_sync_generator(span: trace.Span, generator: types.GeneratorType): """Process a synchronous generator and manage its span lifecycle""" # Ensure span context is attached to the generator context @@ -58,14 +63,20 @@ async def _process_async_generator(span: trace.Span, context_token: Any, generat span.end() context_api.detach(context_token) -def _make_span( - operation_name: str, span_kind: str, version: Optional[int] = None, attributes: Dict[str, Any] = {} -) -> tuple: + +@contextmanager +def _create_as_current_span( + operation_name: str, + span_kind: str, + version: Optional[int] = None, + attributes: Optional[Dict[str, Any]] = None +) -> Generator[Span, None, None]: """ - Create and initialize a new instrumentation span with proper context. + Create and yield an instrumentation span as the current span using proper context management. This function creates a span that will automatically be nested properly - within any parent span based on the current execution context. + within any parent span based on the current execution context, using OpenTelemetry's + context management to properly handle span lifecycle. Args: operation_name: Name of the operation being traced @@ -73,8 +84,8 @@ def _make_span( version: Optional version identifier for the operation attributes: Optional dictionary of attributes to set on the span - Returns: - A tuple of (span, context, token) for span management + Yields: + A span with proper context that will be automatically closed when exiting the context """ # Create span with proper naming convention span_name = f"{operation_name}.{span_kind}" @@ -82,32 +93,73 @@ def _make_span( # Get tracer tracer = TracingCore.get_instance().get_tracer() - # Get current context - this automatically maintains the parent-child relationship - current_context = context_api.get_current() + # Prepare attributes + if attributes is None: + attributes = {} # Add span kind to attributes - attributes.update({ - SpanAttributes.AGENTOPS_SPAN_KIND: span_kind, - }) + attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = span_kind - # Create span with current context to maintain parent-child relationship - span = tracer.start_span(span_name, context=current_context, attributes=attributes) + # Add standard attributes + attributes["agentops.operation.name"] = operation_name + if version is not None: + attributes["agentops.operation.version"] = version - # Create a new context with this span and attach it - context = trace.set_span_in_context(span) - token = context_api.attach(context) + # Use OpenTelemetry's context manager to properly handle span lifecycle + with tracer.start_as_current_span(span_name, attributes=attributes) as span: + yield span + + +def _make_span( + operation_name: str, + span_kind: str, + version: Optional[int] = None, + attributes: Optional[Dict[str, Any]] = None +) -> tuple: + """ + Create a span without context management for manual span lifecycle control. + + This function creates a span that will be properly nested within any parent span + based on the current execution context, but requires manual ending via _finalize_span. 
+ + Args: + operation_name: Name of the operation being traced + span_kind: Type of operation (from SpanKind) + version: Optional version identifier for the operation + attributes: Optional dictionary of attributes to set on the span + + Returns: + A tuple of (span, context, token) where: + - span is the created span + - context is the span context + - token is the context token needed for detaching + """ + # Create span with proper naming convention + span_name = f"{operation_name}.{span_kind}" + + # Get tracer + tracer = TracingCore.get_instance().get_tracer() + + # Prepare attributes + if attributes is None: + attributes = {} + + # Add span kind to attributes + attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = span_kind # Add standard attributes - span.set_attribute("agentops.operation.name", operation_name) + attributes["agentops.operation.name"] = operation_name if version is not None: - span.set_attribute("agentops.operation.version", version) + attributes["agentops.operation.version"] = version + + # Create the span (not as current) + span = tracer.start_span(span_name, attributes=attributes) - # Set additional attributes - if attributes: - for key, value in attributes.items(): - span.set_attribute(key, value) + # Set as current context and get token for later detachment + ctx = trace.set_span_in_context(span) + token = context_api.attach(ctx) - return span, context, token + return span, ctx, token def _record_entity_input(span: trace.Span, args: tuple, kwargs: Dict[str, Any]) -> None: From 82943d82237d27bc0fb2e3277bb501eed495c19b Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 03:53:49 +0200 Subject: [PATCH 33/45] DRAFT Signed-off-by: Teo --- agentops/sdk/decorators/utility.py | 63 ++++++++++++++++++-- test_context.py | 62 ++++++++++++++++++++ test_context_comparison.py | 92 ++++++++++++++++++++++++++++++ test_nesting.py | 32 +++++++++++ uv.lock | 3 +- 5 files changed, 246 insertions(+), 6 deletions(-) create mode 100644 test_context.py create mode 100644 test_context_comparison.py create mode 100644 test_nesting.py diff --git a/agentops/sdk/decorators/utility.py b/agentops/sdk/decorators/utility.py index 66ea09ea6..a55c54056 100644 --- a/agentops/sdk/decorators/utility.py +++ b/agentops/sdk/decorators/utility.py @@ -9,7 +9,7 @@ from opentelemetry import context as context_api from opentelemetry import trace from opentelemetry.context import attach, set_value -from opentelemetry.trace import Span +from opentelemetry.trace import Span, SpanContext from agentops.helpers.serialization import safe_serialize from agentops.logging import logger @@ -64,6 +64,20 @@ async def _process_async_generator(span: trace.Span, context_token: Any, generat context_api.detach(context_token) +def _get_current_span_info(): + """Helper to get information about the current span for debugging""" + current_span = trace.get_current_span() + if hasattr(current_span, "get_span_context"): + ctx = current_span.get_span_context() + return { + "span_id": f"{ctx.span_id:x}" if hasattr(ctx, "span_id") else "None", + "trace_id": f"{ctx.trace_id:x}" if hasattr(ctx, "trace_id") else "None", + "name": getattr(current_span, "name", "Unknown"), + "is_recording": getattr(current_span, "is_recording", False) + } + return {"name": "No current span"} + + @contextmanager def _create_as_current_span( operation_name: str, @@ -87,6 +101,10 @@ def _create_as_current_span( Yields: A span with proper context that will be automatically closed when exiting the context """ + # Log before we do anything + before_span = 
_get_current_span_info() + logger.debug(f"[DEBUG] BEFORE {operation_name}.{span_kind} - Current context: {before_span}") + # Create span with proper naming convention span_name = f"{operation_name}.{span_kind}" @@ -105,9 +123,21 @@ def _create_as_current_span( if version is not None: attributes["agentops.operation.version"] = version + # Get current context explicitly to debug it + current_context = context_api.get_current() + # Use OpenTelemetry's context manager to properly handle span lifecycle - with tracer.start_as_current_span(span_name, attributes=attributes) as span: + with tracer.start_as_current_span(span_name, attributes=attributes, context=current_context) as span: + # Log after span creation + if hasattr(span, "get_span_context"): + span_ctx = span.get_span_context() + logger.debug(f"[DEBUG] CREATED {span_name} - span_id: {span_ctx.span_id:x}, parent: {before_span.get('span_id', 'None')}") + yield span + + # Log after we're done + after_span = _get_current_span_info() + logger.debug(f"[DEBUG] AFTER {operation_name}.{span_kind} - Returned to context: {after_span}") def _make_span( @@ -134,6 +164,10 @@ def _make_span( - context is the span context - token is the context token needed for detaching """ + # Log before we do anything + before_span = _get_current_span_info() + logger.debug(f"[DEBUG] BEFORE _make_span {operation_name}.{span_kind} - Current context: {before_span}") + # Create span with proper naming convention span_name = f"{operation_name}.{span_kind}" @@ -152,12 +186,20 @@ def _make_span( if version is not None: attributes["agentops.operation.version"] = version - # Create the span (not as current) - span = tracer.start_span(span_name, attributes=attributes) + # Get current context explicitly + current_context = context_api.get_current() + + # Create the span with explicit context + span = tracer.start_span(span_name, context=current_context, attributes=attributes) # Set as current context and get token for later detachment ctx = trace.set_span_in_context(span) token = context_api.attach(ctx) + + # Log after span creation + if hasattr(span, "get_span_context"): + span_ctx = span.get_span_context() + logger.debug(f"[DEBUG] CREATED _make_span {span_name} - span_id: {span_ctx.span_id:x}, parent: {before_span.get('span_id', 'None')}") return span, ctx, token @@ -191,5 +233,18 @@ def _record_entity_output(span: trace.Span, result: Any) -> None: def _finalize_span(span: trace.Span, token: Any) -> None: """End the span and detach the context token""" + if hasattr(span, "get_span_context") and hasattr(span.get_span_context(), "span_id"): + span_id = f"{span.get_span_context().span_id:x}" + logger.debug(f"[DEBUG] ENDING span {getattr(span, 'name', 'unknown')} - span_id: {span_id}") + span.end() + + # Debug info before detaching + current_after_end = _get_current_span_info() + logger.debug(f"[DEBUG] AFTER span.end() - Current context: {current_after_end}") + context_api.detach(token) + + # Debug info after detaching + final_context = _get_current_span_info() + logger.debug(f"[DEBUG] AFTER detach - Final context: {final_context}") diff --git a/test_context.py b/test_context.py new file mode 100644 index 000000000..afaf49b18 --- /dev/null +++ b/test_context.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python +""" +Test script to debug OpenTelemetry context propagation issues. 
+""" +import time +from opentelemetry import trace +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import ConsoleSpanExporter, BatchSpanProcessor +from agentops.sdk.decorators import agent, task, operation +from agentops.sdk.core import TracingCore +from agentops.client.client import Client +from agentops.sdk.decorators.utility import _get_current_span_info +from agentops.logging import logger + +# Initialize tracing +client = Client() # Use default initialization +client.init() # This should set up TracingCore + +# Add a console exporter for local debugging +provider = trace.get_tracer_provider() +if hasattr(provider, "add_span_processor"): + provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter())) + +@agent +def my_agent(): + """Test agent function that should create a parent span""" + logger.debug(f"In my_agent - current span: {_get_current_span_info()}") + + # Call the task inside the agent + result = my_task() + + # Also explicitly call operation with a context manager + tracer = TracingCore.get_instance().get_tracer() + with tracer.start_as_current_span("manual_operation") as manual_span: + manual_span.set_attribute("manual", True) + logger.debug(f"In manual operation - current span: {_get_current_span_info()}") + time.sleep(0.1) + + return result + +@task +def my_task(): + """Test task function that should create a child span under the agent span""" + logger.debug(f"In my_task - current span: {_get_current_span_info()}") + + # Call a nested operation + return my_operation() + +@operation +def my_operation(): + """Test operation that should be nested under the task span""" + logger.debug(f"In my_operation - current span: {_get_current_span_info()}") + time.sleep(0.1) + return "done" + +if __name__ == "__main__": + # Run the test + result = my_agent() + print(f"Result: {result}") + + # Give the batch processor time to export + time.sleep(1) \ No newline at end of file diff --git a/test_context_comparison.py b/test_context_comparison.py new file mode 100644 index 000000000..db2de3825 --- /dev/null +++ b/test_context_comparison.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python +""" +Test script to compare the old and new context management approaches. 
+""" +import time +from opentelemetry import trace, context as context_api +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import ConsoleSpanExporter, BatchSpanProcessor +from agentops.sdk.decorators import agent, task, operation +from agentops.sdk.core import TracingCore +from agentops.client.client import Client +from agentops.sdk.decorators.utility import (_get_current_span_info, _make_span, + _finalize_span, _create_as_current_span) +from agentops.logging import logger + +# Initialize tracing +client = Client() +client.init() + +# Add a console exporter for local debugging +provider = trace.get_tracer_provider() +if hasattr(provider, "add_span_processor"): + provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter())) + +def test_manual_context(): + """Test using the manual context management approach""" + logger.debug("===== TESTING MANUAL CONTEXT APPROACH =====") + + # Create the root span + root_span, root_ctx, root_token = _make_span("root", "test") + logger.debug(f"Created root span: {_get_current_span_info()}") + + try: + # Create a child span + child_span, child_ctx, child_token = _make_span("child", "test") + logger.debug(f"Created child span: {_get_current_span_info()}") + + try: + # Create a grandchild span + grandchild_span, grandchild_ctx, grandchild_token = _make_span("grandchild", "test") + logger.debug(f"Created grandchild span: {_get_current_span_info()}") + + # Do some work + time.sleep(0.1) + + # End the grandchild span + _finalize_span(grandchild_span, grandchild_token) + logger.debug(f"After ending grandchild span: {_get_current_span_info()}") + + finally: + # End the child span + _finalize_span(child_span, child_token) + logger.debug(f"After ending child span: {_get_current_span_info()}") + + finally: + # End the root span + _finalize_span(root_span, root_token) + logger.debug(f"After ending root span: {_get_current_span_info()}") + +def test_context_manager(): + """Test using the context manager approach""" + logger.debug("===== TESTING CONTEXT MANAGER APPROACH =====") + + # Get a tracer + tracer = TracingCore.get_instance().get_tracer() + + # Create spans using context manager (native OpenTelemetry approach) + with _create_as_current_span("root", "test") as root_span: + logger.debug(f"Created root span: {_get_current_span_info()}") + + with _create_as_current_span("child", "test") as child_span: + logger.debug(f"Created child span: {_get_current_span_info()}") + + with _create_as_current_span("grandchild", "test") as grandchild_span: + logger.debug(f"Created grandchild span: {_get_current_span_info()}") + + # Do some work + time.sleep(0.1) + + logger.debug(f"After grandchild span: {_get_current_span_info()}") + + logger.debug(f"After child span: {_get_current_span_info()}") + + logger.debug(f"After root span: {_get_current_span_info()}") + +if __name__ == "__main__": + # Test both approaches + test_manual_context() + test_context_manager() + + # Give the batch processor time to export + time.sleep(1) \ No newline at end of file diff --git a/test_nesting.py b/test_nesting.py new file mode 100644 index 000000000..5cf686b4e --- /dev/null +++ b/test_nesting.py @@ -0,0 +1,32 @@ +import time +from agentops.sdk.decorators import agent, operation +from agentops.sdk.core import TracingCore + +# Initialize tracing +TracingCore.get_instance().initialize() + +@operation +def perform_operation(task_name): + """A simple operation that will be nested within an agent.""" + print(f"Performing operation: {task_name}") + 
time.sleep(0.5) # Simulate work + return f"Completed {task_name}" + +@agent +def run_agent(agent_name): + """An agent that will contain nested operations.""" + print(f"Agent {agent_name} is running") + + # Perform multiple operations + result1 = perform_operation("task1") + result2 = perform_operation("task2") + + return f"Agent {agent_name} completed with results: {result1}, {result2}" + +if __name__ == "__main__": + # Run the agent which will contain nested operations + result = run_agent("TestAgent") + print(f"Final result: {result}") + + # Give time for spans to be exported + time.sleep(1) \ No newline at end of file diff --git a/uv.lock b/uv.lock index b9b38f4c7..e0bfa3465 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,4 @@ version = 1 -revision = 1 requires-python = ">=3.9, <3.14" resolution-markers = [ "python_full_version >= '3.13' and platform_python_implementation == 'PyPy'", @@ -26,7 +25,7 @@ constraints = [ [[package]] name = "agentops" -version = "0.4.0" +version = "0.4.2" source = { editable = "." } dependencies = [ { name = "opentelemetry-api", version = "1.22.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, From f8fbd563d296bcbc003adc85b7ac69253463b02e Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 04:31:01 +0200 Subject: [PATCH 34/45] Use proper classes wrapping Signed-off-by: Teo --- agentops/sdk/decorators/factory.py | 39 +++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/agentops/sdk/decorators/factory.py b/agentops/sdk/decorators/factory.py index c06e81850..15d881c81 100644 --- a/agentops/sdk/decorators/factory.py +++ b/agentops/sdk/decorators/factory.py @@ -27,8 +27,44 @@ def decorator(wrapped=None, *, name=None, version=None): # Handle case where decorator is called with parameters if wrapped is None: return functools.partial(decorator, name=name, version=version) + + # Handle class decoration + if inspect.isclass(wrapped): + # Create a proxy class that wraps the original class + class WrappedClass(wrapped): + def __init__(self, *args, **kwargs): + # Start span when instance is created + operation_name = name or wrapped.__name__ + self._span_context = _create_as_current_span(operation_name, entity_kind, version) + self._span = self._span_context.__enter__() + + try: + _record_entity_input(self._span, args, kwargs) + except Exception as e: + logger.warning(f"Failed to record entity input: {e}") + + # Call the original __init__ + super().__init__(*args, **kwargs) + + def __del__(self): + # End span when instance is destroyed + if hasattr(self, '_span') and hasattr(self, '_span_context'): + try: + _record_entity_output(self._span, self) + except Exception as e: + logger.warning(f"Failed to record entity output: {e}") + + self._span_context.__exit__(None, None, None) + + # Preserve metadata of the original class + WrappedClass.__name__ = wrapped.__name__ + WrappedClass.__qualname__ = wrapped.__qualname__ + WrappedClass.__module__ = wrapped.__module__ + WrappedClass.__doc__ = wrapped.__doc__ + + return WrappedClass - # Create the actual decorator wrapper function + # Create the actual decorator wrapper function for functions @wrapt.decorator def wrapper(wrapped, instance, args, kwargs): # Skip instrumentation if tracer not initialized @@ -108,6 +144,7 @@ async def _wrapped_async(): span.record_exception(e) raise + # Return the wrapper for functions, we already returned WrappedClass for classes return wrapper(wrapped) # type: ignore return decorator From 
5072488e62c045e1509e533628eeb05a1147d544 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 04:33:13 +0200 Subject: [PATCH 35/45] decorators factory - use lengthier attr names to avoid collisions Signed-off-by: Teo --- agentops/sdk/decorators/factory.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/agentops/sdk/decorators/factory.py b/agentops/sdk/decorators/factory.py index 15d881c81..b29ade4d6 100644 --- a/agentops/sdk/decorators/factory.py +++ b/agentops/sdk/decorators/factory.py @@ -35,11 +35,11 @@ class WrappedClass(wrapped): def __init__(self, *args, **kwargs): # Start span when instance is created operation_name = name or wrapped.__name__ - self._span_context = _create_as_current_span(operation_name, entity_kind, version) - self._span = self._span_context.__enter__() + self._agentops_span_context_manager = _create_as_current_span(operation_name, entity_kind, version) + self._agentops_active_span = self._agentops_span_context_manager.__enter__() try: - _record_entity_input(self._span, args, kwargs) + _record_entity_input(self._agentops_active_span, args, kwargs) except Exception as e: logger.warning(f"Failed to record entity input: {e}") @@ -48,13 +48,13 @@ def __init__(self, *args, **kwargs): def __del__(self): # End span when instance is destroyed - if hasattr(self, '_span') and hasattr(self, '_span_context'): + if hasattr(self, '_agentops_active_span') and hasattr(self, '_agentops_span_context_manager'): try: - _record_entity_output(self._span, self) + _record_entity_output(self._agentops_active_span, self) except Exception as e: logger.warning(f"Failed to record entity output: {e}") - self._span_context.__exit__(None, None, None) + self._agentops_span_context_manager.__exit__(None, None, None) # Preserve metadata of the original class WrappedClass.__name__ = wrapped.__name__ From f1d77f321c2a57de9c7303805400f7da26ff3dc3 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 04:50:58 +0200 Subject: [PATCH 36/45] fix test_decorators Signed-off-by: Teo --- tests/unit/sdk/test_decorators.py | 166 ++++++++---------------------- 1 file changed, 43 insertions(+), 123 deletions(-) diff --git a/tests/unit/sdk/test_decorators.py b/tests/unit/sdk/test_decorators.py index 3cdaf03b0..cbc5bcde7 100644 --- a/tests/unit/sdk/test_decorators.py +++ b/tests/unit/sdk/test_decorators.py @@ -1,7 +1,8 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, cast import pytest from opentelemetry import trace +from opentelemetry.sdk.trace import ReadableSpan from agentops.sdk.decorators import agent, operation, session, workflow from agentops.semconv import SpanKind @@ -42,144 +43,63 @@ def test_session(): # Run the test with our instrumentor result = test_session() - instrumentation.get_finished_spans() - # Verify the result assert result == "Processed: test message" # Get all spans captured during the test spans = instrumentation.get_finished_spans() - # We should have 3 spans: session, agent, and two operations + # Print detailed span information for debugging + print("\nDetailed span information:") + for i, span in enumerate(spans): + parent_id = span.parent.span_id if span.parent else "None" + span_id = span.context.span_id if span.context else "None" + print(f"Span {i}: name={span.name}, span_id={span_id}, parent_id={parent_id}") + + # We should have 4 spans: session, agent, and two operations assert len(spans) == 4 # Verify span kinds - session_spans = [s for s in spans if s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == 
SpanKind.SESSION] - agent_spans = [s for s in spans if s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.AGENT] - operation_spans = [s for s in spans if s.attributes.get( + session_spans = [s for s in spans if s.attributes and s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.SESSION] + agent_spans = [s for s in spans if s.attributes and s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.AGENT] + operation_spans = [s for s in spans if s.attributes and s.attributes.get( SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.TASK] assert len(session_spans) == 1 assert len(agent_spans) == 1 assert len(operation_spans) == 2 - # Verify span hierarchy by checking parent-child relationships - # The session span should be the root + # Find the main_operation and nested_operation spans + main_operation = None + nested_operation = None + + for span in operation_spans: + if span.attributes and span.attributes.get('agentops.operation.name') == 'main_operation': + main_operation = span + elif span.attributes and span.attributes.get('agentops.operation.name') == 'nested_operation': + nested_operation = span + + assert main_operation is not None, "main_operation span not found" + assert nested_operation is not None, "nested_operation span not found" + + # Verify the session span is the root session_span = session_spans[0] assert session_span.parent is None - - # The agent span should be a child of the session span + + # Verify the agent span is a child of the session span agent_span = agent_spans[0] + assert agent_span.parent is not None + assert session_span.context is not None assert agent_span.parent.span_id == session_span.context.span_id - - # The operation spans should be children of the agent span - for op_span in operation_spans: - assert op_span.parent.span_id == agent_span.context.span_id - - def test_multiple_nesting_levels(self, instrumentation: InstrumentationTester): - """Test multiple levels of span nesting with different decorator types.""" - - # Define a helper operation that will be the deepest in the chain - @operation - def helper_operation(value): - return f"Helper: {value}" - - # Define a deeply nested agent structure - @agent - class NestedAgent: - def __init__(self): - pass - - @operation - def level1_operation(self, value): - # First level of operation nesting - return self.level2_operation(value) - - @operation - def level2_operation(self, value): - # Second level of operation nesting - return self.level3_operation(value) - - @operation - def level3_operation(self, value): - # Third level of operation nesting that calls a standalone function - return helper_operation(value) - - # Create a workflow that uses the agent - @workflow - def test_workflow(): - agent = NestedAgent() - return agent.level1_operation("test_value") - - # Create a session that runs the workflow - @session - def test_session(): - return test_workflow() - - # Run the test - result = test_session() - - # Verify the result - assert result == "Helper: test_value" - - # Get all spans - spans = instrumentation.get_finished_spans() - - # Group spans by kind - session_spans = [s for s in spans if s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.SESSION] - workflow_spans = [s for s in spans if s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.WORKFLOW] - agent_spans = [s for s in spans if s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.AGENT] - operation_spans = [s for s in spans if s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == 
SpanKind.TASK] - - # Verify we have the correct number of spans for each type - assert len(session_spans) == 1, "Should have exactly one session span" - assert len(workflow_spans) == 1, "Should have exactly one workflow span" - assert len(agent_spans) == 1, "Should have exactly one agent span" - # One standalone helper operation + three operations in the agent - assert len(operation_spans) == 4, "Should have four operation spans" - - # Identify the spans by name for verification - session_span = session_spans[0] - workflow_span = workflow_spans[0] - agent_span = agent_spans[0] - - # Find operation spans by name - level1_span = next((s for s in operation_spans if s.name == "level1_operation.task"), None) - level2_span = next((s for s in operation_spans if s.name == "level2_operation.task"), None) - level3_span = next((s for s in operation_spans if s.name == "level3_operation.task"), None) - helper_span = next((s for s in operation_spans if s.name == "helper_operation.task"), None) - - assert level1_span is not None, "level1_operation span not found" - assert level2_span is not None, "level2_operation span not found" - assert level3_span is not None, "level3_operation span not found" - assert helper_span is not None, "helper_operation span not found" - - # Debug: Print spans and their parent IDs - print("\nVerifying span hierarchy:") - for span, name in [(session_span, "session"), (workflow_span, "workflow"), - (agent_span, "agent"), (level1_span, "level1"), - (level2_span, "level2"), (level3_span, "level3"), - (helper_span, "helper")]: - parent_id = span.parent.span_id if span.parent else "None" - span_id = span.context.span_id - print(f"{name:<8} - ID: {span_id}, Parent ID: {parent_id}") - - # Verify the current nesting behavior (will need to change later): - # session -> workflow -> agent -> (level1, level2, level3) - # level3 -> helper - - # Session is the root - assert session_span.parent is None - - # Workflow is a child of session - assert workflow_span.parent.span_id == session_span.context.span_id - - # Agent is a child of workflow - assert agent_span.parent.span_id == workflow_span.context.span_id - - # All agent operations are direct children of the agent - for op_span in [level1_span, level2_span, level3_span]: - assert op_span.parent.span_id == agent_span.context.span_id - - # Helper operation is a child of level3 since it's called from level3 - assert helper_span.parent.span_id == level3_span.context.span_id + + # Verify main_operation is a child of the agent span + assert main_operation.parent is not None + assert agent_span.context is not None + assert main_operation.parent.span_id == agent_span.context.span_id + + # Verify nested_operation is a child of main_operation + assert nested_operation.parent is not None + assert main_operation.context is not None + assert nested_operation.parent.span_id == main_operation.context.span_id + + From 91b006f6aa8a6030e5dcf4cb6500ff3d2b301209 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 04:54:48 +0200 Subject: [PATCH 37/45] Add multiple tests_decorators (async, async gen, etc) Signed-off-by: Teo --- tests/unit/sdk/test_decorators.py | 378 +++++++++++++++++++++++++++++- 1 file changed, 377 insertions(+), 1 deletion(-) diff --git a/tests/unit/sdk/test_decorators.py b/tests/unit/sdk/test_decorators.py index cbc5bcde7..fa389e59e 100644 --- a/tests/unit/sdk/test_decorators.py +++ b/tests/unit/sdk/test_decorators.py @@ -1,4 +1,5 @@ -from typing import TYPE_CHECKING, cast +from typing import TYPE_CHECKING, cast, AsyncGenerator, 
Generator +import asyncio import pytest from opentelemetry import trace @@ -102,4 +103,379 @@ def test_session(): assert main_operation.context is not None assert nested_operation.parent.span_id == main_operation.context.span_id + def test_async_operations(self, instrumentation: InstrumentationTester): + """Test that async operations are properly nested.""" + + # Define the test agent with async operations + @agent + class AsyncAgent: + def __init__(self): + pass + + @operation + async def nested_async_operation(self, message): + """Async operation that should appear as a child of the main operation""" + await asyncio.sleep(0.01) # Small delay to simulate async work + return f"Processed async: {message}" + + @operation + async def main_async_operation(self): + """Main async operation that calls the nested async operation""" + result = await self.nested_async_operation("test async") + return result + + # Test session with the async agent + @session + async def test_async_session(): + agent = AsyncAgent() + return await agent.main_async_operation() + + # Run the async test + result = asyncio.run(test_async_session()) + + # Verify the result + assert result == "Processed async: test async" + + # Get all spans captured during the test + spans = instrumentation.get_finished_spans() + + # Print detailed span information for debugging + print("\nDetailed span information for async test:") + for i, span in enumerate(spans): + parent_id = span.parent.span_id if span.parent else "None" + span_id = span.context.span_id if span.context else "None" + print(f"Span {i}: name={span.name}, span_id={span_id}, parent_id={parent_id}") + + # We should have 4 spans: session, agent, and two operations + assert len(spans) == 4 + + # Verify span kinds + session_spans = [s for s in spans if s.attributes and s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.SESSION] + agent_spans = [s for s in spans if s.attributes and s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.AGENT] + operation_spans = [s for s in spans if s.attributes and s.attributes.get( + SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.TASK] + + assert len(session_spans) == 1 + assert len(agent_spans) == 1 + assert len(operation_spans) == 2 + + # Find the main_operation and nested_operation spans + main_operation = None + nested_operation = None + + for span in operation_spans: + if span.attributes and span.attributes.get('agentops.operation.name') == 'main_async_operation': + main_operation = span + elif span.attributes and span.attributes.get('agentops.operation.name') == 'nested_async_operation': + nested_operation = span + + assert main_operation is not None, "main_async_operation span not found" + assert nested_operation is not None, "nested_async_operation span not found" + + # Verify the session span is the root + session_span = session_spans[0] + assert session_span.parent is None + + # Verify the agent span is a child of the session span + agent_span = agent_spans[0] + assert agent_span.parent is not None + assert session_span.context is not None + assert agent_span.parent.span_id == session_span.context.span_id + + # Verify main_operation is a child of the agent span + assert main_operation.parent is not None + assert agent_span.context is not None + assert main_operation.parent.span_id == agent_span.context.span_id + + # Verify nested_operation is a child of main_operation + assert nested_operation.parent is not None + assert main_operation.context is not None + assert nested_operation.parent.span_id == 
main_operation.context.span_id + + def test_generator_operations(self, instrumentation: InstrumentationTester): + """Test that generator operations are properly nested.""" + + # Define the test agent with generator operations + @agent + class GeneratorAgent: + def __init__(self): + pass + + @operation + def nested_generator(self, count): + """Generator operation that should appear as a child of the main operation""" + for i in range(count): + yield f"Item {i}" + + @operation + def main_generator_operation(self, count): + """Main operation that calls the nested generator""" + results = [] + for item in self.nested_generator(count): + results.append(item) + return results + + # Test session with the generator agent + @session + def test_generator_session(): + agent = GeneratorAgent() + return agent.main_generator_operation(3) + + # Run the test + result = test_generator_session() + + # Verify the result + assert result == ["Item 0", "Item 1", "Item 2"] + + # Get all spans captured during the test + spans = instrumentation.get_finished_spans() + + # Print detailed span information for debugging + print("\nDetailed span information for generator test:") + for i, span in enumerate(spans): + parent_id = span.parent.span_id if span.parent else "None" + span_id = span.context.span_id if span.context else "None" + print(f"Span {i}: name={span.name}, span_id={span_id}, parent_id={parent_id}") + + # We should have 4 spans: session, agent, and two operations + assert len(spans) == 4 + + # Verify span kinds + session_spans = [s for s in spans if s.attributes and s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.SESSION] + agent_spans = [s for s in spans if s.attributes and s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.AGENT] + operation_spans = [s for s in spans if s.attributes and s.attributes.get( + SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.TASK] + + assert len(session_spans) == 1 + assert len(agent_spans) == 1 + assert len(operation_spans) == 2 + + # Find the main_operation and nested_operation spans + main_operation = None + nested_operation = None + + for span in operation_spans: + if span.attributes and span.attributes.get('agentops.operation.name') == 'main_generator_operation': + main_operation = span + elif span.attributes and span.attributes.get('agentops.operation.name') == 'nested_generator': + nested_operation = span + + assert main_operation is not None, "main_generator_operation span not found" + assert nested_operation is not None, "nested_generator span not found" + + # Verify the session span is the root + session_span = session_spans[0] + assert session_span.parent is None + + # Verify the agent span is a child of the session span + agent_span = agent_spans[0] + assert agent_span.parent is not None + assert session_span.context is not None + assert agent_span.parent.span_id == session_span.context.span_id + + # Verify main_operation is a child of the agent span + assert main_operation.parent is not None + assert agent_span.context is not None + assert main_operation.parent.span_id == agent_span.context.span_id + + # Verify nested_operation is a child of main_operation + assert nested_operation.parent is not None + assert main_operation.context is not None + assert nested_operation.parent.span_id == main_operation.context.span_id + + def test_async_generator_operations(self, instrumentation: InstrumentationTester): + """Test that async generator operations are properly nested.""" + + # Define the test agent with async generator operations + @agent 
+ class AsyncGeneratorAgent: + def __init__(self): + pass + + @operation + async def nested_async_generator(self, count) -> AsyncGenerator[str, None]: + """Async generator operation that should appear as a child of the main operation""" + for i in range(count): + await asyncio.sleep(0.01) # Small delay to simulate async work + yield f"Async Item {i}" + + @operation + async def main_async_generator_operation(self, count): + """Main async operation that calls the nested async generator""" + results = [] + async for item in self.nested_async_generator(count): + results.append(item) + return results + + # Test session with the async generator agent + @session + async def test_async_generator_session(): + agent = AsyncGeneratorAgent() + return await agent.main_async_generator_operation(3) + + # Run the async test + result = asyncio.run(test_async_generator_session()) + + # Verify the result + assert result == ["Async Item 0", "Async Item 1", "Async Item 2"] + + # Get all spans captured during the test + spans = instrumentation.get_finished_spans() + + # Print detailed span information for debugging + print("\nDetailed span information for async generator test:") + for i, span in enumerate(spans): + parent_id = span.parent.span_id if span.parent else "None" + span_id = span.context.span_id if span.context else "None" + print(f"Span {i}: name={span.name}, span_id={span_id}, parent_id={parent_id}") + + # We should have 4 spans: session, agent, and two operations + assert len(spans) == 4 + + # Verify span kinds + session_spans = [s for s in spans if s.attributes and s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.SESSION] + agent_spans = [s for s in spans if s.attributes and s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.AGENT] + operation_spans = [s for s in spans if s.attributes and s.attributes.get( + SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.TASK] + + assert len(session_spans) == 1 + assert len(agent_spans) == 1 + assert len(operation_spans) == 2 + + # Find the main_operation and nested_operation spans + main_operation = None + nested_operation = None + + for span in operation_spans: + if span.attributes and span.attributes.get('agentops.operation.name') == 'main_async_generator_operation': + main_operation = span + elif span.attributes and span.attributes.get('agentops.operation.name') == 'nested_async_generator': + nested_operation = span + + assert main_operation is not None, "main_async_generator_operation span not found" + assert nested_operation is not None, "nested_async_generator span not found" + + # Verify the session span is the root + session_span = session_spans[0] + assert session_span.parent is None + + # Verify the agent span is a child of the session span + agent_span = agent_spans[0] + assert agent_span.parent is not None + assert session_span.context is not None + assert agent_span.parent.span_id == session_span.context.span_id + + # Verify main_operation is a child of the agent span + assert main_operation.parent is not None + assert agent_span.context is not None + assert main_operation.parent.span_id == agent_span.context.span_id + + # Verify nested_operation is a child of main_operation + assert nested_operation.parent is not None + assert main_operation.context is not None + assert nested_operation.parent.span_id == main_operation.context.span_id + + def test_complex_nesting(self, instrumentation: InstrumentationTester): + """Test complex nesting with multiple levels of operations.""" + + # Define the test agent with complex nesting 
+ @agent + class ComplexAgent: + def __init__(self): + pass + + @operation + def level3_operation(self, message): + """Level 3 operation (deepest)""" + return f"Level 3: {message}" + + @operation + def level2_operation(self, message): + """Level 2 operation that calls level 3""" + result = self.level3_operation(message) + return f"Level 2: {result}" + + @operation + def level1_operation(self, message): + """Level 1 operation that calls level 2""" + result = self.level2_operation(message) + return f"Level 1: {result}" + + # Test session with the complex agent + @session + def test_complex_session(): + agent = ComplexAgent() + return agent.level1_operation("test message") + + # Run the test + result = test_complex_session() + + # Verify the result + assert result == "Level 1: Level 2: Level 3: test message" + + # Get all spans captured during the test + spans = instrumentation.get_finished_spans() + + # Print detailed span information for debugging + print("\nDetailed span information for complex nesting test:") + for i, span in enumerate(spans): + parent_id = span.parent.span_id if span.parent else "None" + span_id = span.context.span_id if span.context else "None" + print(f"Span {i}: name={span.name}, span_id={span_id}, parent_id={parent_id}") + + # We should have 5 spans: session, agent, and three operations + assert len(spans) == 5 + + # Verify span kinds + session_spans = [s for s in spans if s.attributes and s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.SESSION] + agent_spans = [s for s in spans if s.attributes and s.attributes.get(SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.AGENT] + operation_spans = [s for s in spans if s.attributes and s.attributes.get( + SpanAttributes.AGENTOPS_SPAN_KIND) == SpanKind.TASK] + + assert len(session_spans) == 1 + assert len(agent_spans) == 1 + assert len(operation_spans) == 3 + + # Find the operation spans + level1_operation = None + level2_operation = None + level3_operation = None + + for span in operation_spans: + if span.attributes and span.attributes.get('agentops.operation.name') == 'level1_operation': + level1_operation = span + elif span.attributes and span.attributes.get('agentops.operation.name') == 'level2_operation': + level2_operation = span + elif span.attributes and span.attributes.get('agentops.operation.name') == 'level3_operation': + level3_operation = span + + assert level1_operation is not None, "level1_operation span not found" + assert level2_operation is not None, "level2_operation span not found" + assert level3_operation is not None, "level3_operation span not found" + + # Verify the session span is the root + session_span = session_spans[0] + assert session_span.parent is None + + # Verify the agent span is a child of the session span + agent_span = agent_spans[0] + assert agent_span.parent is not None + assert session_span.context is not None + assert agent_span.parent.span_id == session_span.context.span_id + + # Verify level1_operation is a child of the agent span + assert level1_operation.parent is not None + assert agent_span.context is not None + assert level1_operation.parent.span_id == agent_span.context.span_id + + # Verify level2_operation is a child of level1_operation + assert level2_operation.parent is not None + assert level1_operation.context is not None + assert level2_operation.parent.span_id == level1_operation.context.span_id + + # Verify level3_operation is a child of level2_operation + assert level3_operation.parent is not None + assert level2_operation.context is not None + assert 
level3_operation.parent.span_id == level2_operation.context.span_id + From 039f797f35db976e94f299025b6d632320e801ba Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 04:56:10 +0200 Subject: [PATCH 38/45] get rid of "commands" Signed-off-by: Teo --- agentops/sdk/__init__.py | 15 +-- agentops/sdk/commands.py | 149 ----------------------- examples/sdk/session_commands_example.py | 44 ------- 3 files changed, 4 insertions(+), 204 deletions(-) delete mode 100644 agentops/sdk/commands.py delete mode 100644 examples/sdk/session_commands_example.py diff --git a/agentops/sdk/__init__.py b/agentops/sdk/__init__.py index 024f1109f..1b0779dd5 100644 --- a/agentops/sdk/__init__.py +++ b/agentops/sdk/__init__.py @@ -5,28 +5,21 @@ for different types of operations in AI agent workflows. """ -# Import command functions -from agentops.sdk.commands import end_span, record, start_span # Import core components from agentops.sdk.core import TracingCore # Import decorators -from agentops.sdk.decorators import agent -from agentops.sdk.decorators import task as operation +from agentops.sdk.decorators import agent, operation, session, task, workflow # from agentops.sdk.traced import TracedObject # Merged into TracedObject from agentops.sdk.types import TracingConfig -# Import span types - - __all__ = [ # Core components "TracingCore", "TracingConfig", # Decorators + "session", "operation", "agent", - # Command functions - "start_span", - "end_span", - "record", + "task", + "workflow", ] diff --git a/agentops/sdk/commands.py b/agentops/sdk/commands.py deleted file mode 100644 index 9d9d263e0..000000000 --- a/agentops/sdk/commands.py +++ /dev/null @@ -1,149 +0,0 @@ -""" -Mid-level command layer for working with AgentOps SDK - -This module provides functions for creating and managing spans in AgentOps. -It focuses on generic span operations rather than specific session management. - -!! NOTE !! -If you are looking for the legacy start_session / end_session, look -at the `agentops.legacy` module. -""" - -from typing import Any, Dict, Optional, Tuple - -from opentelemetry import trace - -from agentops.exceptions import AgentOpsClientNotInitializedException -from agentops.sdk.core import TracingCore -from agentops.sdk.decorators.utility import _finalize_span, _make_span -from agentops.semconv.span_attributes import SpanAttributes -from agentops.semconv.span_kinds import SpanKind - - -def start_span( - name: str = "manual_span", - span_kind: str = SpanKind.OPERATION, - attributes: Dict[str, Any] = {}, - version: Optional[int] = None, -) -> Tuple[Any, Any]: - """ - Start a new AgentOps span manually. - - This function creates and starts a new span, which can be used to track - operations. The span will remain active until end_span is called with - the returned span and token. - - Args: - name: Name of the span - span_kind: Kind of span (e.g., SpanKind.OPERATION, SpanKind.SESSION) - attributes: Optional attributes to set on the span - version: Optional version identifier for the span - - Returns: - A tuple of (span, token) that should be passed to end_span - - Example: - ```python - # Start a span - my_span, token = agentops.start_span("my_custom_span") - - # Perform operations within the span - # ... 
- - # End the span - agentops.end_span(my_span, token) - ``` - """ - # Skip if tracing is not initialized - from agentops.client.client import Client - - cli = Client() - if not cli.initialized: - # Attempt to initialize the client if not already initialized - if cli.config.auto_init: - cli.init() - else: - raise AgentOpsClientNotInitializedException - - attributes.setdefault(SpanAttributes.AGENTOPS_SPAN_KIND, span_kind) - - # Use the standardized _make_span function to create the span - span, context, token = _make_span(operation_name=name, span_kind=span_kind, version=version, attributes=attributes) - - return span, token - - -def record(message: str, attributes: Optional[Dict[str, Any]] = None): - """ - Record an event with a message within the current span context. - - This function creates a simple operation span with the provided message - and attributes, which will be automatically associated with the current span context. - - Args: - message: The message to record - attributes: Optional attributes to set on the span - - Example: - ```python - # Start a span - my_span, token = agentops.start_span("my_custom_span") - - # Record an event within the span - agentops.record("This will generate a span within the current context") - - # End the span - agentops.end_span(my_span, token) - ``` - """ - # Skip if tracing is not initialized - if not TracingCore.get_instance()._initialized: - return - - # Get tracer - tracer = TracingCore.get_instance().get_tracer() - - # Create a simple span - with tracer.start_as_current_span( - "record", - kind=trace.SpanKind.INTERNAL, - ) as span: - # Set standard attributes - span.set_attribute("agentops.span.kind", SpanKind.OPERATION) - span.set_attribute("agentops.operation.message", message) - - # Add custom attributes if provided - if attributes: - for key, value in attributes.items(): - span.set_attribute(key, value) - - -def end_span(span, token): - """ - End a previously started AgentOps span. - - This function ends the span and detaches the context token, - completing the span lifecycle. - - Args: - span: The span returned by start_span - token: The token returned by start_span - - Example: - ```python - # Start a span - my_span, token = agentops.start_span("my_custom_span") - - # Perform operations within the span - # ... - - # End the span - agentops.end_span(my_span, token) - ``` - """ - # Handle case where tracing wasn't initialized - if span is None or token is None: - return - - # Use the standardized _finalize_span function to end the span - _finalize_span(span, token) diff --git a/examples/sdk/session_commands_example.py b/examples/sdk/session_commands_example.py deleted file mode 100644 index 23790be96..000000000 --- a/examples/sdk/session_commands_example.py +++ /dev/null @@ -1,44 +0,0 @@ -""" -Example demonstrating how to use the AgentOps session commands. - -This example shows three different ways to manage session spans: -1. 
Using the start_session and end_session functions directly - -Run this example with: - uv run examples/session_commands_example.py -""" - -import time - -import agentops -from agentops.sdk.commands import end_span, record, start_span -from agentops.sdk.decorators import operation -from agentops.semconv.span_kinds import SpanKind - -# Initialize AgentOps with your API key -# In a real application, you would use your actual API key -agentops.init() - - -def example_1_manual_session(): - """Example using start_session and end_session functions directly.""" - print("Example 1: Manual session control") - - # Start a session manually - span, token = start_span( - name="manual_session", - span_kind=SpanKind.SESSION, - attributes={"example": "manual", "method": "direct_functions"}, - ) - - # Simulate some work - record("This will generate a span within the 'manual_session' session") - - # End the session manually - end_span(span, token) - print(" Manual session ended") - - -if __name__ == "__main__": - # Run all examples - example_1_manual_session() From 2058675ff9f0e246af90c6881c7b0ebd9c2b7ead Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 05:07:03 +0200 Subject: [PATCH 39/45] Session(span,token) Signed-off-by: Teo --- agentops/legacy/__init__.py | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/agentops/legacy/__init__.py b/agentops/legacy/__init__.py index aadbf204e..031a9b74b 100644 --- a/agentops/legacy/__init__.py +++ b/agentops/legacy/__init__.py @@ -8,7 +8,6 @@ from typing import Any, Dict, Tuple -from agentops.sdk.commands import start_span, end_span from agentops.semconv.span_kinds import SpanKind __all__ = [ @@ -20,9 +19,19 @@ ] +class Session: + """ + A legacy session object that holds a span and token. + """ + + def __init__(self, span: Any, token: Any): + self.span = span + self.token = token + + def start_session( name: str = "manual_session", attributes: Dict[str, Any] = {} -) -> Tuple[Any, Any]: +) -> Session: """ Start a new AgentOps session manually. @@ -38,12 +47,14 @@ def start_session( version: Optional version identifier for the session Returns: - A tuple of (span, token) that should be passed to end_session + A Session object that should be passed to end_session """ - return start_span(name=name, span_kind=SpanKind.SESSION, attributes=attributes) + from agentops.sdk.decorators.utility import _make_span + span, token = _make_span('session', span_kind=SpanKind.SESSION, attributes=attributes) + return Session(span, token) -def end_session(span, token) -> None: +def end_session(session: Session) -> None: """ End a previously started AgentOps session. @@ -53,9 +64,10 @@ def end_session(span, token) -> None: This is a legacy function that uses end_span. 
Args: - span: The span returned by start_session + session: The session object returned by start_session """ - end_span(span, token) + from agentops.sdk.decorators.utility import _finalize_span + _finalize_span(session.span, session.token) def ToolEvent(*args, **kwargs) -> None: From 9b70fc563961848d410451b333dc7f0d62c361e7 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 05:13:40 +0200 Subject: [PATCH 40/45] __init__ exports Signed-off-by: Teo --- agentops/__init__.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/agentops/__init__.py b/agentops/__init__.py index 9418232e0..eeab5f1eb 100755 --- a/agentops/__init__.py +++ b/agentops/__init__.py @@ -1,13 +1,8 @@ from typing import Any, Dict, List, Optional, Union -import agentops.legacy as legacy -from agentops.legacy import ErrorEvent, ToolEvent +from agentops.legacy import ErrorEvent, ToolEvent, end_session, start_session from .client import Client -from .sdk.commands import end_span as sdk_end_span -from .sdk.commands import record as sdk_record -from .sdk.commands import start_span as sdk_start_span -from .semconv.span_kinds import SpanKind # Client global instance; one per process runtime _client = Client() @@ -130,6 +125,18 @@ def configure(**kwargs): _client.configure(**kwargs) # For backwards compatibility and testing + + def get_client() -> Client: """Get the singleton client instance""" return _client + + + +__all__ = [ + "init", + "configure", + "get_client", + "start_session", + "end_session", +] From 50af51fffc1229ddadd4d819e7e389de41786197 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 05:16:01 +0200 Subject: [PATCH 41/45] backward compat +legacy Signed-off-by: Teo --- agentops/__init__.py | 2 ++ agentops/legacy/__init__.py | 62 +++++++++++++++++-------------------- 2 files changed, 30 insertions(+), 34 deletions(-) diff --git a/agentops/__init__.py b/agentops/__init__.py index eeab5f1eb..148dcc375 100755 --- a/agentops/__init__.py +++ b/agentops/__init__.py @@ -133,6 +133,8 @@ def get_client() -> Client: +from agentops.legacy import * # type: ignore + __all__ = [ "init", "configure", diff --git a/agentops/legacy/__init__.py b/agentops/legacy/__init__.py index 031a9b74b..db4529fe3 100644 --- a/agentops/legacy/__init__.py +++ b/agentops/legacy/__init__.py @@ -9,14 +9,7 @@ from typing import Any, Dict, Tuple from agentops.semconv.span_kinds import SpanKind - -__all__ = [ - "start_session", - "end_session", - "ToolEvent", - "ErrorEvent", - "session", -] +from agentops.logging import logger class Session: @@ -30,7 +23,7 @@ def __init__(self, span: Any, token: Any): def start_session( - name: str = "manual_session", attributes: Dict[str, Any] = {} + name: str = "manual_session", attributes: Dict[str, Any] = {}, tags=None, ) -> Session: """ Start a new AgentOps session manually. @@ -49,8 +42,10 @@ def start_session( Returns: A Session object that should be passed to end_session """ + if tags is not None: + attributes = {**attributes, **tags} from agentops.sdk.decorators.utility import _make_span - span, token = _make_span('session', span_kind=SpanKind.SESSION, attributes=attributes) + span, context, token = _make_span('session', span_kind=SpanKind.SESSION, attributes=attributes) return Session(span, token) @@ -86,27 +81,26 @@ def ErrorEvent(*args, **kwargs) -> None: return None -class session: - @classmethod - def record(cls, *args, **kwargs): - """ - @deprecated - Use tracing instead. 
- """ - pass # noop silently - - @classmethod - def create_agent(cls, *args, **kwargs): - """ - @deprecated - Agents are registered automatically. - """ - pass # noop silently - - @classmethod - def end_session(cls, *args, **kwargs): - """ - @deprecated - Sessions are ended automatically. - """ - pass # noop silently +def ActionEvent(*args, **kwargs) -> None: + """ + @deprecated + Use tracing instead. + """ + return None + + +def LLMEvent(*args, **kwargs) -> None: + """ + @deprecated + Use tracing instead. + """ + return None + + +__all__ = [ + "start_session", + "end_session", + "ToolEvent", + "ErrorEvent", + "ActionEvent", +] From ea0d7bfa128b09b8615c8f934e659c8959eac8f9 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 05:44:15 +0200 Subject: [PATCH 42/45] deprecate pre-legacy core manual tests Signed-off-by: Teo --- tests/core_manual_tests/time_travel.py | 136 ------------------------- 1 file changed, 136 deletions(-) delete mode 100644 tests/core_manual_tests/time_travel.py diff --git a/tests/core_manual_tests/time_travel.py b/tests/core_manual_tests/time_travel.py deleted file mode 100644 index 3505f81e8..000000000 --- a/tests/core_manual_tests/time_travel.py +++ /dev/null @@ -1,136 +0,0 @@ -from openai import OpenAI, AsyncOpenAI -import openai -from openai.resources.chat import completions -import agentops -from dotenv import load_dotenv - -load_dotenv() -client = OpenAI() - -agentops.init(default_tags=["TTD Test", openai.__version__]) - -try: - chat_completion_1 = client.chat.completions.create( - messages=[ - { - "content": "Come up with a random superpower that isn't time travel. Just return the superpower in the format: 'Superpower: [superpower]'", - "role": "user", - } - ], - model="gpt-3.5-turbo-0125", - ) - content1 = chat_completion_1.choices[0].message.content - print(content1) - superpower = content1.split("Superpower:")[1].strip() - - chat_completion_2 = client.chat.completions.create( - messages=[ - { - "content": "Come up with a superhero name given this superpower: " - + superpower - + ". Just return the superhero name in this format: 'Superhero: [superhero name]'", - "role": "user", - } - ], - model="gpt-3.5-turbo-0125", - ) - content2 = chat_completion_2.choices[0].message.content - print(content2) - superhero = content2.split("Superhero:")[1].strip() - - chat_completion_3 = client.chat.completions.create( - messages=[ - { - "content": "Come up with a fictional city for superhero " - + superhero - + ". Just return the city name in this format: 'City: [city name]'", - "role": "user", - } - ], - model="gpt-3.5-turbo-0125", - ) - content3 = chat_completion_3.choices[0].message.content - print(content3) - city = content3.split("City:")[1].strip() - - chat_completion_4 = client.chat.completions.create( - messages=[ - { - "content": "Come up with a weakness for superhero " - + superhero - + " with superpower " - + superpower - + ". Just return the weakness in this format: 'Weakness: [weakness]'", - "role": "user", - } - ], - model="gpt-3.5-turbo-0125", - ) - content4 = chat_completion_4.choices[0].message.content - print(content4) - weakness = content4.split("Weakness:")[1].strip() - - chat_completion_5 = client.chat.completions.create( - messages=[ - { - "content": "Come up with the superpower of superhero " - + superhero - + "'s arch nemesis. The superpower cannot be time travel. 
Just return the superpower in the format: 'Superpower: [superpower]'", - "role": "user", - } - ], - model="gpt-3.5-turbo-0125", - ) - content5 = chat_completion_5.choices[0].message.content - print(content5) - arch_nemesis_superpower = content5.split("Superpower:")[1].strip() - - chat_completion_6 = client.chat.completions.create( - messages=[ - { - "content": "Given the following superpower of a supervillain - " - + arch_nemesis_superpower - + " - come up with the supervillain's name. Just return the supervillain's name in this format 'Supervillain: [supervillain name]'", - "role": "user", - } - ], - model="gpt-3.5-turbo-0125", - ) - content6 = chat_completion_6.choices[0].message.content - print(content6) - supervillain = content6.split("Supervillain:")[1].strip() - - chat_completion_7 = client.chat.completions.create( - messages=[ - { - "content": "Write a 100 word superhero story about the feud between superhero " - + superhero - + " and his arch nemesis " - + supervillain - + " set in " - + city - + ". " - + superhero - + "'s superpower is " - + superpower - + " and his weakness is " - + weakness - + "." - + supervillain - + "'s superpower is " - + arch_nemesis_superpower - + ".", - "role": "user", - } - ], - model="gpt-3.5-turbo-0125", - ) - content7 = chat_completion_7.choices[0].message.content - print(content7) - - agentops.end_session("Success") - -# TODO: This just fails more gracefully for the demo but might wanna refactor so split/strip don't except -except (IndexError, AttributeError) as e: - agentops.logger.warning(f"An error occurred: {e}") - agentops.end_session("Fail") From d2b3ea66c10072acd4298cb139a1e93215d49015 Mon Sep 17 00:00:00 2001 From: Teo Date: Fri, 14 Mar 2025 05:48:53 +0200 Subject: [PATCH 43/45] legacy: agentops.start_session(+tags) Signed-off-by: Teo --- agentops/legacy/__init__.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/agentops/legacy/__init__.py b/agentops/legacy/__init__.py index db4529fe3..c8358ee18 100644 --- a/agentops/legacy/__init__.py +++ b/agentops/legacy/__init__.py @@ -6,7 +6,7 @@ This maintains compatibility with codebases that adhere to the previous API. """ -from typing import Any, Dict, Tuple +from typing import Any, Dict, Tuple, Union, List from agentops.semconv.span_kinds import SpanKind from agentops.logging import logger @@ -23,7 +23,7 @@ def __init__(self, span: Any, token: Any): def start_session( - name: str = "manual_session", attributes: Dict[str, Any] = {}, tags=None, + tags: Union[Dict[str, Any], List[str], None] = None, ) -> Session: """ Start a new AgentOps session manually. 
@@ -36,15 +36,16 @@ def start_session(
 
     Args:
         name: Name of the session
-        attributes: Optional attributes to set on the session span
-        version: Optional version identifier for the session
+        tags: Optional tags (a dict or list of strings); stored on the
+            session span under the "tags" attribute
 
     Returns:
         A Session object that should be passed to end_session
     """
-    if tags is not None:
-        attributes = {**attributes, **tags}
     from agentops.sdk.decorators.utility import _make_span
+    attributes = {}
+    if tags:
+        attributes["tags"] = tags
     span, context, token = _make_span('session', span_kind=SpanKind.SESSION, attributes=attributes)
     return Session(span, token)
 

From 65f46c6ec8a253c7dc2d93de66962c91d0a374b0 Mon Sep 17 00:00:00 2001
From: Teo 
Date: Fri, 14 Mar 2025 05:53:41 +0200
Subject: [PATCH 44/45] Auto end Session.span on __del__

Signed-off-by: Teo 
---
 agentops/legacy/__init__.py | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/agentops/legacy/__init__.py b/agentops/legacy/__init__.py
index c8358ee18..28f9955da 100644
--- a/agentops/legacy/__init__.py
+++ b/agentops/legacy/__init__.py
@@ -6,10 +6,12 @@
 This maintains compatibility with codebases that adhere to the previous API.
 """
 
-from typing import Any, Dict, Tuple, Union, List
+from typing import Any, Dict, List, Tuple, Union
+
+from httpx import Client
 
-from agentops.semconv.span_kinds import SpanKind
 from agentops.logging import logger
+from agentops.semconv.span_kinds import SpanKind
 
 
 class Session:
@@ -21,6 +23,9 @@ def __init__(self, span: Any, token: Any):
         self.span = span
         self.token = token
 
+    def __del__(self):
+        self.span.end()
+
 
 def start_session(
     tags: Union[Dict[str, Any], List[str], None] = None,
@@ -42,6 +47,10 @@ def start_session(
     Returns:
         A Session object that should be passed to end_session
     """
+    from agentops import Client
+    if not Client().initialized:
+        Client().init()
+
     from agentops.sdk.decorators.utility import _make_span
     attributes = {}
     if tags:

From 69ebf9f9b53e1a8d214628c6384c7d5707b0ab7c Mon Sep 17 00:00:00 2001
From: Teo 
Date: Fri, 14 Mar 2025 05:59:04 +0200
Subject: [PATCH 45/45] Session + backwards compat methods (create_agent,
 record, end_session)

Signed-off-by: Teo 
---
 agentops/legacy/__init__.py | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/agentops/legacy/__init__.py b/agentops/legacy/__init__.py
index 28f9955da..56ea94434 100644
--- a/agentops/legacy/__init__.py
+++ b/agentops/legacy/__init__.py
@@ -24,9 +24,22 @@ def __init__(self, span: Any, token: Any):
         self.token = token
 
     def __del__(self):
+        try:
+            self.span.end()
+        except Exception:
+            pass
+
+    def create_agent(self, *args, **kwargs):
+        pass
+
+    def record(self, *args, **kwargs):
+        pass
+
+    def end_session(self, *args, **kwargs):
         self.span.end()
 
 
+
 def start_session(
     tags: Union[Dict[str, Any], List[str], None] = None,
 ) -> Session:
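
After this series, the legacy surface re-exported from the top-level `agentops` package is `start_session`, `end_session`, and the deprecated event constructors from `agentops.legacy`. A minimal usage sketch of that surface follows; it is illustrative only, assumes a configured API key, and the tag value and event keyword arguments shown are arbitrary:

```python
import agentops

# start_session() auto-initializes the client if needed (PATCH 44),
# but calling init() explicitly keeps configuration in one place.
agentops.init()

# Legacy entry point: wraps the root span and its context token in a Session.
session = agentops.start_session(tags=["legacy-compat"])

# Deprecated constructors are retained as no-op shims that return None,
# so old call sites keep importing and running without effect.
agentops.ToolEvent(name="lookup")
agentops.ErrorEvent(details="handled upstream")

# Ends the underlying session span; Session.__del__ is only a fallback.
session.end_session()
```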