From 0506cf0c44429ecba316724be30adbbdc711878a Mon Sep 17 00:00:00 2001 From: Pratyush Shukla Date: Tue, 17 Jun 2025 03:34:36 +0530 Subject: [PATCH 1/7] check tools are not `None` --- agentops/instrumentation/openai/wrappers/chat.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/agentops/instrumentation/openai/wrappers/chat.py b/agentops/instrumentation/openai/wrappers/chat.py index eb331b088..bec136332 100644 --- a/agentops/instrumentation/openai/wrappers/chat.py +++ b/agentops/instrumentation/openai/wrappers/chat.py @@ -104,12 +104,13 @@ def handle_chat_attributes( # Tools if "tools" in kwargs: tools = kwargs["tools"] - for i, tool in enumerate(tools): - function = tool.get("function", {}) - prefix = f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{i}" - attributes[f"{prefix}.name"] = function.get("name") - attributes[f"{prefix}.description"] = function.get("description") - attributes[f"{prefix}.parameters"] = json.dumps(function.get("parameters")) + if tools: # Check if tools is not None + for i, tool in enumerate(tools): + function = tool.get("function", {}) + prefix = f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{i}" + attributes[f"{prefix}.name"] = function.get("name") + attributes[f"{prefix}.description"] = function.get("description") + attributes[f"{prefix}.parameters"] = json.dumps(function.get("parameters")) # Extract response attributes from return value if return_value: From f46f143ed44206cb256c71820ec8e1d679c484e3 Mon Sep 17 00:00:00 2001 From: Pratyush Shukla Date: Tue, 17 Jun 2025 04:02:32 +0530 Subject: [PATCH 2/7] clear mem0 notebook --- examples/mem0/mem0_memoryclient_example.ipynb | 241 +----------------- 1 file changed, 9 insertions(+), 232 deletions(-) diff --git a/examples/mem0/mem0_memoryclient_example.ipynb b/examples/mem0/mem0_memoryclient_example.ipynb index cbcb72496..a1d0b326b 100644 --- a/examples/mem0/mem0_memoryclient_example.ipynb +++ b/examples/mem0/mem0_memoryclient_example.ipynb @@ -45,85 +45,10 @@ }, { "cell_type": "code", - "execution_count": 95, + "execution_count": null, "id": "69b834f6", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Requirement already satisfied: agentops in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (0.4.14)\n", - "Requirement already satisfied: httpx<0.29.0,>=0.24.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from agentops) (0.28.1)\n", - "Requirement already satisfied: opentelemetry-api>1.29.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from agentops) (1.34.1)\n", - "Requirement already satisfied: opentelemetry-exporter-otlp-proto-http>1.29.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from agentops) (1.34.1)\n", - "Requirement already satisfied: opentelemetry-instrumentation>=0.50b0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from agentops) (0.55b1)\n", - "Requirement already satisfied: opentelemetry-sdk>1.29.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from agentops) (1.34.1)\n", - "Requirement already satisfied: opentelemetry-semantic-conventions>=0.50b0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from agentops) (0.55b1)\n", - "Requirement already satisfied: ordered-set<5.0.0,>=4.0.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from agentops) (4.1.0)\n", - "Requirement already satisfied: packaging<25.0,>=21.0 in 
/Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from agentops) (24.2)\n", - "Requirement already satisfied: psutil<7.0.1,>=5.9.8 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from agentops) (7.0.0)\n", - "Requirement already satisfied: pyyaml<7.0,>=5.3 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from agentops) (6.0.2)\n", - "Requirement already satisfied: requests<3.0.0,>=2.0.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from agentops) (2.32.4)\n", - "Requirement already satisfied: termcolor<2.5.0,>=2.3.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from agentops) (2.4.0)\n", - "Requirement already satisfied: wrapt<2.0.0,>=1.0.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from agentops) (1.17.2)\n", - "Requirement already satisfied: anyio in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from httpx<0.29.0,>=0.24.0->agentops) (4.9.0)\n", - "Requirement already satisfied: certifi in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from httpx<0.29.0,>=0.24.0->agentops) (2025.4.26)\n", - "Requirement already satisfied: httpcore==1.* in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from httpx<0.29.0,>=0.24.0->agentops) (1.0.9)\n", - "Requirement already satisfied: idna in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from httpx<0.29.0,>=0.24.0->agentops) (3.10)\n", - "Requirement already satisfied: h11>=0.16 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from httpcore==1.*->httpx<0.29.0,>=0.24.0->agentops) (0.16.0)\n", - "Requirement already satisfied: charset_normalizer<4,>=2 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from requests<3.0.0,>=2.0.0->agentops) (3.4.2)\n", - "Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from requests<3.0.0,>=2.0.0->agentops) (2.4.0)\n", - "Requirement already satisfied: importlib-metadata<8.8.0,>=6.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from opentelemetry-api>1.29.0->agentops) (8.7.0)\n", - "Requirement already satisfied: typing-extensions>=4.5.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from opentelemetry-api>1.29.0->agentops) (4.14.0)\n", - "Requirement already satisfied: zipp>=3.20 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from importlib-metadata<8.8.0,>=6.0->opentelemetry-api>1.29.0->agentops) (3.23.0)\n", - "Requirement already satisfied: googleapis-common-protos~=1.52 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from opentelemetry-exporter-otlp-proto-http>1.29.0->agentops) (1.70.0)\n", - "Requirement already satisfied: opentelemetry-exporter-otlp-proto-common==1.34.1 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from opentelemetry-exporter-otlp-proto-http>1.29.0->agentops) (1.34.1)\n", - "Requirement already satisfied: opentelemetry-proto==1.34.1 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from opentelemetry-exporter-otlp-proto-http>1.29.0->agentops) (1.34.1)\n", - "Requirement already satisfied: protobuf<6.0,>=5.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from opentelemetry-proto==1.34.1->opentelemetry-exporter-otlp-proto-http>1.29.0->agentops) (5.29.5)\n", - "Requirement already satisfied: sniffio>=1.1 in 
/Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from anyio->httpx<0.29.0,>=0.24.0->agentops) (1.3.1)\n", - "Note: you may need to restart the kernel to use updated packages.\n", - "Requirement already satisfied: mem0ai in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (0.1.104)\n", - "Requirement already satisfied: openai>=1.33.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from mem0ai) (1.75.0)\n", - "Requirement already satisfied: posthog>=3.5.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from mem0ai) (3.25.0)\n", - "Requirement already satisfied: pydantic>=2.7.3 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from mem0ai) (2.11.4)\n", - "Requirement already satisfied: pytz>=2024.1 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from mem0ai) (2024.2)\n", - "Requirement already satisfied: qdrant-client>=1.9.1 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from mem0ai) (1.14.2)\n", - "Requirement already satisfied: sqlalchemy>=2.0.31 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from mem0ai) (2.0.41)\n", - "Requirement already satisfied: anyio<5,>=3.5.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from openai>=1.33.0->mem0ai) (4.9.0)\n", - "Requirement already satisfied: distro<2,>=1.7.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from openai>=1.33.0->mem0ai) (1.9.0)\n", - "Requirement already satisfied: httpx<1,>=0.23.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from openai>=1.33.0->mem0ai) (0.28.1)\n", - "Requirement already satisfied: jiter<1,>=0.4.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from openai>=1.33.0->mem0ai) (0.8.2)\n", - "Requirement already satisfied: sniffio in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from openai>=1.33.0->mem0ai) (1.3.1)\n", - "Requirement already satisfied: tqdm>4 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from openai>=1.33.0->mem0ai) (4.67.1)\n", - "Requirement already satisfied: typing-extensions<5,>=4.11 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from openai>=1.33.0->mem0ai) (4.14.0)\n", - "Requirement already satisfied: idna>=2.8 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from anyio<5,>=3.5.0->openai>=1.33.0->mem0ai) (3.10)\n", - "Requirement already satisfied: certifi in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from httpx<1,>=0.23.0->openai>=1.33.0->mem0ai) (2025.4.26)\n", - "Requirement already satisfied: httpcore==1.* in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from httpx<1,>=0.23.0->openai>=1.33.0->mem0ai) (1.0.9)\n", - "Requirement already satisfied: h11>=0.16 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from httpcore==1.*->httpx<1,>=0.23.0->openai>=1.33.0->mem0ai) (0.16.0)\n", - "Requirement already satisfied: annotated-types>=0.6.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from pydantic>=2.7.3->mem0ai) (0.7.0)\n", - "Requirement already satisfied: pydantic-core==2.33.2 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from pydantic>=2.7.3->mem0ai) (2.33.2)\n", - "Requirement already satisfied: typing-inspection>=0.4.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from pydantic>=2.7.3->mem0ai) (0.4.0)\n", - 
"Requirement already satisfied: requests<3.0,>=2.7 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from posthog>=3.5.0->mem0ai) (2.32.4)\n", - "Requirement already satisfied: six>=1.5 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from posthog>=3.5.0->mem0ai) (1.17.0)\n", - "Requirement already satisfied: monotonic>=1.5 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from posthog>=3.5.0->mem0ai) (1.6)\n", - "Requirement already satisfied: backoff>=1.10.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from posthog>=3.5.0->mem0ai) (2.2.1)\n", - "Requirement already satisfied: python-dateutil>2.1 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from posthog>=3.5.0->mem0ai) (2.9.0.post0)\n", - "Requirement already satisfied: charset_normalizer<4,>=2 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from requests<3.0,>=2.7->posthog>=3.5.0->mem0ai) (3.4.2)\n", - "Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from requests<3.0,>=2.7->posthog>=3.5.0->mem0ai) (2.4.0)\n", - "Requirement already satisfied: grpcio>=1.41.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from qdrant-client>=1.9.1->mem0ai) (1.71.0)\n", - "Requirement already satisfied: numpy>=1.21 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from qdrant-client>=1.9.1->mem0ai) (2.2.5)\n", - "Requirement already satisfied: portalocker<3.0.0,>=2.7.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from qdrant-client>=1.9.1->mem0ai) (2.10.1)\n", - "Requirement already satisfied: protobuf>=3.20.0 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from qdrant-client>=1.9.1->mem0ai) (5.29.5)\n", - "Requirement already satisfied: h2<5,>=3 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from httpx[http2]>=0.20.0->qdrant-client>=1.9.1->mem0ai) (4.2.0)\n", - "Requirement already satisfied: hyperframe<7,>=6.1 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from h2<5,>=3->httpx[http2]>=0.20.0->qdrant-client>=1.9.1->mem0ai) (6.1.0)\n", - "Requirement already satisfied: hpack<5,>=4.1 in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (from h2<5,>=3->httpx[http2]>=0.20.0->qdrant-client>=1.9.1->mem0ai) (4.1.0)\n", - "Note: you may need to restart the kernel to use updated packages.\n", - "Requirement already satisfied: python-dotenv in /Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages (1.1.0)\n", - "Note: you may need to restart the kernel to use updated packages.\n" - ] - } - ], + "outputs": [], "source": [ "# Install the required dependencies:\n", "%pip install agentops\n", @@ -133,7 +58,7 @@ }, { "cell_type": "code", - "execution_count": 96, + "execution_count": null, "id": "e552e158", "metadata": {}, "outputs": [], @@ -158,7 +83,7 @@ }, { "cell_type": "code", - "execution_count": 97, + "execution_count": null, "id": "969f7c42", "metadata": {}, "outputs": [], @@ -182,7 +107,7 @@ }, { "cell_type": "code", - "execution_count": 98, + "execution_count": null, "id": "0abf3fe7", "metadata": {}, "outputs": [], @@ -225,7 +150,7 @@ }, { "cell_type": "code", - "execution_count": 99, + "execution_count": null, "id": "63d2f851", "metadata": {}, "outputs": [], @@ -290,7 +215,7 @@ }, { "cell_type": "code", - "execution_count": 100, + "execution_count": null, "id": "2462a05f", "metadata": {}, "outputs": 
[], @@ -366,158 +291,10 @@ }, { "cell_type": "code", - "execution_count": 101, + "execution_count": null, "id": "3a6524b2", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "🖇 AgentOps: \u001b[34m\u001b[34mSession Replay for mem0_memoryclient_sync_example trace: https://app.agentops.ai/sessions?trace_id=75c9c672ca2f2205d1bdb9b4f615ba2e\u001b[0m\u001b[0m\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "args: ([{'role': 'user', 'content': \"I'm planning to watch a movie tonight. Any recommendations?\"}, {'role': 'assistant', 'content': 'How about a thriller? They can be quite engaging.'}, {'role': 'user', 'content': \"I'm not a big fan of thriller movies but I love sci-fi movies.\"}, {'role': 'assistant', 'content': \"Got it! I'll avoid thriller recommendations and suggest sci-fi movies in the future.\"}],)\n", - "kwargs: {'user_id': 'alice_demo', 'metadata': {'category': 'cloud_movie_preferences', 'session': 'cloud_demo'}, 'version': 'v2'}\n", - "return_value: None\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/fenil/Documents/agentops/venv/lib/python3.11/site-packages/mem0/client/main.py:34: DeprecationWarning: output_format='v1.0' is deprecated therefore setting it to 'v1.1' by default.Check out the docs for more information: https://docs.mem0.ai/platform/quickstart#4-1-create-memories\n", - " return func(*args, **kwargs)\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "args: None\n", - "kwargs: None\n", - "return_value: {'results': [{'id': '15b4579e-a074-4ef6-a318-03078a18858b', 'event': 'ADD', 'memory': 'Prefers sci-fi movies over thriller movies'}]}\n", - "Add result: {'results': [{'id': '15b4579e-a074-4ef6-a318-03078a18858b', 'event': 'ADD', 'memory': 'Prefers sci-fi movies over thriller movies'}]}\n", - "args: ([{'role': 'user', 'content': 'I prefer dark roast coffee over light roast'}],)\n", - "kwargs: {'user_id': 'alice_demo', 'metadata': {'type': 'cloud_preference', 'index': 0}}\n", - "return_value: None\n", - "args: None\n", - "kwargs: None\n", - "return_value: {'results': [{'id': 'd012318e-6dd3-4456-9ddb-1e33b5973201', 'event': 'ADD', 'memory': 'Prefers dark roast coffee over light roast'}]}\n", - "args: ([{'role': 'user', 'content': 'I exercise every morning at 6 AM'}],)\n", - "kwargs: {'user_id': 'alice_demo', 'metadata': {'type': 'cloud_preference', 'index': 1}}\n", - "return_value: None\n", - "args: None\n", - "kwargs: None\n", - "return_value: {'results': [{'id': '1e0829f9-ada4-4fde-8371-9a0b24410692', 'event': 'ADD', 'memory': 'Exercises every morning at 6 AM'}]}\n", - "args: ([{'role': 'user', 'content': \"I'm vegetarian and avoid all meat products\"}],)\n", - "kwargs: {'user_id': 'alice_demo', 'metadata': {'type': 'cloud_preference', 'index': 2}}\n", - "return_value: None\n", - "args: None\n", - "kwargs: None\n", - "return_value: {'results': [{'id': 'ea933b7a-ad62-4340-9e4b-422ba89ccb13', 'event': 'ADD', 'memory': 'Is vegetarian and avoids all meat products'}]}\n", - "get_search_attributes args: (\"What are the user's movie preferences?\",)\n", - "get_search_attributes kwargs: {'user_id': 'alice_demo'}\n", - "get_search_attributes return_value: None\n", - "get_search_attributes args: None\n", - "get_search_attributes kwargs: None\n", - "get_search_attributes return_value: [{'id': '15b4579e-a074-4ef6-a318-03078a18858b', 'memory': 'Prefers sci-fi movies over thriller movies', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': 
{'session': 'cloud_demo', 'category': 'cloud_movie_preferences'}, 'categories': ['user_preferences', 'entertainment'], 'created_at': '2025-06-13T07:01:02.776810-07:00', 'updated_at': '2025-06-13T07:01:02.795856-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.5759006142616313}, {'id': 'd012318e-6dd3-4456-9ddb-1e33b5973201', 'memory': 'Prefers dark roast coffee over light roast', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'cloud_preference', 'index': 0}, 'categories': ['user_preferences', 'food'], 'created_at': '2025-06-13T07:01:09.645061-07:00', 'updated_at': '2025-06-13T07:01:09.666647-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.4206016754386812}, {'id': 'ea933b7a-ad62-4340-9e4b-422ba89ccb13', 'memory': 'Is vegetarian and avoids all meat products', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'cloud_preference', 'index': 2}, 'categories': ['user_preferences', 'food'], 'created_at': '2025-06-13T07:01:24.877544-07:00', 'updated_at': '2025-06-13T07:01:24.896089-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.3932421036670084}, {'id': '1e0829f9-ada4-4fde-8371-9a0b24410692', 'memory': 'Exercises every morning at 6 AM', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'cloud_preference', 'index': 1}, 'categories': ['health'], 'created_at': '2025-06-13T07:01:16.659112-07:00', 'updated_at': '2025-06-13T07:01:16.740428-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.3198329210281394}]\n", - "Search result: [{'id': '15b4579e-a074-4ef6-a318-03078a18858b', 'memory': 'Prefers sci-fi movies over thriller movies', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'session': 'cloud_demo', 'category': 'cloud_movie_preferences'}, 'categories': ['user_preferences', 'entertainment'], 'created_at': '2025-06-13T07:01:02.776810-07:00', 'updated_at': '2025-06-13T07:01:02.795856-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.5759006142616313}, {'id': 'd012318e-6dd3-4456-9ddb-1e33b5973201', 'memory': 'Prefers dark roast coffee over light roast', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'cloud_preference', 'index': 0}, 'categories': ['user_preferences', 'food'], 'created_at': '2025-06-13T07:01:09.645061-07:00', 'updated_at': '2025-06-13T07:01:09.666647-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.4206016754386812}, {'id': 'ea933b7a-ad62-4340-9e4b-422ba89ccb13', 'memory': 'Is vegetarian and avoids all meat products', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'cloud_preference', 'index': 2}, 'categories': ['user_preferences', 'food'], 'created_at': '2025-06-13T07:01:24.877544-07:00', 'updated_at': '2025-06-13T07:01:24.896089-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.3932421036670084}, {'id': '1e0829f9-ada4-4fde-8371-9a0b24410692', 'memory': 'Exercises every morning at 6 AM', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'cloud_preference', 'index': 1}, 'categories': ['health'], 'created_at': '2025-06-13T07:01:16.659112-07:00', 'updated_at': 
'2025-06-13T07:01:16.740428-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.3198329210281394}]\n", - "Cloud memories retrieved: [{'id': 'ea933b7a-ad62-4340-9e4b-422ba89ccb13', 'memory': 'Is vegetarian and avoids all meat products', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'cloud_preference', 'index': 2}, 'categories': ['user_preferences', 'food'], 'created_at': '2025-06-13T07:01:24.877544-07:00', 'updated_at': '2025-06-13T07:01:24.896089-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None}, {'id': '1e0829f9-ada4-4fde-8371-9a0b24410692', 'memory': 'Exercises every morning at 6 AM', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'cloud_preference', 'index': 1}, 'categories': ['health'], 'created_at': '2025-06-13T07:01:16.659112-07:00', 'updated_at': '2025-06-13T07:01:16.740428-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None}, {'id': 'd012318e-6dd3-4456-9ddb-1e33b5973201', 'memory': 'Prefers dark roast coffee over light roast', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'cloud_preference', 'index': 0}, 'categories': ['user_preferences', 'food'], 'created_at': '2025-06-13T07:01:09.645061-07:00', 'updated_at': '2025-06-13T07:01:09.666647-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None}, {'id': '15b4579e-a074-4ef6-a318-03078a18858b', 'memory': 'Prefers sci-fi movies over thriller movies', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'session': 'cloud_demo', 'category': 'cloud_movie_preferences'}, 'categories': ['user_preferences', 'entertainment'], 'created_at': '2025-06-13T07:01:02.776810-07:00', 'updated_at': '2025-06-13T07:01:02.795856-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None}]\n", - "Delete all result: {'message': 'Memories deleted successfully!'}\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "🖇 AgentOps: \u001b[34m\u001b[34mSession Replay for mem0_memoryclient_sync_example.session trace: https://app.agentops.ai/sessions?trace_id=75c9c672ca2f2205d1bdb9b4f615ba2e\u001b[0m\u001b[0m\n", - "🖇 AgentOps: \u001b[34m\u001b[34mSession Replay for mem0_memoryclient_async_example trace: https://app.agentops.ai/sessions?trace_id=077cb3ff917063d6c4c668dd3de1a5ed\u001b[0m\u001b[0m\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "args: ([{'role': 'user', 'content': \"I'm planning to watch a movie tonight. Any recommendations?\"}, {'role': 'assistant', 'content': 'How about a thriller? They can be quite engaging.'}, {'role': 'user', 'content': \"I'm not a big fan of thriller movies but I love sci-fi movies.\"}, {'role': 'assistant', 'content': \"Got it! 
I'll avoid thriller recommendations and suggest sci-fi movies in the future.\"}],)\n", - "kwargs: {'user_id': 'alice_demo', 'metadata': {'category': 'async_cloud_movies', 'session': 'async_cloud_demo'}}\n", - "return_value: None\n", - "args: None\n", - "kwargs: None\n", - "return_value: \n", - "args: ([{'role': 'user', 'content': 'I prefer dark roast coffee over light roast'}],)\n", - "kwargs: {'user_id': 'alice_demo', 'metadata': {'type': 'async_cloud_preference', 'index': 0}}\n", - "return_value: None\n", - "args: None\n", - "kwargs: None\n", - "return_value: \n", - "args: ([{'role': 'user', 'content': 'I exercise every morning at 6 AM'}],)\n", - "kwargs: {'user_id': 'alice_demo', 'metadata': {'type': 'async_cloud_preference', 'index': 1}}\n", - "return_value: None\n", - "args: None\n", - "kwargs: None\n", - "return_value: \n", - "args: ([{'role': 'user', 'content': \"I'm vegetarian and avoid all meat products\"}],)\n", - "kwargs: {'user_id': 'alice_demo', 'metadata': {'type': 'async_cloud_preference', 'index': 2}}\n", - "return_value: None\n", - "args: None\n", - "kwargs: None\n", - "return_value: \n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/events.py:84: DeprecationWarning: output_format='v1.0' is deprecated therefore setting it to 'v1.1' by default.Check out the docs for more information: https://docs.mem0.ai/platform/quickstart#4-1-create-memories\n", - " self._context.run(self._callback, *self._args)\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1. {'results': [{'id': '2ef3c2da-4ad1-4b5d-b735-9852d78c17e7', 'event': 'ADD', 'memory': 'Prefers sci-fi movies over thriller movies'}]}\n", - "2. {'results': [{'id': '05b6099e-2269-4ba1-9c8c-476a160e180d', 'event': 'ADD', 'memory': 'Prefers dark roast coffee over light roast'}]}\n", - "3. {'results': [{'id': '38b92272-b8f2-491a-8674-2b37e60fe93a', 'event': 'ADD', 'memory': 'Exercises every morning at 6 AM'}]}\n", - "4. 
{'results': [{'id': 'e053d1bd-a584-4a82-ba61-eb2b00f299c8', 'event': 'ADD', 'memory': 'Is vegetarian'}, {'id': '2dcee7ea-0684-4ddc-bacd-8a66609649f9', 'event': 'ADD', 'memory': 'Avoids all meat products'}]}\n", - "get_search_attributes args: ('movie preferences',)\n", - "get_search_attributes kwargs: {'user_id': 'alice_demo'}\n", - "get_search_attributes return_value: None\n", - "get_search_attributes args: None\n", - "get_search_attributes kwargs: None\n", - "get_search_attributes return_value: \n", - "get_search_attributes args: ('food preferences',)\n", - "get_search_attributes kwargs: {'user_id': 'alice_demo'}\n", - "get_search_attributes return_value: None\n", - "get_search_attributes args: None\n", - "get_search_attributes kwargs: None\n", - "get_search_attributes return_value: \n", - "get_search_attributes args: ('work information',)\n", - "get_search_attributes kwargs: {'user_id': 'alice_demo'}\n", - "get_search_attributes return_value: None\n", - "get_search_attributes args: None\n", - "get_search_attributes kwargs: None\n", - "get_search_attributes return_value: \n", - "Search 1 result: [{'id': '2ef3c2da-4ad1-4b5d-b735-9852d78c17e7', 'memory': 'Prefers sci-fi movies over thriller movies', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'session': 'async_cloud_demo', 'category': 'async_cloud_movies'}, 'categories': ['user_preferences', 'entertainment'], 'created_at': '2025-06-13T07:01:33.908188-07:00', 'updated_at': '2025-06-13T07:01:33.928443-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.5863419103332085}, {'id': '05b6099e-2269-4ba1-9c8c-476a160e180d', 'memory': 'Prefers dark roast coffee over light roast', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'async_cloud_preference', 'index': 0}, 'categories': ['user_preferences', 'food'], 'created_at': '2025-06-13T07:01:33.765858-07:00', 'updated_at': '2025-06-13T07:01:33.785855-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.4206878417525355}, {'id': '2dcee7ea-0684-4ddc-bacd-8a66609649f9', 'memory': 'Avoids all meat products', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'async_cloud_preference', 'index': 2}, 'categories': None, 'created_at': '2025-06-13T07:01:36.120569-07:00', 'updated_at': '2025-06-13T07:01:36.138808-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.3659444262368807}, {'id': '38b92272-b8f2-491a-8674-2b37e60fe93a', 'memory': 'Exercises every morning at 6 AM', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'async_cloud_preference', 'index': 1}, 'categories': ['health'], 'created_at': '2025-06-13T07:01:33.281702-07:00', 'updated_at': '2025-06-13T07:01:33.300498-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.3010108740041443}]\n", - "Search 2 result: [{'id': '05b6099e-2269-4ba1-9c8c-476a160e180d', 'memory': 'Prefers dark roast coffee over light roast', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'async_cloud_preference', 'index': 0}, 'categories': ['user_preferences', 'food'], 'created_at': '2025-06-13T07:01:33.765858-07:00', 'updated_at': '2025-06-13T07:01:33.785855-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.5045933422688647}, {'id': 
'2dcee7ea-0684-4ddc-bacd-8a66609649f9', 'memory': 'Avoids all meat products', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'async_cloud_preference', 'index': 2}, 'categories': None, 'created_at': '2025-06-13T07:01:36.120569-07:00', 'updated_at': '2025-06-13T07:01:36.138808-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.48612332344055176}, {'id': '2ef3c2da-4ad1-4b5d-b735-9852d78c17e7', 'memory': 'Prefers sci-fi movies over thriller movies', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'session': 'async_cloud_demo', 'category': 'async_cloud_movies'}, 'categories': ['user_preferences', 'entertainment'], 'created_at': '2025-06-13T07:01:33.908188-07:00', 'updated_at': '2025-06-13T07:01:33.928443-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.43332618077553475}, {'id': 'e053d1bd-a584-4a82-ba61-eb2b00f299c8', 'memory': 'Is vegetarian', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'async_cloud_preference', 'index': 2}, 'categories': ['user_preferences', 'food'], 'created_at': '2025-06-13T07:01:34.655396-07:00', 'updated_at': '2025-06-13T07:01:34.728349-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.3886008858680725}, {'id': '38b92272-b8f2-491a-8674-2b37e60fe93a', 'memory': 'Exercises every morning at 6 AM', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'async_cloud_preference', 'index': 1}, 'categories': ['health'], 'created_at': '2025-06-13T07:01:33.281702-07:00', 'updated_at': '2025-06-13T07:01:33.300498-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.3528817804385299}]\n", - "Search 3 result: [{'id': '2dcee7ea-0684-4ddc-bacd-8a66609649f9', 'memory': 'Avoids all meat products', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'async_cloud_preference', 'index': 2}, 'categories': None, 'created_at': '2025-06-13T07:01:36.120569-07:00', 'updated_at': '2025-06-13T07:01:36.138808-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.3600524282486701}, {'id': '38b92272-b8f2-491a-8674-2b37e60fe93a', 'memory': 'Exercises every morning at 6 AM', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'async_cloud_preference', 'index': 1}, 'categories': ['health'], 'created_at': '2025-06-13T07:01:33.281702-07:00', 'updated_at': '2025-06-13T07:01:33.300498-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.3572185167334696}, {'id': '05b6099e-2269-4ba1-9c8c-476a160e180d', 'memory': 'Prefers dark roast coffee over light roast', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'async_cloud_preference', 'index': 0}, 'categories': ['user_preferences', 'food'], 'created_at': '2025-06-13T07:01:33.765858-07:00', 'updated_at': '2025-06-13T07:01:33.785855-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.32800289988517994}, {'id': 'e053d1bd-a584-4a82-ba61-eb2b00f299c8', 'memory': 'Is vegetarian', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'async_cloud_preference', 'index': 2}, 'categories': ['user_preferences', 'food'], 'created_at': '2025-06-13T07:01:34.655396-07:00', 'updated_at': 
'2025-06-13T07:01:34.728349-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.31821893562191406}, {'id': '2ef3c2da-4ad1-4b5d-b735-9852d78c17e7', 'memory': 'Prefers sci-fi movies over thriller movies', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'session': 'async_cloud_demo', 'category': 'async_cloud_movies'}, 'categories': ['user_preferences', 'entertainment'], 'created_at': '2025-06-13T07:01:33.908188-07:00', 'updated_at': '2025-06-13T07:01:33.928443-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None, 'score': 0.3099285733614652}]\n", - "Async cloud memories: [{'id': '2dcee7ea-0684-4ddc-bacd-8a66609649f9', 'memory': 'Avoids all meat products', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'async_cloud_preference', 'index': 2}, 'categories': ['food'], 'created_at': '2025-06-13T07:01:36.120569-07:00', 'updated_at': '2025-06-13T07:01:36.138808-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None}, {'id': 'e053d1bd-a584-4a82-ba61-eb2b00f299c8', 'memory': 'Is vegetarian', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'async_cloud_preference', 'index': 2}, 'categories': ['user_preferences', 'food'], 'created_at': '2025-06-13T07:01:34.655396-07:00', 'updated_at': '2025-06-13T07:01:34.728349-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None}, {'id': '2ef3c2da-4ad1-4b5d-b735-9852d78c17e7', 'memory': 'Prefers sci-fi movies over thriller movies', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'session': 'async_cloud_demo', 'category': 'async_cloud_movies'}, 'categories': ['user_preferences', 'entertainment'], 'created_at': '2025-06-13T07:01:33.908188-07:00', 'updated_at': '2025-06-13T07:01:33.928443-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None}, {'id': '05b6099e-2269-4ba1-9c8c-476a160e180d', 'memory': 'Prefers dark roast coffee over light roast', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'async_cloud_preference', 'index': 0}, 'categories': ['user_preferences', 'food'], 'created_at': '2025-06-13T07:01:33.765858-07:00', 'updated_at': '2025-06-13T07:01:33.785855-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None}, {'id': '38b92272-b8f2-491a-8674-2b37e60fe93a', 'memory': 'Exercises every morning at 6 AM', 'user_id': 'alice_demo', 'actor_id': None, 'metadata': {'type': 'async_cloud_preference', 'index': 1}, 'categories': ['health'], 'created_at': '2025-06-13T07:01:33.281702-07:00', 'updated_at': '2025-06-13T07:01:33.300498-07:00', 'expiration_date': None, 'structured_attributes': None, 'internal_metadata': None, 'deleted_at': None}]\n", - "Delete all result: {'message': 'Memories deleted successfully!'}\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "🖇 AgentOps: \u001b[34m\u001b[34mSession Replay for mem0_memoryclient_async_example.session trace: https://app.agentops.ai/sessions?trace_id=077cb3ff917063d6c4c668dd3de1a5ed\u001b[0m\u001b[0m\n" - ] - } - ], + "outputs": [], "source": [ "# Execute both sync and async demonstrations\n", "demonstrate_sync_memory_client(sample_messages, sample_preferences, user_id)\n", From a0456665239bbc8acb5c3c8242752f0047361a21 Mon Sep 17 00:00:00 2001 From: Pratyush Shukla Date: Tue, 17 Jun 2025 
04:25:20 +0530 Subject: [PATCH 3/7] rename dir to `llamaindex` --- examples/{llamaindex_examples => llamaindex}/README.md | 0 .../{llamaindex_examples => llamaindex}/llamaindex_example.ipynb | 0 .../{llamaindex_examples => llamaindex}/llamaindex_example.py | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename examples/{llamaindex_examples => llamaindex}/README.md (100%) rename examples/{llamaindex_examples => llamaindex}/llamaindex_example.ipynb (100%) rename examples/{llamaindex_examples => llamaindex}/llamaindex_example.py (100%) diff --git a/examples/llamaindex_examples/README.md b/examples/llamaindex/README.md similarity index 100% rename from examples/llamaindex_examples/README.md rename to examples/llamaindex/README.md diff --git a/examples/llamaindex_examples/llamaindex_example.ipynb b/examples/llamaindex/llamaindex_example.ipynb similarity index 100% rename from examples/llamaindex_examples/llamaindex_example.ipynb rename to examples/llamaindex/llamaindex_example.ipynb diff --git a/examples/llamaindex_examples/llamaindex_example.py b/examples/llamaindex/llamaindex_example.py similarity index 100% rename from examples/llamaindex_examples/llamaindex_example.py rename to examples/llamaindex/llamaindex_example.py From 2d2c893a70c546dd98d71c429a35ca25a254bd32 Mon Sep 17 00:00:00 2001 From: Pratyush Shukla Date: Tue, 17 Jun 2025 04:30:22 +0530 Subject: [PATCH 4/7] fix notebooks metadata --- examples/ag2/async_human_input.ipynb | 7 ++++ .../anthropic/anthropic-example-sync.ipynb | 35 +++++++++++++++++-- 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/examples/ag2/async_human_input.ipynb b/examples/ag2/async_human_input.ipynb index 0f1c6b0bf..39ea10c3c 100755 --- a/examples/ag2/async_human_input.ipynb +++ b/examples/ag2/async_human_input.ipynb @@ -179,6 +179,13 @@ "source": [ "agentops.end_trace(tracer, end_state=\"Success\")" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/examples/anthropic/anthropic-example-sync.ipynb b/examples/anthropic/anthropic-example-sync.ipynb index 2a7df162e..9944f95b6 100644 --- a/examples/anthropic/anthropic-example-sync.ipynb +++ b/examples/anthropic/anthropic-example-sync.ipynb @@ -246,6 +246,7 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "execution": { "iopub.execute_input": "2024-11-09T19:21:38.031580Z", @@ -257,7 +258,37 @@ "trusted": true }, "outputs": [], - "source": "stream = client.messages.create(\n max_tokens=2400,\n model=\"claude-3-7-sonnet-20250219\", # Comma added here\n messages=[\n {\n \"role\": \"user\",\n \"content\": \"Create a story based on the three sentence fragments given to you, it has been combined into one below.\",\n },\n {\n \"role\": \"assistant\",\n \"content\": \"{A foolish doll} {died in a world} {of ended dreams.}\",\n },\n {\"role\": \"assistant\", \"content\": defaultstory},\n {\n \"role\": \"user\",\n \"content\": \"Create a story based on the three sentence fragments given to you, it has been combined into one below.\",\n },\n {\"role\": \"assistant\", \"content\": generatedsentence},\n ],\n stream=True,\n)\n\nresponse = \"\"\nfor event in stream:\n if event.type == \"content_block_delta\":\n response += event.delta.text\n elif event.type == \"message_stop\":\n print(generatedsentence)\n print(response)" + "source": [ + "stream = client.messages.create(\n", + " max_tokens=2400,\n", + " model=\"claude-3-7-sonnet-20250219\", # Comma added here\n", + " messages=[\n", + " {\n", + " 
\"role\": \"user\",\n", + " \"content\": \"Create a story based on the three sentence fragments given to you, it has been combined into one below.\",\n", + " },\n", + " {\n", + " \"role\": \"assistant\",\n", + " \"content\": \"{A foolish doll} {died in a world} {of ended dreams.}\",\n", + " },\n", + " {\"role\": \"assistant\", \"content\": defaultstory},\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": \"Create a story based on the three sentence fragments given to you, it has been combined into one below.\",\n", + " },\n", + " {\"role\": \"assistant\", \"content\": generatedsentence},\n", + " ],\n", + " stream=True,\n", + ")\n", + "\n", + "response = \"\"\n", + "for event in stream:\n", + " if event.type == \"content_block_delta\":\n", + " response += event.delta.text\n", + " elif event.type == \"message_stop\":\n", + " print(generatedsentence)\n", + " print(response)" + ] }, { "cell_type": "markdown", @@ -310,4 +341,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} From 747e40d7835aaeb1e45840dfac98b03631f0a4e6 Mon Sep 17 00:00:00 2001 From: Pratyush Shukla Date: Tue, 17 Jun 2025 04:34:37 +0530 Subject: [PATCH 5/7] title overrides when generating doc using script --- examples/generate_documentation.py | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/examples/generate_documentation.py b/examples/generate_documentation.py index 532bfd28c..57aefe867 100644 --- a/examples/generate_documentation.py +++ b/examples/generate_documentation.py @@ -22,6 +22,22 @@ """ +# Title overrides to reflect correct names +TITLE_OVERRIDES = { + "ag2": "AG2", + "autogen": "AutoGen", + "crewai": "CrewAI", + "google_adk": "Google ADK", + "google_genai": "Google GenAI", + "langchain": "LangChain", + "litellm": "LiteLLM", + "llamaindex": "LlamaIndex", + "openai": "OpenAI", + "openai_agents": "OpenAI Agents", + "watsonx": "WatsonX", + "xai": "xAI", +} + def convert_notebook_to_markdown(notebook_path): """Convert Jupyter notebook to markdown using jupyter nbconvert.""" @@ -104,7 +120,14 @@ def generate_mdx_content(notebook_path, processed_content, frontmatter=None): if not frontmatter: # Generate new frontmatter folder_name = Path(notebook_path).parent.name - title = f"{folder_name.replace('_', ' ').title()} Example" + + # Check for title override, otherwise use default title case conversion + if folder_name in TITLE_OVERRIDES: + base_title = TITLE_OVERRIDES[folder_name] + else: + base_title = folder_name.replace("_", " ").title() + + title = f"{base_title}" # Extract description from first heading or use default description = f"{title} example using AgentOps" From 6dda6d18a500715b2a505841c1600602c06c0204 Mon Sep 17 00:00:00 2001 From: Pratyush Shukla Date: Tue, 17 Jun 2025 05:09:36 +0530 Subject: [PATCH 6/7] sync ag2 version --- agentops/instrumentation/__init__.py | 2 +- agentops/instrumentation/ag2/instrumentor.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/agentops/instrumentation/__init__.py b/agentops/instrumentation/__init__.py index 84ace1d6d..11f7b6331 100644 --- a/agentops/instrumentation/__init__.py +++ b/agentops/instrumentation/__init__.py @@ -92,7 +92,7 @@ class InstrumentorConfig(TypedDict): "class_name": "CrewAIInstrumentor", "min_version": "0.56.0", }, - "autogen": {"module_name": "agentops.instrumentation.ag2", "class_name": "AG2Instrumentor", "min_version": "0.1.0"}, + "autogen": {"module_name": "agentops.instrumentation.ag2", "class_name": "AG2Instrumentor", "min_version": "0.3.2"}, "agents": { 
"module_name": "agentops.instrumentation.openai_agents", "class_name": "OpenAIAgentsInstrumentor", diff --git a/agentops/instrumentation/ag2/instrumentor.py b/agentops/instrumentation/ag2/instrumentor.py index 6c42ce859..661e7415f 100644 --- a/agentops/instrumentation/ag2/instrumentor.py +++ b/agentops/instrumentation/ag2/instrumentor.py @@ -32,7 +32,7 @@ class AG2Instrumentor(BaseInstrumentor): def instrumentation_dependencies(self) -> Collection[str]: """Return packages required for instrumentation.""" - return ["autogen >= 0.3.2"] + return ["ag2 >= 0.3.2"] def _instrument(self, **kwargs): """Instrument AG2 components.""" From 7d3abbc69f0cc31b5792233a2a35302a57a065bc Mon Sep 17 00:00:00 2001 From: Pratyush Shukla Date: Tue, 17 Jun 2025 05:11:18 +0530 Subject: [PATCH 7/7] remove ag2 human async example --- examples/ag2/README.md | 10 +- examples/ag2/agentchat_with_memory.ipynb | 320 ----------------------- examples/ag2/agentchat_with_memory.py | 128 --------- 3 files changed, 2 insertions(+), 456 deletions(-) delete mode 100644 examples/ag2/agentchat_with_memory.ipynb delete mode 100644 examples/ag2/agentchat_with_memory.py diff --git a/examples/ag2/README.md b/examples/ag2/README.md index 6d218d3b3..7c2ed2b7c 100644 --- a/examples/ag2/README.md +++ b/examples/ag2/README.md @@ -12,20 +12,14 @@ This directory contains examples of using AG2 (AutoGen 2.0) with AgentOps instru ## Examples -### 1. Agent Chat with Memory - -Example: `agentchat_with_memory` -This example demonstrates: -- Agent chat with persistent memory - -### 2. Asynchronous Human Input +### 1. Asynchronous Human Input Example: `async_human_input` This example demonstrates: - Asynchronous human input handling -### 3. Wikipedia Search Tool +### 2. Wikipedia Search Tool Example: `tools_wikipedia_search` diff --git a/examples/ag2/agentchat_with_memory.ipynb b/examples/ag2/agentchat_with_memory.ipynb deleted file mode 100644 index 7f443675a..000000000 --- a/examples/ag2/agentchat_with_memory.ipynb +++ /dev/null @@ -1,320 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "A3guIYLG6PnX" - }, - "source": [ - "# Observe an Agent with memory powered by Mem0" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "di1opljX6sQT" - }, - "source": [ - "This notebook demonstrates an intelligent customer service chatbot system that combines:\n", - "\n", - "- AG2 for conversational agents\n", - "- Mem0 for memory management\n", - "\n", - "[Mem0](https://www.mem0.ai/) provides a smart, self-improving memory layer for Large Language Models (LLMs), enabling developers to create personalized AI experiences that evolve with each user interaction. Refer [docs](https://docs.mem0.ai/overview) for more information.\n", - "\n", - "The implementation showcases how to initialize agents, manage conversation memory, and facilitate multi-agent conversations for enhanced problem-solving in customer support scenarios.\n", - "\n", - "With AgentOps, you can observe the agent's memory and interactions in real-time, providing insights into how the agent learns and adapts over time." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Pre-requisites\n", - "- AgentOps API key from [AgentOps](https://app.agentops.ai/).\n", - "- Mem0 API key from [Mem0 Platform](https://app.mem0.ai/).\n", - "- OpenAI API key from [OpenAI](https://platform.openai.com/)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Install required dependencies\n", - "%pip install agentops\n", - "%pip install \"ag2[openai]\" \n", - "%pip install mem0ai" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "4nQWyJ-n9qOB", - "outputId": "de897f4f-32fd-4359-f021-c3510467b69c" - }, - "outputs": [], - "source": [ - "import os\n", - "from dotenv import load_dotenv\n", - "import agentops\n", - "from mem0 import MemoryClient\n", - "from autogen import ConversableAgent" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "load_dotenv()\n", - "os.environ[\"AGENTOPS_API_KEY\"] = os.getenv(\"AGENTOPS_API_KEY\", \"your_api_key_here\")\n", - "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\", \"your_api_key_here\")\n", - "os.environ[\"MEM0_API_KEY\"] = os.getenv(\"MEM0_API_KEY\", \"your_api_key_here\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "6GAZAkGmAjsT" - }, - "source": [ - "## Initialize Agent and Memory\n", - "\n", - "The conversational agent is set up using the 'gpt-4o' model and a mem0 client. We'll utilize the client's methods for storing and accessing memories.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "agentops.init(auto_start_session=False)\n", - "tracer = agentops.start_trace(trace_name=\"AG2 Agent using Mem0\", tags=[\"ag2-mem0-example\", \"agentops-example\"])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "xWSEHMKT9qLz" - }, - "outputs": [], - "source": [ - "agent = ConversableAgent(\n", - " \"chatbot\",\n", - " llm_config={\"config_list\": [{\"model\": \"gpt-4o\", \"api_key\": os.environ.get(\"OPENAI_API_KEY\")}]},\n", - " code_execution_config=False,\n", - " function_map=None,\n", - " human_input_mode=\"NEVER\",\n", - ")\n", - "\n", - "memory = MemoryClient()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "c7vGQoMg_KhS" - }, - "source": [ - "Initialize a conversation history for a Best Buy customer service chatbot. It contains a list of message exchanges between the user and the assistant, structured as dictionaries with 'role' and 'content' keys. The entire conversation is then stored in memory using the `memory.add()` method, associated with the identifier \"customer_service_bot\"." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "b6ghSobs-mYo", - "outputId": "18a6e657-49ba-41cd-8e51-2e76cf668967" - }, - "outputs": [], - "source": [ - "conversation = [\n", - " {\n", - " \"role\": \"assistant\",\n", - " \"content\": \"Hi, I'm Best Buy's chatbot!\\n\\nThanks for being a My Best Buy TotalTM member.\\n\\nWhat can I help you with?\",\n", - " },\n", - " {\n", - " \"role\": \"user\",\n", - " \"content\": 'Seeing horizontal lines on our tv. TV model: Sony - 77\" Class BRAVIA XR A80K OLED 4K UHD Smart Google TV',\n", - " },\n", - " {\n", - " \"role\": \"assistant\",\n", - " \"content\": \"Thanks for being a My Best Buy Total™ member. 
I can connect you to an expert immediately - just one perk of your membership!\\n\\nSelect the button below when you're ready to chat.\",\n", - " },\n", - " {\n", - " \"role\": \"assistant\",\n", - " \"content\": \"Good evening, thank you for choosing Best Buy, Fnu. My name is Lovely. I hope you are doing well. I'm sorry to hear that you're seeing horizontal lines on your TV.\\n\\nI'm absolutely committed to exploring all possible ways to assist you to fix this issue.\\n\\nTo ensure that we are on the right account, may I please have your email address registered with your Best Buy account?\",\n", - " },\n", - " {\"role\": \"user\", \"content\": \"dd@gmail.com\"},\n", - " {\n", - " \"role\": \"assistant\",\n", - " \"content\": \"Perfect! Thank you for providing all the details, surely you have made my job easier by doing this. I really appreciate it.\\n\\nI also want to take a moment to express our heartfelt appreciation for your trust and loyalty. Thank you for being an amazing customer of BestBuy Total.\\n\\nCould you please help me with the order number or product's details to check it quickly?\\n\\nSamsung - 49\\\" Odyssey OLED G9 (G95SC) DQHD 240Hz 0.03ms G-Sync Compatible Curved Smart Gaming Monitor - Silver - just to confirm this is the item, right?\",\n", - " },\n", - " {\"role\": \"user\", \"content\": \"Order number: 112217629\"},\n", - " {\n", - " \"role\": \"assistant\",\n", - " \"content\": \"Superb! Thank you for confirmation.\\n\\nThank you for your patience. After exploring all possible solutions, I can help you to arrange a home repair appointment for your device. Our Geek Squad experts will visit your home to inspect and fix your device.\\n\\nIt's great that you have a protection plan - rest assured, we've got your back! As a valued Total member, you can avail this service at a minimal service fee. This fee, applicable to all repairs, covers the cost of diagnosing the issue and any small parts needed for the repair. It's part of our 24-month free protection plan.\\n\\nPlease click here to review the service fee and plan coverage details -\\n\\nhttps://www.bestbuy.com/site/best-buy-membership/best-buy-protection/pcmcat1608643232014.c?id=pcmcat1608643232014#jl-servicefees\\n\\nFnu - just to confirm shall I proceed to schedule the appointment?\",\n", - " },\n", - " {\"role\": \"user\", \"content\": \"Yes please\"},\n", - " {\"role\": \"assistant\", \"content\": \"When should I schedule the appointment?\"},\n", - " {\"role\": \"user\", \"content\": \"Schedule it for tomorrow please\"},\n", - "]\n", - "\n", - "memory.add(messages=conversation, user_id=\"customer_service_bot\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hlxxncxkAvck" - }, - "source": [ - "## Agent Inference\n", - "\n", - "We ask a question to the agent, utilizing mem0 to retrieve relevant memories. The agent then formulates a response based on both the question and the retrieved contextual information." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "wYdDkuKc-Xc8", - "outputId": "b50f55d3-a280-4c0d-82e5-43fc0589109b" - }, - "outputs": [], - "source": [ - "data = \"I forgot the order number, can you quickly tell me?\"\n", - "\n", - "relevant_memories = memory.search(data, user_id=\"customer_service_bot\")\n", - "flatten_relevant_memories = \"\\n\".join([m[\"memory\"] for m in relevant_memories])\n", - "\n", - "prompt = f\"\"\"Answer the user question considering the memories. 
Keep answers clear and concise.\n", - "Memories:\n", - "{flatten_relevant_memories}\n", - "\\n\\n\n", - "Question: {data}\n", - "\"\"\"\n", - "\n", - "reply = agent.generate_reply(messages=[{\"content\": prompt, \"role\": \"user\"}])\n", - "print(reply)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "sryX0gfdBGGD" - }, - "source": [ - "## Multi Agent Conversation\n", - "\n", - "Initialize two AI agents: a \"manager\" for resolving customer issues and a \"customer_bot\" for gathering information on customer problems, both using GPT-4. It then retrieves relevant memories for a given question, combining them with the question into a prompt. This prompt can be used by either the manager or customer_bot to generate a contextually informed response." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Vq5BFL2l-XZl" - }, - "outputs": [], - "source": [ - "manager = ConversableAgent(\n", - " \"manager\",\n", - " system_message=\"You are a manager who helps in resolving customer issues.\",\n", - " llm_config={\"config_list\": [{\"model\": \"gpt-4o-mini\", \"api_key\": os.environ.get(\"OPENAI_API_KEY\")}]},\n", - " human_input_mode=\"NEVER\",\n", - ")\n", - "\n", - "customer_bot = ConversableAgent(\n", - " \"customer_bot\",\n", - " system_message=\"You are a customer service bot who gathers information on issues customers are facing. Keep answers clear and concise.\",\n", - " llm_config={\"config_list\": [{\"model\": \"gpt-4\", \"api_key\": os.environ.get(\"OPENAI_API_KEY\")}]},\n", - " human_input_mode=\"NEVER\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "iX4ehmc6Fbib" - }, - "outputs": [], - "source": [ - "data = \"When is the appointment?\"\n", - "\n", - "relevant_memories = memory.search(data, user_id=\"customer_service_bot\")\n", - "flatten_relevant_memories = \"\\n\".join([m[\"memory\"] for m in relevant_memories])\n", - "\n", - "prompt = f\"\"\"\n", - "Context:\n", - "{flatten_relevant_memories}\n", - "\\n\\n\n", - "Question: {data}\n", - "\"\"\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "D3paRZWvCIzt", - "outputId": "15eadb7c-5973-44f1-de43-5e6cdebe88de" - }, - "outputs": [], - "source": [ - "result = manager.send(prompt, customer_bot, request_reply=True)\n", - "\n", - "agentops.end_trace(tracer, end_state=\"Success\")" - ] - } - ], - "metadata": { - "front_matter": { - "description": "Use Mem0 to create agents with memory.", - "tags": [ - "mem0", - "integration", - "memory" - ] - }, - "kernelspec": { - "display_name": "agentops (3.11.11)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.11" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/examples/ag2/agentchat_with_memory.py b/examples/ag2/agentchat_with_memory.py deleted file mode 100644 index 7d3099509..000000000 --- a/examples/ag2/agentchat_with_memory.py +++ /dev/null @@ -1,128 +0,0 @@ -# Observe an Agent with memory powered by Mem0 -# This notebook demonstrates an intelligent customer service chatbot system that combines: -# -# - AG2 for conversational agents -# - Mem0 for memory management -# -# [Mem0](https://www.mem0.ai/) provides a smart, self-improving memory 
-# [Mem0](https://www.mem0.ai/) provides a smart, self-improving memory layer for Large Language Models (LLMs), enabling developers to create personalized AI experiences that evolve with each user interaction. Refer [docs](https://docs.mem0.ai/overview) for more information.
-#
-# The implementation showcases how to initialize agents, manage conversation memory, and facilitate multi-agent conversations for enhanced problem-solving in customer support scenarios.
-#
-# With AgentOps, you can observe the agent's memory and interactions in real-time, providing insights into how the agent learns and adapts over time.
-# ## Pre-requisites
-# - AgentOps API key from [AgentOps](https://app.agentops.ai/).
-# - Mem0 API key from [Mem0 Platform](https://app.mem0.ai/).
-# - OpenAI API key from [OpenAI](https://platform.openai.com/).
-# # Install required dependencies
-# %pip install agentops
-# %pip install "ag2[openai]"
-# %pip install mem0ai
-import os
-from dotenv import load_dotenv
-import agentops
-from mem0 import MemoryClient
-from autogen import ConversableAgent
-
-load_dotenv()
-os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY", "your_api_key_here")
-os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "your_openai_api_key_here")
-os.environ["MEM0_API_KEY"] = os.getenv("MEM0_API_KEY", "your_api_key_here")
-
-# ## Initialize Agent and Memory
-#
-# The conversational agent is set up using the 'gpt-4o' model and a mem0 client. We'll utilize the client's methods for storing and accessing memories.
-agentops.init(auto_start_session=False)
-tracer = agentops.start_trace(trace_name="AG2 Agent using Mem0", tags=["ag2-mem0-example", "agentops-example"])
-
-agent = ConversableAgent(
-    "chatbot",
-    llm_config={"config_list": [{"model": "gpt-4o", "api_key": os.environ.get("OPENAI_API_KEY")}]},
-    code_execution_config=False,
-    function_map=None,
-    human_input_mode="NEVER",
-)
-
-memory = MemoryClient()
-
-# Initialize a conversation history for a Best Buy customer service chatbot. It contains a list of message exchanges between the user and the assistant, structured as dictionaries with 'role' and 'content' keys. The entire conversation is then stored in memory using the `memory.add()` method, associated with the identifier "customer_service_bot".
-conversation = [
-    {
-        "role": "assistant",
-        "content": "Hi, I'm Best Buy's chatbot!\n\nThanks for being a My Best Buy TotalTM member.\n\nWhat can I help you with?",
-    },
-    {
-        "role": "user",
-        "content": 'Seeing horizontal lines on our tv. TV model: Sony - 77" Class BRAVIA XR A80K OLED 4K UHD Smart Google TV',
-    },
-    {
-        "role": "assistant",
-        "content": "Thanks for being a My Best Buy Total™ member. I can connect you to an expert immediately - just one perk of your membership!\n\nSelect the button below when you're ready to chat.",
-    },
-    {
-        "role": "assistant",
-        "content": "Good evening, thank you for choosing Best Buy, Fnu. My name is Lovely. I hope you are doing well. I'm sorry to hear that you're seeing horizontal lines on your TV.\n\nI'm absolutely committed to exploring all possible ways to assist you to fix this issue.\n\nTo ensure that we are on the right account, may I please have your email address registered with your Best Buy account?",
-    },
-    {"role": "user", "content": "dd@gmail.com"},
-    {
-        "role": "assistant",
-        "content": "Perfect! Thank you for providing all the details, surely you have made my job easier by doing this. I really appreciate it.\n\nI also want to take a moment to express our heartfelt appreciation for your trust and loyalty. Thank you for being an amazing customer of BestBuy Total.\n\nCould you please help me with the order number or product's details to check it quickly?\n\nSamsung - 49\" Odyssey OLED G9 (G95SC) DQHD 240Hz 0.03ms G-Sync Compatible Curved Smart Gaming Monitor - Silver - just to confirm this is the item, right?",
-    },
-    {"role": "user", "content": "Order number: 112217629"},
-    {
-        "role": "assistant",
-        "content": "Superb! Thank you for confirmation.\n\nThank you for your patience. After exploring all possible solutions, I can help you to arrange a home repair appointment for your device. Our Geek Squad experts will visit your home to inspect and fix your device.\n\nIt's great that you have a protection plan - rest assured, we've got your back! As a valued Total member, you can avail this service at a minimal service fee. This fee, applicable to all repairs, covers the cost of diagnosing the issue and any small parts needed for the repair. It's part of our 24-month free protection plan.\n\nPlease click here to review the service fee and plan coverage details -\n\nhttps://www.bestbuy.com/site/best-buy-membership/best-buy-protection/pcmcat1608643232014.c?id=pcmcat1608643232014#jl-servicefees\n\nFnu - just to confirm shall I proceed to schedule the appointment?",
-    },
-    {"role": "user", "content": "Yes please"},
-    {"role": "assistant", "content": "When should I schedule the appointment?"},
-    {"role": "user", "content": "Schedule it for tomorrow please"},
-]
-
-memory.add(messages=conversation, user_id="customer_service_bot")
-
-# ## Agent Inference
-#
-# We ask a question to the agent, utilizing mem0 to retrieve relevant memories. The agent then formulates a response based on both the question and the retrieved contextual information.
-data = "I forgot the order number, can you quickly tell me?"
-relevant_memories = memory.search(data, user_id="customer_service_bot")
-flatten_relevant_memories = "\n".join([m["memory"] for m in relevant_memories])
-
-prompt = f"""Answer the user question considering the memories. Keep answers clear and concise.
-Memories:
-{flatten_relevant_memories}
-\n\n
-Question: {data}
-"""
-
-reply = agent.generate_reply(messages=[{"content": prompt, "role": "user"}])
-print(reply)
-
-# ## Multi Agent Conversation
-#
-# Initialize two AI agents: a "manager" for resolving customer issues and a "customer_bot" for gathering information on customer problems, both using GPT-4. It then retrieves relevant memories for a given question, combining them with the question into a prompt. This prompt can be used by either the manager or customer_bot to generate a contextually informed response.
-manager = ConversableAgent(
-    "manager",
-    system_message="You are a manager who helps in resolving customer issues.",
-    llm_config={"config_list": [{"model": "gpt-4o-mini", "api_key": os.environ.get("OPENAI_API_KEY")}]},
-    human_input_mode="NEVER",
-)
-
-customer_bot = ConversableAgent(
-    "customer_bot",
-    system_message="You are a customer service bot who gathers information on issues customers are facing. Keep answers clear and concise.",
-    llm_config={"config_list": [{"model": "gpt-4", "api_key": os.environ.get("OPENAI_API_KEY")}]},
-    human_input_mode="NEVER",
-)
-
-data = "When is the appointment?"
-relevant_memories = memory.search(data, user_id="customer_service_bot")
-flatten_relevant_memories = "\n".join([m["memory"] for m in relevant_memories])
-
-prompt = f"""
-Context:
-{flatten_relevant_memories}
-\n\n
-Question: {data}
-"""
-
-result = manager.send(prompt, customer_bot, request_reply=True)
-agentops.end_trace(tracer, end_state="Success")