diff --git a/src/agentex/lib/cli/commands/init.py b/src/agentex/lib/cli/commands/init.py index 2977757cd..d00149b57 100644 --- a/src/agentex/lib/cli/commands/init.py +++ b/src/agentex/lib/cli/commands/init.py @@ -23,6 +23,7 @@ class TemplateType(str, Enum): TEMPORAL = "temporal" + TEMPORAL_OPENAI_AGENTS = "temporal-openai-agents" DEFAULT = "default" SYNC = "sync" @@ -54,6 +55,7 @@ def create_project_structure( # Define project files based on template type project_files = { TemplateType.TEMPORAL: ["acp.py", "workflow.py", "run_worker.py"], + TemplateType.TEMPORAL_OPENAI_AGENTS: ["acp.py", "workflow.py", "run_worker.py", "activities.py"], TemplateType.DEFAULT: ["acp.py"], TemplateType.SYNC: ["acp.py"], }[template_type] @@ -152,13 +154,26 @@ def validate_agent_name(text: str) -> bool | str: "What type of template would you like to create?", choices=[ {"name": "Async - ACP Only", "value": TemplateType.DEFAULT}, - {"name": "Async - Temporal", "value": TemplateType.TEMPORAL}, + {"name": "Async - Temporal", "value": "temporal_submenu"}, {"name": "Sync ACP", "value": TemplateType.SYNC}, ], ).ask() if not template_type: return + # If Temporal was selected, show sub-menu for Temporal variants + if template_type == "temporal_submenu": + console.print() + template_type = questionary.select( + "Which Temporal template would you like to use?", + choices=[ + {"name": "Basic Temporal", "value": TemplateType.TEMPORAL}, + {"name": "Temporal + OpenAI Agents SDK (Recommended)", "value": TemplateType.TEMPORAL_OPENAI_AGENTS}, + ], + ).ask() + if not template_type: + return + project_path = questionary.path( "Where would you like to create your project?", default="." ).ask() diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/.dockerignore.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/.dockerignore.j2 new file mode 100644 index 000000000..c2d7fca4d --- /dev/null +++ b/src/agentex/lib/cli/templates/temporal-openai-agents/.dockerignore.j2 @@ -0,0 +1,43 @@ +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# Environments +.env** +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# IDE +.idea/ +.vscode/ +*.swp +*.swo + +# Git +.git +.gitignore + +# Misc +.DS_Store diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/Dockerfile-uv.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/Dockerfile-uv.j2 new file mode 100644 index 000000000..81dd9c5b6 --- /dev/null +++ b/src/agentex/lib/cli/templates/temporal-openai-agents/Dockerfile-uv.j2 @@ -0,0 +1,48 @@ +# syntax=docker/dockerfile:1.3 +FROM python:3.12-slim +COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ + +# Install system dependencies +RUN apt-get update && apt-get install -y \ + htop \ + vim \ + curl \ + tar \ + python3-dev \ + postgresql-client \ + build-essential \ + libpq-dev \ + gcc \ + cmake \ + netcat-openbsd \ + nodejs \ + npm \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/** + +# Install tctl (Temporal CLI) +RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \ + tar -xzf /tmp/tctl.tar.gz -C /usr/local/bin && \ + chmod +x /usr/local/bin/tctl && \ + rm /tmp/tctl.tar.gz + +RUN uv pip install --system --upgrade pip setuptools wheel + +ENV UV_HTTP_TIMEOUT=1000 + +# Copy just the pyproject.toml file to optimize caching +COPY {{ project_path_from_build_root }}/pyproject.toml /app/{{ 
project_path_from_build_root }}/pyproject.toml + +WORKDIR /app/{{ project_path_from_build_root }} + +# Install the required Python packages using uv +RUN uv pip install --system . + +# Copy the project code +COPY {{ project_path_from_build_root }}/project /app/{{ project_path_from_build_root }}/project + +# Run the ACP server using uvicorn +CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] + +# When we deploy the worker, we will replace the CMD with the following +# CMD ["python", "-m", "run_worker"] \ No newline at end of file diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/Dockerfile.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/Dockerfile.j2 new file mode 100644 index 000000000..4c1798c42 --- /dev/null +++ b/src/agentex/lib/cli/templates/temporal-openai-agents/Dockerfile.j2 @@ -0,0 +1,48 @@ +# syntax=docker/dockerfile:1.3 +FROM python:3.12-slim +COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ + +# Install system dependencies +RUN apt-get update && apt-get install -y \ + htop \ + vim \ + curl \ + tar \ + python3-dev \ + postgresql-client \ + build-essential \ + libpq-dev \ + gcc \ + cmake \ + netcat-openbsd \ + node \ + npm \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# Install tctl (Temporal CLI) +RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \ + tar -xzf /tmp/tctl.tar.gz -C /usr/local/bin && \ + chmod +x /usr/local/bin/tctl && \ + rm /tmp/tctl.tar.gz + +RUN uv pip install --system --upgrade pip setuptools wheel + +ENV UV_HTTP_TIMEOUT=1000 + +# Copy just the requirements file to optimize caching +COPY {{ project_path_from_build_root }}/requirements.txt /app/{{ project_path_from_build_root }}/requirements.txt + +WORKDIR /app/{{ project_path_from_build_root }} + +# Install the required Python packages +RUN uv pip install --system -r requirements.txt + +# Copy the project code +COPY {{ project_path_from_build_root }}/project /app/{{ project_path_from_build_root }}/project + +# Run the ACP server using uvicorn +CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] + +# When we deploy the worker, we will replace the CMD with the following +# CMD ["python", "-m", "run_worker"] \ No newline at end of file diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/README.md.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/README.md.j2 new file mode 100644 index 000000000..071fc24ba --- /dev/null +++ b/src/agentex/lib/cli/templates/temporal-openai-agents/README.md.j2 @@ -0,0 +1,224 @@ +# {{ agent_name }} - AgentEx Temporal + OpenAI Agents SDK Template + +This is a starter template for building AI agents with the AgentEx framework, Temporal workflows, and OpenAI Agents SDK. 
It provides a production-ready foundation with: + +- **Durable execution** via Temporal workflows +- **AI agent capabilities** via OpenAI Agents SDK +- **Tool use** via Temporal activities +- **Streaming responses** for real-time feedback +- **Conversation state management** across turns +- **Tracing/observability** via SGP integration + +## What You'll Learn + +- **Tasks**: A task is a grouping mechanism for related messages (like a conversation thread) +- **Messages**: Communication objects within a task (text, data, instructions) +- **Temporal Workflows**: Long-running processes with state management and async operations +- **Activities**: Non-deterministic operations (API calls, I/O) that Temporal can retry and recover +- **OpenAI Agents SDK**: Building AI agents with tools, instructions, and streaming + +## Running the Agent + +1. Run the agent locally: +```bash +agentex agents run --manifest manifest.yaml +``` + +The agent will start on port 8000 and be ready to handle conversations. + +## Project Structure + +``` +{{ project_name }}/ +├── project/ # Your agent's code +│ ├── __init__.py +│ ├── acp.py # ACP server with OpenAI plugin setup +│ ├── workflow.py # Temporal workflow with OpenAI agent +│ ├── activities.py # Temporal activities (tools for your agent) +│ └── run_worker.py # Temporal worker setup +├── Dockerfile # Container definition +├── manifest.yaml # Deployment config +├── dev.ipynb # Development notebook for testing +{% if use_uv %} +└── pyproject.toml # Dependencies (uv) +{% else %} +└── requirements.txt # Dependencies (pip) +{% endif %} +``` + +## Key Concepts + +### Activities as Tools + +Activities are Temporal's way of handling non-deterministic operations. In this template, activities also serve as tools for your OpenAI agent: + +```python +# In activities.py - define the activity +@activity.defn +async def get_weather() -> str: + return "Sunny, 72°F" + +# In workflow.py - use it as a tool for the agent +agent = Agent( + name="my-agent", + tools=[ + openai_agents.workflow.activity_as_tool( + get_weather, + start_to_close_timeout=timedelta(minutes=5), + ), + ], +) +``` + +### Conversation State + +The workflow maintains conversation history across turns using `StateModel`: + +```python +class StateModel(BaseModel): + input_list: List[Dict[str, Any]] # Conversation history + turn_number: int # Turn counter for tracing +``` + +### Tracing + +Each conversation turn creates a tracing span for observability: + +```python +async with adk.tracing.span( + trace_id=params.task.id, + name=f"Turn {self._state.turn_number}", + input=turn_input.model_dump(), +) as span: + # Agent execution happens here +``` + +## Adding New Tools/Activities + +See the detailed instructions in `project/activities.py`. The process is: + +1. **Define** the activity in `activities.py` +2. **Register** it in `run_worker.py` +3. **Add** it as a tool in `workflow.py` + +## Temporal Dashboard + +Monitor your workflows and activities at: + +``` +http://localhost:8080 +``` + +The dashboard shows: +- Running and completed workflows +- Activity execution history +- Retries and failures +- Workflow state and signals + +## Development + +### 1. Customize the Agent + +Edit `project/workflow.py` to change: +- Agent instructions +- Model (default: `gpt-4o-mini`) +- Tools available to the agent + +### 2. Add New Activities + +See `project/activities.py` for detailed instructions on adding new tools. + +### 3. 
Test with the Development Notebook + +```bash +jupyter notebook dev.ipynb +# Or in VS Code +code dev.ipynb +``` + +### 4. Manage Dependencies + +{% if use_uv %} +```bash +# Add new dependencies +agentex uv add requests anthropic + +# Install/sync dependencies +agentex uv sync +``` +{% else %} +```bash +# Add to requirements.txt +echo "requests" >> requirements.txt +pip install -r requirements.txt +``` +{% endif %} + +## Local Development + +### 1. Start the Agentex Backend +```bash +cd agentex +make dev +``` + +### 2. Setup Your Agent's Environment +```bash +{% if use_uv %} +agentex uv sync +source .venv/bin/activate +{% else %} +pip install -r requirements.txt +{% endif %} +``` + +### 3. Run Your Agent +```bash +export ENVIRONMENT=development +agentex agents run --manifest manifest.yaml +``` + +### 4. Interact with Your Agent + +Via Web UI: +```bash +cd agentex-web +make dev +# Open http://localhost:3000 +``` + +## Environment Variables + +For local development, create a `.env` file: + +```bash +OPENAI_API_KEY=your-api-key +SGP_API_KEY=your-sgp-key # Optional: for tracing +SGP_ACCOUNT_ID=your-account-id # Optional: for tracing +``` + +## Troubleshooting + +### Common Issues + +1. **Agent not responding** + - Check if agent is running on port 8000 + - Verify `ENVIRONMENT=development` is set + - Check logs for errors + +2. **Temporal workflow issues** + - Check Temporal Web UI at http://localhost:8080 + - Verify Temporal server is running + - Check workflow logs + +3. **OpenAI API errors** + - Verify `OPENAI_API_KEY` is set + - Check API rate limits + - Verify model name is correct + +4. **Activity failures** + - Check activity logs in console + - Verify activity is registered in `run_worker.py` + - Check timeout settings + +Happy building with Temporal + OpenAI Agents SDK! diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/dev.ipynb.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/dev.ipynb.j2 new file mode 100644 index 000000000..d3a68303f --- /dev/null +++ b/src/agentex/lib/cli/templates/temporal-openai-agents/dev.ipynb.j2 @@ -0,0 +1,126 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "36834357", + "metadata": {}, + "outputs": [], + "source": [ + "from agentex import Agentex\n", + "\n", + "client = Agentex(base_url=\"http://localhost:5003\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d1c309d6", + "metadata": {}, + "outputs": [], + "source": [ + "AGENT_NAME = \"{{ agent_name }}\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9f6e6ef0", + "metadata": {}, + "outputs": [], + "source": [ + "# (REQUIRED) Create a new task. 
For Async agents, you must create a task for messages to be associated with.\n", + "import uuid\n", + "\n", + "rpc_response = client.agents.create_task(\n", + " agent_name=AGENT_NAME,\n", + " params={\n", + " \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n", + " \"params\": {}\n", + " }\n", + ")\n", + "\n", + "task = rpc_response.result\n", + "print(task)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b03b0d37", + "metadata": {}, + "outputs": [], + "source": [ + "# Send an event to the agent\n", + "\n", + "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", + "# - TextContent: A message with just text content \n", + "# - DataContent: A message with JSON-serializable data content\n", + "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", + "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", + "\n", + "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", + "\n", + "rpc_response = client.agents.send_event(\n", + " agent_name=AGENT_NAME,\n", + " params={\n", + " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", + " \"task_id\": task.id,\n", + " }\n", + ")\n", + "\n", + "event = rpc_response.result\n", + "print(event)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a6927cc0", + "metadata": {}, + "outputs": [], + "source": [ + "# Subscribe to the async task messages produced by the agent\n", + "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", + "\n", + "task_messages = subscribe_to_async_task_messages(\n", + " client=client,\n", + " task=task, \n", + " only_after_timestamp=event.created_at, \n", + " print_messages=True,\n", + " rich_print=True,\n", + " timeout=5,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4864e354", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/environments.yaml.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/environments.yaml.j2 new file mode 100644 index 000000000..a3df5e228 --- /dev/null +++ b/src/agentex/lib/cli/templates/temporal-openai-agents/environments.yaml.j2 @@ -0,0 +1,64 @@ +# Agent Environment Configuration +# ------------------------------ +# This file defines environment-specific settings for your agent. +# This DIFFERS from the manifest.yaml file in that it is used to program things that are ONLY per environment. 
+ +# ********** EXAMPLE ********** +# schema_version: "v1" # This is used to validate the file structure and is not used by the agentex CLI +# environments: +# dev: +# auth: +# principal: +# user_id: "1234567890" +# user_name: "John Doe" +# user_email: "john.doe@example.com" +# user_role: "admin" +# user_permissions: "read, write, delete" +# helm_overrides: # This is used to override the global helm values.yaml file in the agentex-agent helm charts +# replicas: 3 +# resources: +# requests: +# cpu: "1000m" +# memory: "2Gi" +# limits: +# cpu: "2000m" +# memory: "4Gi" +# env: +# - name: LOG_LEVEL +# value: "DEBUG" +# - name: ENVIRONMENT +# value: "staging" +# +# kubernetes: +# # OPTIONAL - Otherwise it will be derived from separately. However, this can be used to override the derived +# # namespace and deploy it with in the same namespace that already exists for a separate agent. +# namespace: "team-{{agent_name}}" +# ********** END EXAMPLE ********** + +schema_version: "v1" # This is used to validate the file structure and is not used by the agentex CLI +environments: + dev: + auth: + principal: + user_id: # TODO: Fill in + account_id: # TODO: Fill in + helm_overrides: + # This is used to override the global helm values.yaml file in the agentex-agent helm charts + replicaCount: 2 + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "1000m" + memory: "2Gi" + temporal-worker: + enabled: true + replicaCount: 2 + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "1000m" + memory: "2Gi" \ No newline at end of file diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/manifest.yaml.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/manifest.yaml.j2 new file mode 100644 index 000000000..a6433ce7d --- /dev/null +++ b/src/agentex/lib/cli/templates/temporal-openai-agents/manifest.yaml.j2 @@ -0,0 +1,140 @@ +# Agent Manifest Configuration +# --------------------------- +# This file defines how your agent should be built and deployed. + +# Build Configuration +# ------------------ +# The build config defines what gets packaged into your agent's Docker image. +# This same configuration is used whether building locally or remotely. +# +# When building: +# 1. All files from include_paths are collected into a build context +# 2. The context is filtered by dockerignore rules +# 3. The Dockerfile uses this context to build your agent's image +# 4. 
The image is pushed to a registry and used to run your agent +build: + context: + # Root directory for the build context + root: ../ # Keep this as the default root + + # Paths to include in the Docker build context + # Must include: + # - Your agent's directory (your custom agent code) + # These paths are collected and sent to the Docker daemon for building + include_paths: + - {{ project_path_from_build_root }} + + # Path to your agent's Dockerfile + # This defines how your agent's image is built from the context + # Relative to the root directory + dockerfile: {{ project_path_from_build_root }}/Dockerfile + + # Path to your agent's .dockerignore + # Filters unnecessary files from the build context + # Helps keep build context small and builds fast + dockerignore: {{ project_path_from_build_root }}/.dockerignore + + +# Local Development Configuration +# ----------------------------- +# Only used when running the agent locally +local_development: + agent: + port: 8000 # Port where your local ACP server is running + host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) + + # File paths for local development (relative to this manifest.yaml) + paths: + # Path to ACP server file + # Examples: + # project/acp.py (standard) + # src/server.py (custom structure) + # ../shared/acp.py (shared across projects) + # /absolute/path/acp.py (absolute path) + acp: project/acp.py + + # Path to temporal worker file + # Examples: + # project/run_worker.py (standard) + # workers/temporal.py (custom structure) + # ../shared/worker.py (shared across projects) + worker: project/run_worker.py + + +# Agent Configuration +# ----------------- +agent: + # Type of agent - either sync or async + acp_type: async + + # Unique name for your agent + # Used for task routing and monitoring + name: {{ agent_name }} + + # Description of what your agent does + # Helps with documentation and discovery + description: {{ description }} + + # Temporal workflow configuration + # This enables your agent to run as a Temporal workflow for long-running tasks + temporal: + enabled: true + workflows: + # Name of the workflow class + # Must match the @workflow.defn name in your workflow.py + - name: {{ workflow_name }} + + # Queue name for task distribution + # Used by Temporal to route tasks to your agent + # Convention: _task_queue + queue_name: {{ queue_name }} + + # Optional: Health check port for temporal worker + # Defaults to 80 if not specified + # health_check_port: 80 + + # Optional: Credentials mapping + # Maps Kubernetes secrets to environment variables + # Common credentials include: + credentials: + - env_var_name: REDIS_URL + secret_name: redis-url-secret + secret_key: url + # - env_var_name: OPENAI_API_KEY + # secret_name: openai-api-key + # secret_key: api-key + + # Optional: Set Environment variables for running your agent locally as well + # as for deployment later on + env: {} + # OPENAI_API_KEY: "" + # OPENAI_BASE_URL: "" + # OPENAI_ORG_ID: "" + + +# Deployment Configuration +# ----------------------- +# Configuration for deploying your agent to Kubernetes clusters +deployment: + # Container image configuration + image: + repository: "" # Update with your container registry + tag: "latest" # Default tag, should be versioned in production + + imagePullSecrets: [] # Update with your image pull secret name + # - name: my-registry-secret + + # Global deployment settings that apply to all clusters + # These can be overridden in cluster-specific 
environments (environments.yaml) + global: + # Default replica count + replicaCount: 1 + + # Default resource requirements + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "1000m" + memory: "2Gi" \ No newline at end of file diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/project/acp.py.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/project/acp.py.j2 new file mode 100644 index 000000000..87a3fdb96 --- /dev/null +++ b/src/agentex/lib/cli/templates/temporal-openai-agents/project/acp.py.j2 @@ -0,0 +1,80 @@ +import os +import sys +from temporalio.contrib.openai_agents import OpenAIAgentsPlugin, ModelActivityParameters +from datetime import timedelta +from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ContextInterceptor +from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import ( + TemporalStreamingModelProvider, +) + +# === DEBUG SETUP (AgentEx CLI Debug Support) === +if os.getenv("AGENTEX_DEBUG_ENABLED") == "true": + try: + import debugpy + from agentex.lib.utils.logging import make_logger + + logger = make_logger(__name__) + debug_port = int(os.getenv("AGENTEX_DEBUG_PORT", "5679")) + debug_type = os.getenv("AGENTEX_DEBUG_TYPE", "acp") + wait_for_attach = os.getenv("AGENTEX_DEBUG_WAIT_FOR_ATTACH", "false").lower() == "true" + + # Configure debugpy + debugpy.configure(subProcess=False) + debugpy.listen(debug_port) + + logger.info(f"[{debug_type.upper()}] Debug server listening on port {debug_port}") + + if wait_for_attach: + logger.info(f"[{debug_type.upper()}] Waiting for debugger to attach...") + debugpy.wait_for_client() + logger.info(f"[{debug_type.upper()}] Debugger attached!") + else: + logger.info(f"[{debug_type.upper()}] Ready for debugger attachment") + + except ImportError: + print("debugpy not available. Install with: pip install debugpy") + sys.exit(1) + except Exception as e: + print(f"Debug setup failed: {e}") + sys.exit(1) +# === END DEBUG SETUP === + +from agentex.lib.sdk.fastacp.fastacp import FastACP +from agentex.lib.types.fastacp import TemporalACPConfig + +context_interceptor = ContextInterceptor() +streaming_model_provider = TemporalStreamingModelProvider() + + +# Create the ACP server +acp = FastACP.create( + acp_type="async", + config=TemporalACPConfig( + # When deployed to the cluster, the Temporal address will automatically be set to the cluster address + # For local development, we set the address manually to talk to the local Temporal service set up via docker compose + type="temporal", + temporal_address=os.getenv("TEMPORAL_ADDRESS", "localhost:7233"), + plugins=[OpenAIAgentsPlugin( + model_params=ModelActivityParameters( + start_to_close_timeout=timedelta(days=1) + ), + model_provider=streaming_model_provider + )], + interceptors=[context_interceptor] + ) +) + + +# Notice that we don't need to register any handlers when we use type="temporal" +# If you look at the code in agentex.sdk.fastacp.impl.temporal_acp +# You can see that these handlers are automatically registered when the ACP is created + +# @acp.on_task_create +# This will be handled by the method in your workflow that is decorated with @workflow.run + +# @acp.on_task_event_send +# This will be handled by the method in your workflow that is decorated with @workflow.signal(name=SignalName.RECEIVE_MESSAGE) + +# @acp.on_task_cancel +# This does not need to be handled by your workflow. 
+# It is automatically handled by the temporal client which cancels the workflow directly diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/project/activities.py.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/project/activities.py.j2 new file mode 100644 index 000000000..907cb287a --- /dev/null +++ b/src/agentex/lib/cli/templates/temporal-openai-agents/project/activities.py.j2 @@ -0,0 +1,116 @@ +""" +Temporal Activities for OpenAI Agents SDK +========================================== + +WHAT ARE ACTIVITIES? +-------------------- +Activities are functions that perform non-deterministic operations - things that +might have different results each time they run, such as: +- API calls (weather services, databases, external services) +- File I/O operations +- Current time/date lookups +- Random number generation +- Any operation with side effects + +Temporal workflows must be deterministic (same input = same output every time). +Activities let you safely perform non-deterministic work while Temporal handles +retries, timeouts, and failure recovery automatically. + + +HOW TO ADD NEW ACTIVITIES: +-------------------------- +Adding a new activity requires 3 steps: + +1. DEFINE the activity in this file with the @activity.defn decorator: + + @activity.defn + async def my_new_activity(param: str) -> str: + # Your non-deterministic logic here + return result + +2. REGISTER it in run_worker.py by adding to the activities list: + + from project.activities import get_weather, my_new_activity + + all_activities = get_all_activities() + [ + stream_lifecycle_content, + get_weather, + my_new_activity, # Add your new activity here + ] + +3. ADD it as a tool to your OpenAI agent in workflow.py: + + from project.activities import get_weather, my_new_activity + + agent = Agent( + name="...", + tools=[ + openai_agents.workflow.activity_as_tool( + get_weather, + start_to_close_timeout=timedelta(minutes=5), + ), + openai_agents.workflow.activity_as_tool( + my_new_activity, # Add your new activity as a tool + start_to_close_timeout=timedelta(minutes=5), + ), + ], + ) + + +RUNNING ACTIVITIES OUTSIDE OPENAI AGENT SDK: +-------------------------------------------- +You can also call activities directly from your workflow without going through +the OpenAI agent. This is useful for setup/teardown operations or when you need +to run an activity before the agent starts: + + from temporalio import workflow + from datetime import timedelta + + # Inside your workflow method: + result = await workflow.execute_activity( + get_weather, + start_to_close_timeout=timedelta(minutes=5), + ) + +For activities with parameters: + + result = await workflow.execute_activity( + my_activity_with_params, + "param_value", # positional args + start_to_close_timeout=timedelta(minutes=5), + ) + + +TEMPORAL DASHBOARD: +------------------- +Monitor your workflows and activities in real-time at: + + http://localhost:8080 + +The dashboard shows: +- Running and completed workflows +- Activity execution history +- Retries and failures +- Workflow state and signals +""" + +from temporalio import activity + +from agentex.lib.utils.logging import make_logger + +logger = make_logger(__name__) + + +@activity.defn +async def get_weather() -> str: + """ + Get the current weather. + + This is a dummy activity that returns a hardcoded string for demo purposes. + Replace this with a real weather API call in your implementation. + + Returns: + A string describing the current weather conditions. 
+ """ + logger.info("get_weather activity called") + return "Sunny, 72°F" diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/project/run_worker.py.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/project/run_worker.py.j2 new file mode 100644 index 000000000..2516d3e0b --- /dev/null +++ b/src/agentex/lib/cli/templates/temporal-openai-agents/project/run_worker.py.j2 @@ -0,0 +1,56 @@ +import asyncio + +from agentex.lib.core.temporal.activities import get_all_activities +from agentex.lib.core.temporal.workers.worker import AgentexWorker +from agentex.lib.utils.logging import make_logger +from agentex.lib.utils.debug import setup_debug_if_enabled +from agentex.lib.environment_variables import EnvironmentVariables +from temporalio.contrib.openai_agents import OpenAIAgentsPlugin, ModelActivityParameters +from datetime import timedelta +from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ContextInterceptor +from agentex.lib.core.temporal.plugins.openai_agents.hooks.activities import stream_lifecycle_content +from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import ( + TemporalStreamingModelProvider, +) +from project.workflow import {{ workflow_class }} +from project.activities import get_weather + +environment_variables = EnvironmentVariables.refresh() + +logger = make_logger(__name__) + + +async def main(): + # Setup debug mode if enabled + setup_debug_if_enabled() + + task_queue_name = environment_variables.WORKFLOW_TASK_QUEUE + if task_queue_name is None: + raise ValueError("WORKFLOW_TASK_QUEUE is not set") + + # Register all activities here + # When you add new activities in activities.py, add them to this list + all_activities = get_all_activities() + [stream_lifecycle_content, get_weather] + + context_interceptor = ContextInterceptor() + streaming_model_provider = TemporalStreamingModelProvider() + + # Create a worker with automatic tracing + worker = AgentexWorker( + task_queue=task_queue_name, + plugins=[OpenAIAgentsPlugin( + model_params=ModelActivityParameters( + start_to_close_timeout=timedelta(days=1) + ), + model_provider=streaming_model_provider + )], + interceptors=[context_interceptor], + ) + + await worker.run( + activities=all_activities, + workflow={{ workflow_class }}, + ) + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/project/workflow.py.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/project/workflow.py.j2 new file mode 100644 index 000000000..94b5221fb --- /dev/null +++ b/src/agentex/lib/cli/templates/temporal-openai-agents/project/workflow.py.j2 @@ -0,0 +1,169 @@ +import json +import os + +from temporalio import workflow + +from agentex.lib import adk +from agentex.lib.types.acp import CreateTaskParams, SendEventParams +from agentex.lib.core.temporal.workflows.workflow import BaseWorkflow +from agentex.lib.core.temporal.types.workflow import SignalName +from agentex.lib.utils.logging import make_logger +from agentex.types.text_content import TextContent +from agentex.lib.environment_variables import EnvironmentVariables +from agents import Agent, Runner +from agentex.lib.core.temporal.plugins.openai_agents.hooks.hooks import TemporalStreamingHooks +from pydantic import BaseModel +from typing import List, Dict, Any +from temporalio.contrib import openai_agents +from project.activities import get_weather +from agentex.lib.core.tracing.tracing_processor_manager import ( + 
add_tracing_processor_config, +) +from agentex.lib.types.tracing import SGPTracingProcessorConfig +from datetime import timedelta + + +environment_variables = EnvironmentVariables.refresh() + +if environment_variables.WORKFLOW_NAME is None: + raise ValueError("Environment variable WORKFLOW_NAME is not set") + +if environment_variables.AGENT_NAME is None: + raise ValueError("Environment variable AGENT_NAME is not set") + +logger = make_logger(__name__) + +# Setup tracing for SGP (Scale GenAI Platform) +# This enables visibility into your agent's execution in the SGP dashboard +add_tracing_processor_config( + SGPTracingProcessorConfig( + sgp_api_key=os.environ.get("SGP_API_KEY", ""), + sgp_account_id=os.environ.get("SGP_ACCOUNT_ID", ""), + ) +) + + +class StateModel(BaseModel): + """ + State model for preserving conversation history across turns. + + This allows the agent to maintain context throughout the conversation, + making it possible to reference previous messages and build on the discussion. + + Attributes: + input_list: The conversation history in OpenAI message format. + turn_number: Counter for tracking conversation turns (useful for tracing). + """ + + input_list: List[Dict[str, Any]] + turn_number: int + + +class TurnInput(BaseModel): + """Input model for tracing spans.""" + input_list: List[Dict[str, Any]] + + +class TurnOutput(BaseModel): + """Output model for tracing spans.""" + final_output: Any + + +@workflow.defn(name=environment_variables.WORKFLOW_NAME) +class {{ workflow_class }}(BaseWorkflow): + """ + Workflow for {{ agent_name }} agent using OpenAI Agents SDK. + + This workflow: + - Maintains conversation state across turns + - Creates tracing spans for each turn + - Runs an OpenAI agent with tools (activities) + - Streams responses back to the client + """ + + def __init__(self): + super().__init__(display_name=environment_variables.AGENT_NAME) + self._complete_task = False + self._state: StateModel = StateModel(input_list=[], turn_number=0) + self._task_id = None + self._trace_id = None + self._parent_span_id = None + + @workflow.signal(name=SignalName.RECEIVE_EVENT) + async def on_task_event_send(self, params: SendEventParams) -> None: + logger.info(f"Received task message instruction: {params}") + + # Increment turn number for tracing + self._state.turn_number += 1 + + self._task_id = params.task.id + self._trace_id = params.task.id + self._parent_span_id = params.task.id + + # Add the user message to conversation history + self._state.input_list.append({"role": "user", "content": params.event.content.content}) + + # Echo back the client's message to show it in the UI + await adk.messages.create(task_id=params.task.id, content=params.event.content) + + temporal_streaming_hooks = TemporalStreamingHooks(task_id=params.task.id) + + # Create a span to track this turn of the conversation + turn_input = TurnInput( + input_list=self._state.input_list, + ) + async with adk.tracing.span( + trace_id=params.task.id, + name=f"Turn {self._state.turn_number}", + input=turn_input.model_dump(), + ) as span: + self._parent_span_id = span.id if span else None + + # Create the OpenAI agent with tools + # Add your activities as tools using activity_as_tool() + agent = Agent( + name="{{ agent_name }}", + instructions="You are a helpful assistant. 
Use your tools to help the user.", + model="gpt-4o-mini", + tools=[ + openai_agents.workflow.activity_as_tool( + get_weather, + start_to_close_timeout=timedelta(minutes=5), + ), + # Add more tools here as you create new activities: + # openai_agents.workflow.activity_as_tool( + # your_new_activity, + # start_to_close_timeout=timedelta(minutes=5), + # ), + ], + ) + + # Run the agent with hooks to enable streaming responses + result = await Runner.run(agent, self._state.input_list, hooks=temporal_streaming_hooks) + + # Update the state with the assistant's response for the next turn + self._state.input_list = result.to_input_list() # type: ignore[assignment] + + # Set span output for tracing - include full state + if span: + turn_output = TurnOutput(final_output=result.final_output) + span.output = turn_output.model_dump() + + @workflow.run + async def on_task_create(self, params: CreateTaskParams) -> str: + logger.info(f"Received task create params: {params}") + + # Acknowledge that the task has been created + await adk.messages.create( + task_id=params.task.id, + content=TextContent( + author="agent", + content=f"Hello! I'm {{ agent_name }}, your AI assistant. How can I help you today?\n\nParams received:\n{json.dumps(params.params, indent=2)}", + ), + ) + + await workflow.wait_condition( + lambda: self._complete_task, + timeout=None, + ) + return "Task completed" diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/pyproject.toml.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/pyproject.toml.j2 new file mode 100644 index 000000000..a1ebab933 --- /dev/null +++ b/src/agentex/lib/cli/templates/temporal-openai-agents/pyproject.toml.j2 @@ -0,0 +1,35 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "{{ project_name }}" +version = "0.1.0" +description = "{{ description }}" +requires-python = ">=3.12" +dependencies = [ + "agentex-sdk", + "scale-gp", + "temporalio", + "openai-agents>=0.4.2", +] + +[project.optional-dependencies] +dev = [ + "pytest", + "black", + "isort", + "flake8", + "debugpy>=1.8.15", +] + +[tool.hatch.build.targets.wheel] +packages = ["project"] + +[tool.black] +line-length = 88 +target-version = ['py312'] + +[tool.isort] +profile = "black" +line_length = 88 diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/requirements.txt.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/requirements.txt.j2 new file mode 100644 index 000000000..d4bd7a0fa --- /dev/null +++ b/src/agentex/lib/cli/templates/temporal-openai-agents/requirements.txt.j2 @@ -0,0 +1,4 @@ +agentex-sdk +scale-gp +temporalio +openai-agents>=0.4.2 diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/test_agent.py.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/test_agent.py.j2 new file mode 100644 index 000000000..ee71f177c --- /dev/null +++ b/src/agentex/lib/cli/templates/temporal-openai-agents/test_agent.py.j2 @@ -0,0 +1,147 @@ +""" +Sample tests for AgentEx ACP agent. + +This test suite demonstrates how to test the main AgentEx API functions: +- Non-streaming event sending and polling +- Streaming event sending + +To run these tests: +1. Make sure the agent is running (via docker-compose or `agentex agents run`) +2. Set the AGENTEX_API_BASE_URL environment variable if not using default +3. 
Run: pytest test_agent.py -v + +Configuration: +- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) +- AGENT_NAME: Name of the agent to test (default: {{ agent_name }}) +""" + +import os +import uuid +import asyncio +import pytest +import pytest_asyncio +from agentex import AsyncAgentex +from agentex.types import TaskMessage +from agentex.types.agent_rpc_params import ParamsCreateTaskRequest +from agentex.types.text_content_param import TextContentParam +from test_utils.async_utils import ( + poll_for_agent_response, + send_event_and_poll_yielding, + stream_agent_response, + validate_text_in_response, + poll_messages, +) + + +# Configuration from environment variables +AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") +AGENT_NAME = os.environ.get("AGENT_NAME", "{{ agent_name }}") + + +@pytest_asyncio.fixture +async def client(): + """Create an AsyncAgentex client instance for testing.""" + client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL) + yield client + await client.close() + + +@pytest.fixture +def agent_name(): + """Return the agent name for testing.""" + return AGENT_NAME + + +@pytest_asyncio.fixture +async def agent_id(client, agent_name): + """Retrieve the agent ID based on the agent name.""" + agents = await client.agents.list() + for agent in agents: + if agent.name == agent_name: + return agent.id + raise ValueError(f"Agent with name {agent_name} not found.") + + +class TestNonStreamingEvents: + """Test non-streaming event sending and polling.""" + + @pytest.mark.asyncio + async def test_send_event_and_poll(self, client: AsyncAgentex, _agent_name: str, agent_id: str): + """Test sending an event and polling for the response.""" + # TODO: Create a task for this conversation + # task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) + # task = task_response.result + # assert task is not None + + # TODO: Poll for the initial task creation message (if your agent sends one) + # async for message in poll_messages( + # client=client, + # task_id=task.id, + # timeout=30, + # sleep_interval=1.0, + # ): + # assert isinstance(message, TaskMessage) + # if message.content and message.content.type == "text" and message.content.author == "agent": + # # Check for your expected initial message + # assert "expected initial text" in message.content.content + # break + + # TODO: Send an event and poll for response using the yielding helper function + # user_message = "Your test message here" + # async for message in send_event_and_poll_yielding( + # client=client, + # agent_id=agent_id, + # task_id=task.id, + # user_message=user_message, + # timeout=30, + # sleep_interval=1.0, + # ): + # assert isinstance(message, TaskMessage) + # if message.content and message.content.type == "text" and message.content.author == "agent": + # # Check for your expected response + # assert "expected response text" in message.content.content + # break + pass + + +class TestStreamingEvents: + """Test streaming event sending.""" + + @pytest.mark.asyncio + async def test_send_event_and_stream(self, client: AsyncAgentex, _agent_name: str, agent_id: str): + """Test sending an event and streaming the response.""" + # TODO: Create a task for this conversation + # task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) + # task = task_response.result + # assert task is not None + + # user_message = "Your test message here" + + # # Collect 
events from stream + # all_events = [] + + # async def collect_stream_events(): + # async for event in stream_agent_response( + # client=client, + # task_id=task.id, + # timeout=30, + # ): + # all_events.append(event) + + # # Start streaming task + # stream_task = asyncio.create_task(collect_stream_events()) + + # # Send the event + # event_content = TextContentParam(type="text", author="user", content=user_message) + # await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": event_content}) + + # # Wait for streaming to complete + # await stream_task + + # # TODO: Add your validation here + # assert len(all_events) > 0, "No events received in streaming response" + pass + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/src/agentex/types/__init__.py b/src/agentex/types/__init__.py index 04a3b059d..140acd92f 100644 --- a/src/agentex/types/__init__.py +++ b/src/agentex/types/__init__.py @@ -63,8 +63,8 @@ from .tool_request_content_param import ToolRequestContentParam as ToolRequestContentParam from .tool_response_content_param import ToolResponseContentParam as ToolResponseContentParam from .task_retrieve_by_name_params import TaskRetrieveByNameParams as TaskRetrieveByNameParams -from .deployment_history_list_params import DeploymentHistoryListParams as DeploymentHistoryListParams from .message_list_paginated_params import MessageListPaginatedParams as MessageListPaginatedParams +from .deployment_history_list_params import DeploymentHistoryListParams as DeploymentHistoryListParams from .task_retrieve_by_name_response import TaskRetrieveByNameResponse as TaskRetrieveByNameResponse -from .deployment_history_list_response import DeploymentHistoryListResponse as DeploymentHistoryListResponse from .message_list_paginated_response import MessageListPaginatedResponse as MessageListPaginatedResponse +from .deployment_history_list_response import DeploymentHistoryListResponse as DeploymentHistoryListResponse
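
The new template's `activities.py` docstring and `workflow.py` describe a three-step pattern for exposing Temporal activities as agent tools: define the activity, register it with the worker, and wrap it with `activity_as_tool` when constructing the agent. Below is a minimal sketch of that pattern consolidated into one place, using a hypothetical `lookup_order_status` activity in place of the template's `get_weather`; the imports and `openai_agents.workflow.activity_as_tool` call mirror what the generated `workflow.py` and `run_worker.py` already use.

```python
# Step 1 (activities.py): define the activity.
# `lookup_order_status` is a hypothetical example mirroring the template's get_weather stub.
from temporalio import activity


@activity.defn
async def lookup_order_status(order_id: str) -> str:
    # Replace with a real lookup (API/database call); hardcoded here for illustration.
    return f"Order {order_id}: shipped"


# Step 2 (run_worker.py): register it alongside the template's activities, e.g.
# all_activities = get_all_activities() + [stream_lifecycle_content, get_weather, lookup_order_status]

# Step 3 (workflow.py): expose it to the OpenAI agent as a tool.
from datetime import timedelta

from agents import Agent
from temporalio.contrib import openai_agents

agent = Agent(
    name="my-agent",
    instructions="You are a helpful assistant. Use your tools to help the user.",
    model="gpt-4o-mini",
    tools=[
        openai_agents.workflow.activity_as_tool(
            lookup_order_status,
            start_to_close_timeout=timedelta(minutes=5),
        ),
    ],
)
```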