Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
63 changes: 63 additions & 0 deletions coding_agent_example/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
Coding Agent Example
====================

This example builds a fresh coding CLI agent from scratch using LangChain and Ollama (no reuse of `python_example/`).

Prerequisites
-------------
- Python 3.11+
- Ollama running locally (`ollama serve`) and at least one chat model installed (e.g., `ollama pull gemma3:4b`)
- `uv` (recommended) or `pip`

Setup
-----
From the repo root:

```bash
uv run python coding_agent_example/run.py --help
```

If you prefer pip:

```bash
cd coding_agent_example
python -m venv .venv && source .venv/bin/activate
pip install -r requirements.txt
python run.py --help
```

Usage
-----
Simplest way (with sensible defaults):
```bash
./coding_agent_example/start.sh
```

Advanced:
```bash
# Interactive REPL with explicit model/base URL
uv run -p 3.11 --with coding_agent_example/requirements.txt python coding_agent_example/run.py --repl --model gemma3:latest --base-url http://localhost:11434

# Single-shot codegen and write to file
uv run -p 3.11 --with coding_agent_example/requirements.txt python coding_agent_example/run.py --model gemma3:latest --language python --output scratch/example.py
```

Commands inside the chat
------------------------
Use natural language; the script will stream generated code as it is produced.

Notes
-----
- The script auto-detects an available Ollama model close to your requested name and falls back gracefully.

Next: Multi-agent with LangGraph
--------------------------------
In the next phase we will implement a multi-agent architecture coordinated by LangChain’s LangGraph framework. The system will follow the “Open SWE” pattern of specialized sub-agents (graphs):

- Manager: orchestrates overall workflow and user interaction
- Planner: “researches” the codebase and requirements to draft a detailed plan (pauses for human approval)
- Programmer: executes the approved plan across the repository (creates/edits files)
- Reviewer: validates output by running tests/linters and verifies the changes resolve the issue before completion

The loop will be human-in-the-loop at the planning stage to ensure control and safety.

38 changes: 38 additions & 0 deletions coding_agent_example/agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
from __future__ import annotations

from dataclasses import dataclass
from typing import List

from langchain.agents import AgentExecutor, create_react_agent
from langchain_ollama import ChatOllama
from langchain_core.prompts import ChatPromptTemplate


def build_model(model_name: str, base_url: str) -> ChatOllama:
    """Construct a ChatOllama client bound to the given model and server URL."""
    # Low temperature keeps generated code relatively deterministic.
    settings = {
        "model": model_name,
        "base_url": base_url,
        "temperature": 0.2,
    }
    return ChatOllama(**settings)


def build_prompt() -> ChatPromptTemplate:
    """Build the ReAct-style chat prompt for the coding agent.

    The ReAct agent created by ``create_react_agent`` expects these template
    variables to be present: tools, tool_names, input, agent_scratchpad.
    """
    instructions = (
        "You are a careful coding assistant. You have tools to read/write files, list directories, and search text. "
        "Use a ReAct style: think about the problem, choose a tool when needed, observe results, and iterate. "
        "For writes, show a short summary of what will be written. Keep outputs concise."
    )
    messages = [
        ("system", instructions + "\n\nAvailable tools:\n{tools}\n\nTool names: {tool_names}"),
        ("human", "{input}"),
        ("placeholder", "{agent_scratchpad}"),
    ]
    return ChatPromptTemplate.from_messages(messages)


def build_agent(model: ChatOllama, tools: List) -> AgentExecutor:
    """Assemble the model, tools, and ReAct prompt into an AgentExecutor."""
    react_agent = create_react_agent(model, tools, build_prompt())
    # verbose=True echoes the agent's thought/action trace to the console.
    return AgentExecutor(agent=react_agent, tools=tools, verbose=True)


5 changes: 5 additions & 0 deletions coding_agent_example/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
langchain>=0.2.10
langchain-community>=0.2.10
langchain-ollama>=0.2.0
requests>=2.31.0
rich>=13.7.1

153 changes: 153 additions & 0 deletions coding_agent_example/run.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,153 @@
#!/usr/bin/env python3
"""Simple code generator CLI using LangChain + Ollama (no agent)."""

import argparse
import requests
from typing import Optional

from langchain_ollama import ChatOllama
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
import logging


logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")
logger = logging.getLogger(__name__)


def _detect_model(base_url: str, desired: Optional[str]) -> str:
try:
r = requests.get(f"{base_url}/api/tags", timeout=3)
models = [m.get("name", "") for m in r.json().get("models", [])]
except Exception:
models = []
if desired:
base = desired.split(":")[0]
for m in models:
if m.startswith(base):
return m
return desired
prefs = ["gemma", "mistral", "phi", "llama", "qwen", "tinyllama"]
for pref in prefs:
for m in models:
if m.startswith(pref):
return m
return "gemma2:9b"


def main() -> int:
    """CLI entry point: generate code with an Ollama model via LangChain.

    Modes:
      --repl     interactive loop that streams each answer to stdout
      (default)  single prompt read from stdin; --output writes the result
                 to a file instead of printing it.

    Returns:
        0 on success or clean exit; 1 when Ollama is unreachable or when
        generation/file writing fails in single-shot mode.
    """
    parser = argparse.ArgumentParser(description="Generate code with Ollama via LangChain")
    parser.add_argument("--model", default=None, help="Ollama model (e.g., gemma2:9b, mistral)")
    parser.add_argument("--base-url", default="http://localhost:11434", help="Ollama base URL")
    parser.add_argument("--language", default=None, help="Preferred language hint (e.g., python, typescript)")
    parser.add_argument("--output", default=None, help="Optional path to write the generated code")
    parser.add_argument("--repl", action="store_true", help="Interactive loop (prints code)")
    args = parser.parse_args()

    # Detect model and ping Ollama
    model_name = _detect_model(args.base_url, args.model)
    logger.info(f"Selected model: {model_name}")
    try:
        logger.info(f"Pinging Ollama at {args.base_url}...")
        # /api/tags is a cheap endpoint; any failure means the server is down.
        requests.get(f"{args.base_url}/api/tags", timeout=5).raise_for_status()
    except Exception as e:
        logger.error(f"Ollama not reachable at {args.base_url}: {e}")
        return 1

    # Build simple codegen chain
    system = (
        "You are a senior software engineer. Generate only code for the user's request. "
        "Do not include explanations or commentary. If multiple files are needed, start with the most critical file."
    )
    if args.language:
        system += f" Prefer {args.language} when reasonable."

    prompt = ChatPromptTemplate.from_messages([
        ("system", system),
        ("human", "{task}"),
    ])
    logger.info("Initializing model...")
    model = ChatOllama(
        model=model_name,
        base_url=args.base_url,
        temperature=0.2,
        num_predict=512,
        streaming=True,
    )
    # Pipeline: prompt template -> chat model -> plain string output.
    chain = prompt | model | StrOutputParser()

    if args.repl:
        print("Coding Agent REPL. Type 'exit' to quit.")
        logger.info("Starting REPL. Type 'exit' to quit.")
        while True:
            try:
                user = input("\nDescribe the code to generate (or 'exit'):\n> ").strip()
            except (EOFError, KeyboardInterrupt):
                logger.info("Exiting REPL.")
                break
            if not user or user.lower() in {"exit", "quit"}:
                logger.info("Goodbye.")
                break
            logger.info("Generating code (streaming)...")
            try:
                total = 0
                for chunk in chain.stream({"task": user}):
                    total += len(chunk)
                    print(chunk, end="", flush=True)
                print()
                logger.info(f"Stream complete. {total} characters.")
                if total == 0:
                    logger.warning("Empty response from model.")
            except Exception as e:
                # Log and keep the REPL alive after a failed generation.
                logger.error(f"Generation failed: {e}")
        return 0

    # Single-shot mode: read from stdin or prompt once
    try:
        task = input("Enter a coding task (or run with --repl):\n> ")
    except (EOFError, KeyboardInterrupt):
        return 0
    if not task.strip():
        return 0
    logger.info("Generating code (streaming single-shot)...")
    if args.output:
        # Accumulate chunks to write to file
        try:
            buf = []
            total = 0
            for chunk in chain.stream({"task": task}):
                buf.append(chunk)
                total += len(chunk)
            code = "".join(buf)
            logger.info(f"Stream complete. {total} characters.")
        except Exception as e:
            logger.error(f"Generation failed: {e}")
            return 1
        try:
            from pathlib import Path
            p = Path(args.output)
            p.parent.mkdir(parents=True, exist_ok=True)
            p.write_text(code, encoding="utf-8")
            logger.info(f"Wrote code to {args.output}")
        except Exception as e:
            # On write failure, fall back to stdout so the output isn't lost.
            logger.error(f"Failed to write code to {args.output}: {e}")
            logger.info("Printing generated code to stdout.")
            print(code)
    else:
        try:
            total = 0
            for chunk in chain.stream({"task": task}):
                total += len(chunk)
                print(chunk, end="", flush=True)
            print()
            logger.info(f"Stream complete. {total} characters.")
        except Exception as e:
            logger.error(f"Generation failed: {e}")
            return 1
    return 0


if __name__ == "__main__":
raise SystemExit(main())


13 changes: 13 additions & 0 deletions coding_agent_example/start.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
#!/usr/bin/env bash
set -euo pipefail

# Wrapper: launch the coding agent REPL via uv; MODEL and BASE_URL may be
# overridden from the environment.

MODEL="${MODEL:-gemma3:latest}"
BASE_URL="${BASE_URL:-http://localhost:11434}"

echo "Starting coding agent (model=$MODEL, base_url=$BASE_URL)..."
uv run -p 3.11 --with coding_agent_example/requirements.txt \
  python coding_agent_example/run.py --repl --model "$MODEL" --base-url "$BASE_URL"


67 changes: 67 additions & 0 deletions coding_agent_example/tools.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
from __future__ import annotations

import re
from pathlib import Path
from typing import List

from langchain.agents import tool


@tool
def read_file(file_path: str) -> str:
    """Read a UTF-8 text file at file_path and return its contents."""
    # Any failure (missing file, bad encoding, permissions) is reported as
    # a string so the agent can observe it instead of crashing.
    try:
        return Path(file_path).read_text(encoding="utf-8")
    except Exception as e:
        return f"Error reading {file_path}: {e}"


@tool
def write_file(file_path: str, content: str) -> str:
    """Write UTF-8 content to file_path, creating parent directories if needed."""
    try:
        target = Path(file_path)
        # Ensure the destination directory exists before writing.
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(content, encoding="utf-8")
        return f"Wrote {len(content)} bytes to {file_path}"
    except Exception as e:
        return f"Error writing {file_path}: {e}"


@tool
def list_dir(directory: str) -> str:
    """List files and directories at the given path."""
    try:
        root = Path(directory)
        if not root.exists():
            return f"Directory not found: {directory}"
        # One tab-separated "kind<TAB>name" row per entry, sorted by path.
        rows = [
            f"{'dir' if child.is_dir() else 'file'}\t{child.name}"
            for child in sorted(root.iterdir())
        ]
        return "\n".join(rows)
    except Exception as e:
        return f"Error listing {directory}: {e}"


@tool
def search_text(file_path: str, pattern: str, ignore_case: bool = True) -> str:
    """Search for a regex pattern in a text file and return matching lines with numbers."""
    try:
        # Compile once; an invalid pattern raises here and is reported below.
        compiled = re.compile(pattern, re.IGNORECASE if ignore_case else 0)
        content = Path(file_path).read_text(encoding="utf-8")
        matches: List[str] = []
        for number, text in enumerate(content.splitlines(), start=1):
            if compiled.search(text):
                matches.append(f"{number}: {text}")
        return "\n".join(matches) if matches else "(no matches)"
    except Exception as e:
        return f"Error searching {file_path}: {e}"


def all_tools() -> List:
    """Return the full set of file-system tools exposed to the agent."""
    return [read_file, write_file, list_dir, search_text]