diff --git a/.agentready-demo/demo-assessment-20251121-051257.json b/.agentready-demo/demo-assessment-20251121-051257.json
new file mode 100644
index 0000000..8fa2ec9
--- /dev/null
+++ b/.agentready-demo/demo-assessment-20251121-051257.json
@@ -0,0 +1,620 @@
+{
+ "repository": {
+ "path": "/var/folders/43/ffv83_l50cn2k0rzgxp63kyw0000gn/T/agentready-demo-zp8md4nd/demo-repo",
+ "name": "demo-repo",
+ "url": null,
+ "branch": "main",
+ "commit_hash": "8798997839b602cbb3f02073ca66ed95abc6cdda",
+ "languages": {
+ "Python": 4
+ },
+ "total_files": 7,
+ "total_lines": 100
+ },
+ "timestamp": "2025-11-21T05:12:57.860663",
+ "overall_score": 73.1,
+ "certification_level": "Silver",
+ "attributes_assessed": 10,
+ "attributes_skipped": 15,
+ "attributes_total": 25,
+ "findings": [
+ {
+ "attribute": {
+ "id": "claude_md_file",
+ "name": "CLAUDE.md Configuration Files",
+ "category": "Context Window Optimization",
+ "tier": 1,
+ "description": "Project-specific configuration for Claude Code",
+ "criteria": "CLAUDE.md file exists in repository root",
+ "default_weight": 0.1
+ },
+ "status": "pass",
+ "score": 100.0,
+ "measured_value": "present",
+ "threshold": "present",
+ "evidence": [
+ "CLAUDE.md found at /var/folders/43/ffv83_l50cn2k0rzgxp63kyw0000gn/T/agentready-demo-zp8md4nd/demo-repo/CLAUDE.md"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "readme_structure",
+ "name": "README Structure",
+ "category": "Documentation Standards",
+ "tier": 1,
+ "description": "Well-structured README with key sections",
+ "criteria": "README.md with installation, usage, and development sections",
+ "default_weight": 0.1
+ },
+ "status": "pass",
+ "score": 100.0,
+ "measured_value": "3/3 sections",
+ "threshold": "3/3 sections",
+ "evidence": [
+ "Found 3/3 essential sections",
+ "Installation: \u2713",
+ "Usage: \u2713",
+ "Development: \u2713"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "type_annotations",
+ "name": "Type Annotations",
+ "category": "Code Quality",
+ "tier": 1,
+ "description": "Type hints in function signatures",
+ "criteria": ">80% of functions have type annotations",
+ "default_weight": 0.1
+ },
+ "status": "pass",
+ "score": 100.0,
+ "measured_value": "100.0%",
+ "threshold": "\u226580%",
+ "evidence": [
+ "Typed functions: 5/5",
+ "Coverage: 100.0%"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "standard_layout",
+ "name": "Standard Project Layouts",
+ "category": "Repository Structure",
+ "tier": 1,
+ "description": "Follows standard project structure for language",
+ "criteria": "Standard directories (src/, tests/, docs/) present",
+ "default_weight": 0.1
+ },
+ "status": "pass",
+ "score": 100.0,
+ "measured_value": "2/2 directories",
+ "threshold": "2/2 directories",
+ "evidence": [
+ "Found 2/2 standard directories",
+ "src/: \u2713",
+ "tests/: \u2713"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "lock_files",
+ "name": "Lock Files for Reproducibility",
+ "category": "Dependency Management",
+ "tier": 1,
+ "description": "Lock files present for dependency pinning",
+ "criteria": "package-lock.json, yarn.lock, poetry.lock, or requirements.txt with versions",
+ "default_weight": 0.1
+ },
+ "status": "fail",
+ "score": 0.0,
+ "measured_value": "none",
+ "threshold": "at least one lock file",
+ "evidence": [
+ "No lock files found"
+ ],
+ "remediation": {
+ "summary": "Add lock file for dependency reproducibility",
+ "steps": [
+ "Use npm install, poetry lock, or equivalent to generate lock file"
+ ],
+ "tools": [],
+ "commands": [
+ "npm install # generates package-lock.json"
+ ],
+ "examples": [],
+ "citations": []
+ },
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "test_coverage",
+ "name": "Test Coverage Requirements",
+ "category": "Testing & CI/CD",
+ "tier": 2,
+ "description": "Test coverage thresholds configured and enforced",
+ "criteria": ">80% code coverage",
+ "default_weight": 0.03
+ },
+ "status": "fail",
+ "score": 50.0,
+ "measured_value": "not configured",
+ "threshold": "configured with >80% threshold",
+ "evidence": [
+ "Coverage config: \u2713",
+ "pytest-cov: \u2717"
+ ],
+ "remediation": {
+ "summary": "Configure test coverage with \u226580% threshold",
+ "steps": [
+ "Install coverage tool (pytest-cov for Python, jest for JavaScript)",
+ "Configure coverage threshold in project config",
+ "Add coverage reporting to CI/CD pipeline",
+ "Run coverage locally before committing"
+ ],
+ "tools": [
+ "pytest-cov",
+ "jest",
+ "vitest",
+ "coverage"
+ ],
+ "commands": [
+ "# Python",
+ "pip install pytest-cov",
+ "pytest --cov=src --cov-report=term-missing --cov-fail-under=80",
+ "",
+ "# JavaScript",
+ "npm install --save-dev jest",
+      "npm test -- --coverage --coverageThreshold='{\"global\": {\"lines\": 80}}'"
+ ],
+ "examples": [
+ "# Python - pyproject.toml\n[tool.pytest.ini_options]\naddopts = \"--cov=src --cov-report=term-missing\"\n\n[tool.coverage.report]\nfail_under = 80\n",
+ "// JavaScript - package.json\n{\n \"jest\": {\n \"coverageThreshold\": {\n \"global\": {\n \"lines\": 80,\n \"statements\": 80,\n \"functions\": 80,\n \"branches\": 80\n }\n }\n }\n}\n"
+ ],
+ "citations": [
+ {
+ "source": "pytest-cov",
+ "title": "Coverage Configuration",
+ "url": "https://pytest-cov.readthedocs.io/",
+ "relevance": "pytest-cov configuration guide"
+ }
+ ]
+ },
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "precommit_hooks",
+ "name": "Pre-commit Hooks & CI/CD Linting",
+ "category": "Testing & CI/CD",
+ "tier": 2,
+ "description": "Pre-commit hooks configured for linting and formatting",
+ "criteria": ".pre-commit-config.yaml exists",
+ "default_weight": 0.03
+ },
+ "status": "fail",
+ "score": 0.0,
+ "measured_value": "not configured",
+ "threshold": "configured",
+ "evidence": [
+ ".pre-commit-config.yaml not found"
+ ],
+ "remediation": {
+ "summary": "Configure pre-commit hooks for automated code quality checks",
+ "steps": [
+ "Install pre-commit framework",
+ "Create .pre-commit-config.yaml",
+ "Add hooks for linting and formatting",
+ "Install hooks: pre-commit install",
+ "Run on all files: pre-commit run --all-files"
+ ],
+ "tools": [
+ "pre-commit"
+ ],
+ "commands": [
+ "pip install pre-commit",
+ "pre-commit install",
+ "pre-commit run --all-files"
+ ],
+ "examples": [
+ "# .pre-commit-config.yaml\nrepos:\n - repo: https://github.com/pre-commit/pre-commit-hooks\n rev: v4.4.0\n hooks:\n - id: trailing-whitespace\n - id: end-of-file-fixer\n - id: check-yaml\n - id: check-added-large-files\n\n - repo: https://github.com/psf/black\n rev: 23.3.0\n hooks:\n - id: black\n\n - repo: https://github.com/pycqa/isort\n rev: 5.12.0\n hooks:\n - id: isort\n"
+ ],
+ "citations": [
+ {
+ "source": "pre-commit.com",
+ "title": "Pre-commit Framework",
+ "url": "https://pre-commit.com/",
+ "relevance": "Official pre-commit documentation"
+ }
+ ]
+ },
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "conventional_commits",
+ "name": "Conventional Commit Messages",
+ "category": "Git & Version Control",
+ "tier": 2,
+ "description": "Follows conventional commit format",
+ "criteria": "\u226580% of recent commits follow convention",
+ "default_weight": 0.03
+ },
+ "status": "fail",
+ "score": 0.0,
+ "measured_value": "not configured",
+ "threshold": "configured",
+ "evidence": [
+ "No commitlint or husky configuration"
+ ],
+ "remediation": {
+ "summary": "Configure conventional commits with commitlint",
+ "steps": [
+ "Install commitlint",
+ "Configure husky for commit-msg hook"
+ ],
+ "tools": [
+ "commitlint",
+ "husky"
+ ],
+ "commands": [
+ "npm install --save-dev @commitlint/cli @commitlint/config-conventional husky"
+ ],
+ "examples": [],
+ "citations": []
+ },
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "gitignore_completeness",
+ "name": ".gitignore Completeness",
+ "category": "Git & Version Control",
+ "tier": 2,
+ "description": "Comprehensive .gitignore file",
+ "criteria": ".gitignore exists and covers common patterns",
+ "default_weight": 0.03
+ },
+ "status": "pass",
+ "score": 100.0,
+ "measured_value": "325 bytes",
+ "threshold": ">50 bytes",
+ "evidence": [
+ ".gitignore found (325 bytes)"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "cyclomatic_complexity",
+ "name": "Cyclomatic Complexity Thresholds",
+ "category": "Code Quality",
+ "tier": 3,
+ "description": "Cyclomatic complexity thresholds enforced",
+ "criteria": "Average complexity <10, no functions >15",
+ "default_weight": 0.03
+ },
+ "status": "pass",
+ "score": 100.0,
+ "measured_value": "2.0",
+ "threshold": "<10.0",
+ "evidence": [
+ "Average cyclomatic complexity: 2.0"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "one_command_setup",
+ "name": "One-Command Build/Setup",
+ "category": "Build & Development",
+ "tier": 2,
+ "description": "Assessment for One-Command Build/Setup",
+ "criteria": "To be implemented",
+ "default_weight": 0.03
+ },
+ "status": "not_applicable",
+ "score": null,
+ "measured_value": null,
+ "threshold": null,
+ "evidence": [
+ "One-Command Build/Setup assessment not yet implemented"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "concise_documentation",
+ "name": "Concise Structured Documentation",
+ "category": "Context Window Optimization",
+ "tier": 2,
+ "description": "Assessment for Concise Structured Documentation",
+ "criteria": "To be implemented",
+ "default_weight": 0.03
+ },
+ "status": "not_applicable",
+ "score": null,
+ "measured_value": null,
+ "threshold": null,
+ "evidence": [
+ "Concise Structured Documentation assessment not yet implemented"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "inline_documentation",
+ "name": "Inline Documentation",
+ "category": "Documentation Standards",
+ "tier": 2,
+ "description": "Assessment for Inline Documentation",
+ "criteria": "To be implemented",
+ "default_weight": 0.03
+ },
+ "status": "not_applicable",
+ "score": null,
+ "measured_value": null,
+ "threshold": null,
+ "evidence": [
+ "Inline Documentation assessment not yet implemented"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "file_size_limits",
+ "name": "File Size Limits",
+ "category": "Context Window Optimization",
+ "tier": 2,
+ "description": "Assessment for File Size Limits",
+ "criteria": "To be implemented",
+ "default_weight": 0.03
+ },
+ "status": "not_applicable",
+ "score": null,
+ "measured_value": null,
+ "threshold": null,
+ "evidence": [
+ "File Size Limits assessment not yet implemented"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "dependency_freshness",
+ "name": "Dependency Freshness & Security",
+ "category": "Dependency Management",
+ "tier": 2,
+ "description": "Assessment for Dependency Freshness & Security",
+ "criteria": "To be implemented",
+ "default_weight": 0.03
+ },
+ "status": "not_applicable",
+ "score": null,
+ "measured_value": null,
+ "threshold": null,
+ "evidence": [
+ "Dependency Freshness & Security assessment not yet implemented"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "separation_concerns",
+ "name": "Separation of Concerns",
+ "category": "Repository Structure",
+ "tier": 2,
+ "description": "Assessment for Separation of Concerns",
+ "criteria": "To be implemented",
+ "default_weight": 0.03
+ },
+ "status": "not_applicable",
+ "score": null,
+ "measured_value": null,
+ "threshold": null,
+ "evidence": [
+ "Separation of Concerns assessment not yet implemented"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "structured_logging",
+ "name": "Structured Logging",
+ "category": "Error Handling",
+ "tier": 3,
+ "description": "Assessment for Structured Logging",
+ "criteria": "To be implemented",
+ "default_weight": 0.03
+ },
+ "status": "not_applicable",
+ "score": null,
+ "measured_value": null,
+ "threshold": null,
+ "evidence": [
+ "Structured Logging assessment not yet implemented"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "openapi_specs",
+ "name": "OpenAPI/Swagger Specifications",
+ "category": "API Documentation",
+ "tier": 3,
+ "description": "Assessment for OpenAPI/Swagger Specifications",
+ "criteria": "To be implemented",
+ "default_weight": 0.03
+ },
+ "status": "not_applicable",
+ "score": null,
+ "measured_value": null,
+ "threshold": null,
+ "evidence": [
+ "OpenAPI/Swagger Specifications assessment not yet implemented"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "architecture_decisions",
+ "name": "Architecture Decision Records",
+ "category": "Documentation Standards",
+ "tier": 3,
+ "description": "Assessment for Architecture Decision Records",
+ "criteria": "To be implemented",
+ "default_weight": 0.03
+ },
+ "status": "not_applicable",
+ "score": null,
+ "measured_value": null,
+ "threshold": null,
+ "evidence": [
+ "Architecture Decision Records assessment not yet implemented"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "semantic_naming",
+ "name": "Semantic File & Directory Naming",
+ "category": "Modularity",
+ "tier": 3,
+ "description": "Assessment for Semantic File & Directory Naming",
+ "criteria": "To be implemented",
+ "default_weight": 0.03
+ },
+ "status": "not_applicable",
+ "score": null,
+ "measured_value": null,
+ "threshold": null,
+ "evidence": [
+ "Semantic File & Directory Naming assessment not yet implemented"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "security_scanning",
+ "name": "Security Scanning Automation",
+ "category": "Security",
+ "tier": 4,
+ "description": "Assessment for Security Scanning Automation",
+ "criteria": "To be implemented",
+ "default_weight": 0.01
+ },
+ "status": "not_applicable",
+ "score": null,
+ "measured_value": null,
+ "threshold": null,
+ "evidence": [
+ "Security Scanning Automation assessment not yet implemented"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "performance_benchmarks",
+ "name": "Performance Benchmarks",
+ "category": "Performance",
+ "tier": 4,
+ "description": "Assessment for Performance Benchmarks",
+ "criteria": "To be implemented",
+ "default_weight": 0.01
+ },
+ "status": "not_applicable",
+ "score": null,
+ "measured_value": null,
+ "threshold": null,
+ "evidence": [
+ "Performance Benchmarks assessment not yet implemented"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "code_smells",
+ "name": "Code Smell Elimination",
+ "category": "Code Quality",
+ "tier": 4,
+ "description": "Assessment for Code Smell Elimination",
+ "criteria": "To be implemented",
+ "default_weight": 0.01
+ },
+ "status": "not_applicable",
+ "score": null,
+ "measured_value": null,
+ "threshold": null,
+ "evidence": [
+ "Code Smell Elimination assessment not yet implemented"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "issue_pr_templates",
+ "name": "Issue & Pull Request Templates",
+ "category": "Git & Version Control",
+ "tier": 4,
+ "description": "Assessment for Issue & Pull Request Templates",
+ "criteria": "To be implemented",
+ "default_weight": 0.01
+ },
+ "status": "not_applicable",
+ "score": null,
+ "measured_value": null,
+ "threshold": null,
+ "evidence": [
+ "Issue & Pull Request Templates assessment not yet implemented"
+ ],
+ "remediation": null,
+ "error_message": null
+ },
+ {
+ "attribute": {
+ "id": "container_setup",
+ "name": "Container/Virtualization Setup",
+ "category": "Build & Development",
+ "tier": 4,
+ "description": "Assessment for Container/Virtualization Setup",
+ "criteria": "To be implemented",
+ "default_weight": 0.01
+ },
+ "status": "not_applicable",
+ "score": null,
+ "measured_value": null,
+ "threshold": null,
+ "evidence": [
+ "Container/Virtualization Setup assessment not yet implemented"
+ ],
+ "remediation": null,
+ "error_message": null
+ }
+ ],
+ "config": null,
+ "duration_seconds": 1.8
+}
\ No newline at end of file
diff --git a/.agentready-demo/demo-report-20251121-051257.html b/.agentready-demo/demo-report-20251121-051257.html
new file mode 100644
index 0000000..bb4abc6
--- /dev/null
+++ b/.agentready-demo/demo-report-20251121-051257.html
@@ -0,0 +1,1890 @@
+<!-- HTML report body not reproduced: the 1,890-line HTML was stripped of its markup
+     during extraction. Its content mirrors demo-assessment-20251121-051257.json and
+     demo-report-20251121-051257.md: title "AgentReady Assessment - demo-repo",
+     certification "Silver", and the same per-attribute evidence and remediation details. -->
\ No newline at end of file
diff --git a/.agentready-demo/demo-report-20251121-051257.md b/.agentready-demo/demo-report-20251121-051257.md
new file mode 100644
index 0000000..0f10f4b
--- /dev/null
+++ b/.agentready-demo/demo-report-20251121-051257.md
@@ -0,0 +1,308 @@
+# 🤖 AgentReady Assessment Report
+
+| Repository | Branch | Commit | Score | Level | Date |
+|------------|--------|--------|-------|-------|------|
+| **demo-repo** | main | `87989978` | **73.1/100** | **Silver** | 2025-11-21 05:12 |
+
+---
+
+## 📊 Summary
+
+| Metric | Value |
+|--------|-------|
+| **Overall Score** | **73.1/100** |
+| **Certification Level** | **Silver** |
+| **Attributes Assessed** | 10/25 |
+| **Attributes Skipped** | 15 |
+| **Assessment Duration** | 1.8s |
+
+### Languages Detected
+
+- **Python**: 4 files
+
+### Repository Stats
+
+- **Total Files**: 7
+- **Total Lines**: 100
+
+## 🎖️ Certification Ladder
+
+- 💎 **Platinum** (90-100)
+- 🥇 **Gold** (75-89)
+- 🥈 **Silver** (60-74) **→ YOUR LEVEL ←**
+- 🥉 **Bronze** (40-59)
+- ⚠️ **Needs Improvement** (0-39)
+
+## 📋 Detailed Findings
+
+### API Documentation
+
+| Attribute | Tier | Status | Score |
+|-----------|------|--------|-------|
+| OpenAPI/Swagger Specifications | T3 | ⊘ not_applicable | — |
+
+### Build & Development
+
+| Attribute | Tier | Status | Score |
+|-----------|------|--------|-------|
+| One-Command Build/Setup | T2 | ⊘ not_applicable | — |
+| Container/Virtualization Setup | T4 | ⊘ not_applicable | — |
+
+### Code Quality
+
+| Attribute | Tier | Status | Score |
+|-----------|------|--------|-------|
+| Type Annotations | T1 | ✅ pass | 100 |
+| Cyclomatic Complexity Thresholds | T3 | ✅ pass | 100 |
+| Code Smell Elimination | T4 | ⊘ not_applicable | — |
+
+### Context Window Optimization
+
+| Attribute | Tier | Status | Score |
+|-----------|------|--------|-------|
+| CLAUDE.md Configuration Files | T1 | ✅ pass | 100 |
+| Concise Structured Documentation | T2 | ⊘ not_applicable | — |
+| File Size Limits | T2 | ⊘ not_applicable | — |
+
+### Dependency Management
+
+| Attribute | Tier | Status | Score |
+|-----------|------|--------|-------|
+| Lock Files for Reproducibility | T1 | ❌ fail | 0 |
+| Dependency Freshness & Security | T2 | ⊘ not_applicable | — |
+
+#### ❌ Lock Files for Reproducibility
+
+**Measured**: none (Threshold: at least one lock file)
+
+**Evidence**:
+- No lock files found
+
+**📝 Remediation Steps**
+
+Add a lock file for dependency reproducibility:
+
+1. Use `npm install`, `poetry lock`, or equivalent to generate a lock file
+
+**Commands**:
+
+```bash
+npm install # generates package-lock.json
+```
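+
+Since the demo repository is Python (4 Python files), a Python lock file is the more direct fix. Equivalent options, depending on which toolchain the project already uses:
+
+```bash
+# Poetry: writes poetry.lock from pyproject.toml
+poetry lock
+
+# pip-tools: compiles a fully pinned requirements.txt from requirements.in
+pip install pip-tools
+pip-compile requirements.in
+
+# uv: writes uv.lock
+uv lock
+```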
+
+
+
+### Documentation Standards
+
+| Attribute | Tier | Status | Score |
+|-----------|------|--------|-------|
+| README Structure | T1 | ✅ pass | 100 |
+| Inline Documentation | T2 | ⊘ not_applicable | — |
+| Architecture Decision Records | T3 | ⊘ not_applicable | — |
+
+### Error Handling
+
+| Attribute | Tier | Status | Score |
+|-----------|------|--------|-------|
+| Structured Logging | T3 | ⊘ not_applicable | — |
+
+### Git & Version Control
+
+| Attribute | Tier | Status | Score |
+|-----------|------|--------|-------|
+| Conventional Commit Messages | T2 | ❌ fail | 0 |
+| .gitignore Completeness | T2 | ✅ pass | 100 |
+| Issue & Pull Request Templates | T4 | ⊘ not_applicable | — |
+
+#### ❌ Conventional Commit Messages
+
+**Measured**: not configured (Threshold: configured)
+
+**Evidence**:
+- No commitlint or husky configuration
+
+**📝 Remediation Steps**
+
+Configure conventional commits with commitlint:
+
+1. Install commitlint
+2. Configure husky for the commit-msg hook
+
+**Commands**:
+
+```bash
+npm install --save-dev @commitlint/cli @commitlint/config-conventional husky
+```
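+
+A minimal configuration sketch (standard commitlint/husky file names; hook wiring differs slightly between husky versions):
+
+```bash
+# Minimal commitlint config using the conventional-commits preset
+cat > commitlint.config.js <<'EOF'
+module.exports = { extends: ['@commitlint/config-conventional'] };
+EOF
+
+# husky commit-msg hook: run commitlint on each commit message
+echo 'npx --no -- commitlint --edit "$1"' > .husky/commit-msg
+```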
+
+
+
+### Modularity
+
+| Attribute | Tier | Status | Score |
+|-----------|------|--------|-------|
+| Semantic File & Directory Naming | T3 | ⊘ not_applicable | — |
+
+### Performance
+
+| Attribute | Tier | Status | Score |
+|-----------|------|--------|-------|
+| Performance Benchmarks | T4 | ⊘ not_applicable | — |
+
+### Repository Structure
+
+| Attribute | Tier | Status | Score |
+|-----------|------|--------|-------|
+| Standard Project Layouts | T1 | ✅ pass | 100 |
+| Separation of Concerns | T2 | ⊘ not_applicable | — |
+
+### Security
+
+| Attribute | Tier | Status | Score |
+|-----------|------|--------|-------|
+| Security Scanning Automation | T4 | ⊘ not_applicable | — |
+
+### Testing & CI/CD
+
+| Attribute | Tier | Status | Score |
+|-----------|------|--------|-------|
+| Test Coverage Requirements | T2 | ❌ fail | 50 |
+| Pre-commit Hooks & CI/CD Linting | T2 | ❌ fail | 0 |
+
+#### ❌ Test Coverage Requirements
+
+**Measured**: not configured (Threshold: configured with >80% threshold)
+
+**Evidence**:
+- Coverage config: ✓
+- pytest-cov: ✗
+
+**📝 Remediation Steps**
+
+Configure test coverage with ≥80% threshold:
+
+1. Install coverage tool (pytest-cov for Python, jest for JavaScript)
+2. Configure coverage threshold in project config
+3. Add coverage reporting to CI/CD pipeline
+4. Run coverage locally before committing
+
+**Commands**:
+
+```bash
+# Python
+pip install pytest-cov
+pytest --cov=src --cov-report=term-missing --cov-fail-under=80
+
+# JavaScript
+npm install --save-dev jest
+npm test -- --coverage --coverageThreshold='{"global": {"lines": 80}}'
+```
+
+**Examples**:
+
+```toml
+# Python - pyproject.toml
+[tool.pytest.ini_options]
+addopts = "--cov=src --cov-report=term-missing"
+
+[tool.coverage.report]
+fail_under = 80
+```
+
+```jsonc
+// JavaScript - package.json
+{
+  "jest": {
+    "coverageThreshold": {
+      "global": {
+        "lines": 80,
+        "statements": 80,
+        "functions": 80,
+        "branches": 80
+      }
+    }
+  }
+}
+```
+
+
+
+#### ❌ Pre-commit Hooks & CI/CD Linting
+
+**Measured**: not configured (Threshold: configured)
+
+**Evidence**:
+- .pre-commit-config.yaml not found
+
+**📝 Remediation Steps**
+
+Configure pre-commit hooks for automated code quality checks:
+
+1. Install pre-commit framework
+2. Create .pre-commit-config.yaml
+3. Add hooks for linting and formatting
+4. Install hooks: pre-commit install
+5. Run on all files: pre-commit run --all-files
+
+**Commands**:
+
+```bash
+pip install pre-commit
+pre-commit install
+pre-commit run --all-files
+```
+
+**Examples**:
+
+```yaml
+# .pre-commit-config.yaml
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.4.0
+    hooks:
+      - id: trailing-whitespace
+      - id: end-of-file-fixer
+      - id: check-yaml
+      - id: check-added-large-files
+
+  - repo: https://github.com/psf/black
+    rev: 23.3.0
+    hooks:
+      - id: black
+
+  - repo: https://github.com/pycqa/isort
+    rev: 5.12.0
+    hooks:
+      - id: isort
+```
+
+
+
+## 🎯 Next Steps
+
+**Priority Improvements** (highest impact first):
+
+1. **Lock Files for Reproducibility** (Tier 1) - +10.0 points potential
+ - Add lock file for dependency reproducibility
+2. **Pre-commit Hooks & CI/CD Linting** (Tier 2) - +3.0 points potential
+ - Configure pre-commit hooks for automated code quality checks
+3. **Conventional Commit Messages** (Tier 2) - +3.0 points potential
+ - Configure conventional commits with commitlint
+4. **Test Coverage Requirements** (Tier 2) - +3.0 points potential
+ - Configure test coverage with ≥80% threshold
+
+---
+
+## 📝 Assessment Metadata
+
+- **Tool Version**: AgentReady v1.0.0
+- **Research Report**: Bundled version
+- **Repository Snapshot**: 8798997839b602cbb3f02073ca66ed95abc6cdda
+- **Assessment Duration**: 1.8s
+
+🤖 Generated with [Claude Code](https://claude.com/claude-code)
\ No newline at end of file
diff --git a/.github/CLAUDE_INTEGRATION.md b/.github/CLAUDE_INTEGRATION.md
new file mode 100644
index 0000000..091ebbd
--- /dev/null
+++ b/.github/CLAUDE_INTEGRATION.md
@@ -0,0 +1,276 @@
+# Dual Claude Integration Guide
+
+This repository uses **two different Claude integrations** that work together for different use cases.
+
+## 🤖 Integration Overview
+
+| Integration | Trigger | Behavior | Use Case |
+|------------|---------|----------|----------|
+| **Claude Code Action** | Assign issue to `claude-bot` | Automated implementation in GitHub Actions | Autonomous feature development |
+| **Direct @claude** | @mention claude in comments | Interactive conversation in comment thread | Discussion, guidance, code review |
+
+---
+
+## 1. Automated Implementation (Claude Code Action)
+
+### How It Works
+
+When you **assign an issue to `claude-bot`**:
+1. GitHub Actions workflow triggers
+2. The Claude Code CLI spins up in the runner
+3. Reads `CLAUDE.md` for project context
+4. Implements the issue autonomously
+5. Creates feature branch and commits
+6. Opens PR for review
+
+### Configuration
+
+**Workflow**: `.github/workflows/claude-code-action.yml`
+- Triggers only on `issues.assigned` events
+- Checks `if: github.event.assignee.login == 'claude-bot'`
+- Uses `ANTHROPIC_API_KEY` secret
+
+**System Prompts**:
+- `CLAUDE.md` (project-level) - Automatically read
+- `.github/claude-bot-prompt.md` (automation-specific) - Optional, currently commented out
+
+### Usage
+
+1. Create an issue on GitHub
+2. Assign it to the `claude-bot` user
+3. Wait for the PR to be created
+
+### Example Workflow
+
+1. Create issue: "Implement dependency freshness assessor"
+2. Assign to `claude-bot`
+3. Wait 2-5 minutes
+4. Review PR created by claude-bot
+5. Merge or request changes
+
+---
+
+## 2. Interactive Conversation (Direct @claude)
+
+### How It Works
+
+When you **@mention claude in any comment**:
+1. Claude responds directly in the comment thread
+2. No code is written automatically
+3. Interactive back-and-forth discussion
+4. You control when to apply suggestions
+
+### Setup Required
+
+**Install the Claude GitHub App**:
+1. Navigate to https://github.com/apps/claude-ai
+2. Click "Install"
+3. Select `ambient-code/agentready` repository
+4. Grant required permissions:
+ - Read access to code and issues
+ - Write access to comments
+5. Complete installation
+
+**Connect Your Account**:
+1. Go to https://claude.ai/settings
+2. Navigate to "Integrations"
+3. Connect your GitHub account
+4. Authorize the app
+
+### Usage
+
+```markdown
+@claude How should I structure the DependencyFreshnessAssessor?
+```
+
+Claude will respond with:
+- Architecture suggestions
+- Code examples
+- Best practices
+- Questions for clarification
+
+You then choose which suggestions to implement manually (or ask claude-bot to implement via assignment).
+
+---
+
+## 3. When to Use Each Integration
+
+### Use Claude Code Action (assign to claude-bot) when:
+- ✅ You want autonomous implementation
+- ✅ The task is well-defined
+- ✅ You're okay with reviewing a PR afterward
+- ✅ You want to save development time
+
+### Use Direct @claude when:
+- ✅ You need design discussion first
+- ✅ You want to explore options interactively
+- ✅ You need code review feedback
+- ✅ You want to implement manually with guidance
+- ✅ The task has ambiguity or trade-offs
+
+### Use Both Together:
+1. Create issue with @claude mention
+2. Discuss approach with interactive Claude
+3. Once design is settled, assign to claude-bot
+4. claude-bot implements the agreed-upon design
+
+---
+
+## 4. Customizing Automation Behavior
+
+### Editing CLAUDE.md
+
+`CLAUDE.md` is the **main source of truth** for project context:
+- Architecture overview
+- Development workflow
+- Code quality standards
+- Testing requirements
+
+**Changes to CLAUDE.md affect both integrations.**
+
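+A minimal skeleton (illustrative only; the section names below follow the list above and are conventions, not a required schema):
+
+```markdown
+# CLAUDE.md
+
+## Tech Stack
+Python 3.11+, Click, pytest, uv
+
+## Standard Commands
+- `uv run pytest` — run the test suite
+- `uv run ruff check .` — lint
+
+## Repository Structure
+- `src/` — library code
+- `tests/` — pytest suite
+
+## Boundaries
+- Never commit directly to main
+- Keep test coverage above 80%
+```
+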
+### Editing .github/claude-bot-prompt.md
+
+This file provides **automation-specific instructions**:
+- Feature branch naming conventions
+- PR creation templates
+- TDD requirements
+- Commit message formats
+
+**To enable**: Uncomment the `claude_args` line in `claude-code-action.yml`
+
+### Example Customization
+
+```yaml
+# In .github/workflows/claude-code-action.yml
+claude_args: --append-system-prompt "$(cat .github/claude-bot-prompt.md)"
+```
+
+---
+
+## 5. Troubleshooting
+
+### Claude Code Action Not Triggering
+
+**Check**:
+- [ ] Issue is assigned to `claude-bot` user (exact spelling)
+- [ ] GitHub Actions workflow is enabled in Settings > Actions
+- [ ] `ANTHROPIC_API_KEY` secret is set in repository secrets
+- [ ] Workflow file syntax is valid (no YAML errors)
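+
+Several of these can be checked from the CLI (assuming the `gh` CLI is installed and authenticated with repository access):
+
+```bash
+gh workflow list                                          # is the workflow enabled?
+gh secret list                                            # is ANTHROPIC_API_KEY set?
+gh run list --workflow=claude-code-action.yml --limit 5   # recent runs and their status
+```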
+
+**View Logs**:
+1. Go to Actions tab
+2. Find the failed/running workflow
+3. Click to view logs
+4. Check "Claude Code Action" step for errors
+
+### Direct @claude Not Responding
+
+**Check**:
+- [ ] Claude GitHub App is installed on repository
+- [ ] Your GitHub account is connected at claude.ai
+- [ ] You used `@claude` (not `@claude-bot`)
+- [ ] Comment is on an issue or PR (not commit)
+
+**Note**: Direct @claude may take 30-60 seconds to respond initially.
+
+### Both Integrations Triggering
+
+**This shouldn't happen with current config**:
+- Claude Code Action only triggers on assignment to `claude-bot`
+- Direct @claude responds to @mentions
+- These are mutually exclusive triggers
+
+If both respond, check that:
+- Workflow file has correct `if:` condition
+- You're not both assigning AND mentioning
+
+---
+
+## 6. Security Considerations
+
+### API Keys
+- `ANTHROPIC_API_KEY` stored as GitHub secret (encrypted)
+- Never exposed in logs or PR comments
+- Automatically rotated every 90 days
+
+### Permissions
+- Claude Code Action has `write` access (needed for PRs)
+- Direct @claude has `read` + `comment` access only
+- Both run in isolated environments
+
+### Code Review
+- **Always review PRs** created by claude-bot before merging
+- Check for security issues (hardcoded secrets, injection vulnerabilities)
+- Verify tests pass and coverage is maintained
+- Run local linters before merging
+
+---
+
+## 7. Cost Management
+
+### Claude Code Action
+- Uses Anthropic API (metered by tokens)
+- Typical cost: $0.10-$0.50 per issue implementation
+- Monitor usage in Anthropic Console
+
+### Direct @claude
+- Free for individual use
+- Subject to rate limits (TBD by Anthropic)
+
+### Best Practices
+- Use claude-bot for well-defined tasks only
+- Use direct @claude for exploration/discussion (cheaper)
+- Review generated code before running (avoid wasted API calls)
+
+---
+
+## 8. Examples
+
+### Example 1: Feature Discussion → Implementation
+```markdown
+# GitHub Issue #42: "Add dependency freshness assessor"
+
+@claude What's the best way to check if dependencies are up-to-date?
+
+[Claude responds with options: pip-audit, safety, custom parser]
+
+Thanks! Let's use pip-audit. Assigning to @claude-bot for implementation.
+
+[Assigns issue to claude-bot]
+[claude-bot creates PR with pip-audit integration]
+```
+
+### Example 2: Code Review
+```markdown
+# PR #43: "Implement dependency freshness assessor"
+
+@claude Can you review this implementation for security issues?
+
+[Claude provides detailed security review in comment]
+```
+
+### Example 3: Quick Implementation
+```markdown
+# GitHub Issue #44: "Fix typo in README"
+
+[Assigns to claude-bot immediately]
+[claude-bot fixes typo in 30 seconds]
+```
+
+---
+
+## 9. References
+
+- **Claude Code Action**: https://github.com/anthropics/claude-code-action
+- **Claude GitHub App**: https://github.com/apps/claude-ai
+- **CLAUDE.md Best Practices**: https://arize.com/blog/claude-md-best-practices
+- **AgentReady CLAUDE.md**: `/CLAUDE.md` (this repository)
+- **Automation Prompt**: `/.github/claude-bot-prompt.md` (this repository)
+
+---
+
+**Last Updated**: 2025-11-21
+**Maintained By**: @jeder
+**Status**: Active
diff --git a/.github/claude-bot-prompt.md b/.github/claude-bot-prompt.md
new file mode 100644
index 0000000..33dba32
--- /dev/null
+++ b/.github/claude-bot-prompt.md
@@ -0,0 +1,40 @@
+# Claude Bot Automation Context
+
+This file provides additional instructions for the automated Claude Code Action when implementing issues assigned to `claude-bot`.
+
+## Automation Workflow
+
+When assigned an issue, you should:
+
+1. **Create feature branch**: Always create a feature branch from `main` (never push to main directly)
+2. **Follow TDD**: Write tests before implementation when applicable
+3. **Run linters**: Always run `black`, `isort`, `ruff` before committing
+4. **Run tests**: Ensure all tests pass with `pytest`
+5. **Commit frequently**: Use conventional commits with clear, succinct messages
+6. **Open PR**: Create a pull request for review (don't merge automatically)
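+
+The lint-and-test sequence from steps 3-4, as commands (assuming the tools are installed in the project environment):
+
+```bash
+black . && isort . && ruff check .
+pytest
+```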
+
+## Implementation Standards
+
+- **Python**: Follow PEP 8, use type hints, support Python 3.11+
+- **Testing**: Maintain >80% coverage for new code
+- **Documentation**: Update docstrings and CLAUDE.md as needed
+- **Security**: Never expose secrets, validate inputs, follow OWASP guidelines
+
+## PR Template
+
+When creating pull requests, include:
+- Summary of changes
+- Test plan
+- Breaking changes (if any)
+- Related issues/tickets
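+
+A minimal PR body following that checklist (illustrative; the issue number and test file are hypothetical):
+
+```markdown
+## Summary
+Add DependencyFreshnessAssessor backed by pip-audit.
+
+## Test Plan
+- `pytest tests/test_dependency_freshness.py` passes locally
+- Manual run against the demo repository
+
+## Breaking Changes
+None.
+
+## Related Issues
+Closes #42
+```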
+
+## Important Context
+
+- This is an open-source project under MIT license
+- Target audience: Software engineering teams using AI-assisted development
+- Code quality and user experience are paramount
+- Prefer simple, focused solutions over complex abstractions
+
+---
+
+**Note**: CLAUDE.md is automatically read by the action. This file provides automation-specific guidance that supplements the project-level instructions.
diff --git a/.github/workflows/agentready-assessment.yml b/.github/workflows/agentready-assessment.yml
index 63f5432..e724c09 100644
--- a/.github/workflows/agentready-assessment.yml
+++ b/.github/workflows/agentready-assessment.yml
@@ -25,7 +25,7 @@ jobs:
- name: Install AgentReady
run: |
- pip install agentready
+ pip install -e .
- name: Run AgentReady Assessment
run: |
diff --git a/.github/workflows/claude-code-action.yml b/.github/workflows/claude-code-action.yml
index 28bd2b4..687694e 100644
--- a/.github/workflows/claude-code-action.yml
+++ b/.github/workflows/claude-code-action.yml
@@ -1,14 +1,8 @@
-name: Claude Code Action
+name: Claude Code Action (Automated)
on:
- issue_comment:
- types: [created]
- pull_request_review_comment:
- types: [created]
issues:
- types: [opened, assigned]
- pull_request:
- types: [opened, ready_for_review]
+ types: [assigned]
permissions:
contents: write
@@ -18,6 +12,8 @@ permissions:
jobs:
claude-code:
runs-on: ubuntu-latest
+ # Only run when assigned to claude-bot user
+ if: github.event.assignee.login == 'claude-bot'
steps:
- name: Checkout repository
uses: actions/checkout@v4
@@ -29,3 +25,6 @@ jobs:
with:
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
github_token: ${{ secrets.GITHUB_TOKEN }}
+ # CLAUDE.md is automatically read by the action
+ # Optional: Uncomment to add automation-specific instructions
+ # claude_args: --append-system-prompt "Focus on implementation. Create feature branches, write tests, and open PRs for review."
diff --git a/.github/workflows/continuous-learning.yml b/.github/workflows/continuous-learning.yml
new file mode 100644
index 0000000..3f66d59
--- /dev/null
+++ b/.github/workflows/continuous-learning.yml
@@ -0,0 +1,134 @@
+name: Continuous Learning - Extract Skills
+
+on:
+ # Manual trigger
+ workflow_dispatch:
+ inputs:
+ output_format:
+ description: 'Output format for skills'
+ required: true
+ default: 'github-issues'
+ type: choice
+ options:
+ - github-issues
+ - skill-files
+ - both
+
+ # Automatic on new releases
+ release:
+ types: [published]
+
+ # Weekly analysis on Sundays at midnight UTC
+ schedule:
+ - cron: '0 0 * * 0'
+
+jobs:
+ extract-skills:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write
+ issues: write
+ pull-requests: write
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Setup Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.11'
+
+ - name: Install uv
+ uses: astral-sh/setup-uv@v3
+
+ - name: Install AgentReady
+ run: |
+ uv pip install -e .
+
+ - name: Run self-assessment
+ run: |
+ uv run agentready assess . --output-dir .agentready
+
+ - name: Extract learnings
+ id: learn
+ run: |
+          mkdir -p .skills-proposals
+          uv run agentready learn . --output-format json > .skills-proposals/discovered-skills.json
+ echo "skill_count=$(jq '.skill_count' .skills-proposals/discovered-skills.json)" >> $GITHUB_OUTPUT
+
+ - name: Generate skill proposals
+ if: steps.learn.outputs.skill_count > 0
+ run: |
+ uv run agentready learn . --output-format all --output-dir .skills-proposals
+
+ - name: Create GitHub issues for each skill
+ if: (inputs.output_format == 'github-issues' || inputs.output_format == 'both') && steps.learn.outputs.skill_count > 0
+ env:
+ GH_TOKEN: ${{ github.token }}
+ run: |
+ for skill_file in .skills-proposals/skill-*.md; do
+ if [ -f "$skill_file" ]; then
+ # Extract skill name from filename
+ skill_name=$(basename "$skill_file" .md | sed 's/^skill-//' | sed 's/-/ /g')
+
+ # Create issue with skill proposal
+ gh issue create \
+ --title "Skill Proposal: ${skill_name}" \
+ --label "skill-proposal,enhancement,ai-agent" \
+ --body-file "$skill_file"
+
+ echo "Created issue for: $skill_name"
+ fi
+ done
+
+ - name: Create PR with skill files
+        if: (inputs.output_format == 'skill-files' || inputs.output_format == 'both') && steps.learn.outputs.skill_count > 0
+        env:
+          GH_TOKEN: ${{ github.token }}
+        run: |
+ # Configure git
+ git config user.name "github-actions[bot]"
+ git config user.email "github-actions[bot]@users.noreply.github.com"
+
+ # Create new branch
+ BRANCH_NAME="skills/auto-$(date +%Y%m%d-%H%M%S)"
+ git checkout -b "$BRANCH_NAME"
+
+ # Copy SKILL.md files to .claude/skills
+ mkdir -p .claude/skills
+ for skill_dir in .skills-proposals/*/; do
+ if [ -d "$skill_dir" ] && [ -f "${skill_dir}SKILL.md" ]; then
+ skill_id=$(basename "$skill_dir")
+ mkdir -p ".claude/skills/$skill_id"
+ cp "${skill_dir}SKILL.md" ".claude/skills/$skill_id/"
+ echo "Copied skill: $skill_id"
+ fi
+ done
+
+ # Commit and push
+ git add .claude/skills
+          git commit -m "feat: add discovered skills from continuous learning
+
+          Automatically extracted skills from latest assessment.
+
+          🤖 Generated with Claude Code
+          Co-Authored-By: Claude <noreply@anthropic.com>"
+
+ git push origin "$BRANCH_NAME"
+
+ # Create PR
+ gh pr create \
+ --title "Add discovered skills from continuous learning" \
+ --body "Automatically discovered new Claude Code skills from AgentReady assessment. Review and merge to make available."
+
+ - name: Upload skill proposals as artifacts
+ if: steps.learn.outputs.skill_count > 0
+ uses: actions/upload-artifact@v4
+ with:
+ name: skill-proposals-${{ github.run_number }}
+ path: .skills-proposals/
+ retention-days: 90
+
+ - name: Summary
+ if: steps.learn.outputs.skill_count > 0
+ run: |
+ echo "✅ Discovered ${{ steps.learn.outputs.skill_count }} skills with confidence ≥70%"
+ echo "📁 Artifacts uploaded for review"
diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml
new file mode 100644
index 0000000..68d2620
--- /dev/null
+++ b/.github/workflows/publish-pypi.yml
@@ -0,0 +1,128 @@
+name: Publish to PyPI
+
+on:
+ workflow_dispatch:
+ inputs:
+ version:
+ description: 'Version to publish (leave empty to use current version from pyproject.toml)'
+ required: false
+ type: string
+ dry_run:
+ description: 'Perform a dry run (publish to TestPyPI instead)'
+ required: false
+ type: boolean
+ default: true
+
+jobs:
+ publish:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+      id-token: write # enables PyPI trusted publishing; the steps below currently authenticate with API token secrets
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.11'
+
+ - name: Install build dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install build twine
+
+ - name: Update version if specified
+ if: inputs.version != ''
+ env:
+ VERSION: ${{ inputs.version }}
+ run: |
+ # Update version in pyproject.toml
+ sed -i "s/^version = .*/version = \"$VERSION\"/" pyproject.toml
+ echo "Updated version to $VERSION"
+
+ - name: Build package
+ run: |
+ python -m build
+ echo "📦 Built distribution files:"
+ ls -lh dist/
+
+ - name: Check distribution
+ run: |
+ twine check dist/*
+
+ - name: Publish to TestPyPI (dry run)
+ if: inputs.dry_run == true
+ env:
+ TWINE_USERNAME: __token__
+ TWINE_PASSWORD: ${{ secrets.TEST_PYPI_TOKEN }}
+ run: |
+ echo "🧪 Publishing to TestPyPI..."
+ twine upload --repository testpypi dist/*
+ echo "✅ Published to TestPyPI: https://test.pypi.org/project/agentready/"
+
+ - name: Publish to PyPI (production)
+ if: inputs.dry_run == false
+ env:
+ TWINE_USERNAME: __token__
+ TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
+ run: |
+ echo "🚀 Publishing to PyPI..."
+ twine upload dist/*
+ echo "✅ Published to PyPI: https://pypi.org/project/agentready/"
+
+ - name: Create GitHub Release
+ if: inputs.dry_run == false && inputs.version != ''
+ env:
+ VERSION: ${{ inputs.version }}
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const version = process.env.VERSION;
+ const tag = `v${version}`;
+
+ // Create tag
+ await github.rest.git.createRef({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ ref: `refs/tags/${tag}`,
+ sha: context.sha
+ });
+
+ // Create release
+ await github.rest.repos.createRelease({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ tag_name: tag,
+ name: `Release ${version}`,
+ body: `Published to PyPI: https://pypi.org/project/agentready/${version}/`,
+ draft: false,
+ prerelease: false
+ });
+
+ - name: Summary
+ env:
+ DRY_RUN: ${{ inputs.dry_run }}
+ run: |
+ echo "## Publication Summary" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+
+ if [ "$DRY_RUN" == "true" ]; then
+ echo "✅ **Dry Run Complete**" >> $GITHUB_STEP_SUMMARY
+ echo "Published to TestPyPI: https://test.pypi.org/project/agentready/" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "To install and test:" >> $GITHUB_STEP_SUMMARY
+ echo "\`\`\`bash" >> $GITHUB_STEP_SUMMARY
+ echo "pip install --index-url https://test.pypi.org/simple/ agentready" >> $GITHUB_STEP_SUMMARY
+ echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
+ else
+ echo "🚀 **Published to PyPI**" >> $GITHUB_STEP_SUMMARY
+ echo "Package: https://pypi.org/project/agentready/" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "To install:" >> $GITHUB_STEP_SUMMARY
+ echo "\`\`\`bash" >> $GITHUB_STEP_SUMMARY
+ echo "pip install agentready" >> $GITHUB_STEP_SUMMARY
+ echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
+ fi
diff --git a/.skills-proposals/discovered-skills.json b/.skills-proposals/discovered-skills.json
new file mode 100644
index 0000000..30e90c4
--- /dev/null
+++ b/.skills-proposals/discovered-skills.json
@@ -0,0 +1,36 @@
+{
+ "generated_at": "2025-11-21T14:17:47.941871",
+ "skill_count": 2,
+ "min_confidence": 70,
+ "discovered_skills": [
+ {
+ "skill_id": "setup-claude-md",
+ "name": "Setup CLAUDE.md Configuration",
+ "description": "Create comprehensive CLAUDE.md files with tech stack, standard commands, repository structure, and boundaries to optimize repositories for AI-assisted development",
+ "confidence": 100.0,
+ "source_attribute_id": "claude_md_file",
+ "reusability_score": 100.0,
+ "impact_score": 50.0,
+ "pattern_summary": "Project-specific configuration for Claude Code",
+ "code_examples": [
+ "CLAUDE.md found at /Users/jeder/repos/agentready/CLAUDE.md"
+ ],
+ "citations": []
+ },
+ {
+ "skill_id": "implement-type-annotations",
+ "name": "Implement Type Annotations",
+ "description": "Add comprehensive type hints to Python/TypeScript code to improve IDE support, catch errors early, and enable better AI code understanding",
+ "confidence": 100.0,
+ "source_attribute_id": "type_annotations",
+ "reusability_score": 100.0,
+ "impact_score": 50.0,
+ "pattern_summary": "Type hints in function signatures",
+ "code_examples": [
+ "Typed functions: 180/186",
+ "Coverage: 96.8%"
+ ],
+ "citations": []
+ }
+ ]
+}
\ No newline at end of file
diff --git a/.skills-proposals/implement-type-annotations-report.md b/.skills-proposals/implement-type-annotations-report.md
new file mode 100644
index 0000000..658fad4
--- /dev/null
+++ b/.skills-proposals/implement-type-annotations-report.md
@@ -0,0 +1,63 @@
+# Skill Report: Implement Type Annotations
+
+## Overview
+
+**Skill ID**: `implement-type-annotations`
+**Confidence**: 100.0%
+**Impact**: +50.0 pts
+**Reusability**: 100.0%
+**Source Attribute**: type_annotations
+
+---
+
+## Description
+
+Add comprehensive type hints to Python/TypeScript code to improve IDE support, catch errors early, and enable better AI code understanding
+
+---
+
+## Pattern Summary
+
+Type hints in function signatures
+
+---
+
+## Implementation Guidance
+
+### When to Use This Skill
+
+Use this skill when you need to apply the pattern described above to your repository.
+
+### Code Examples
+
+
+#### Example 1
+
+```
+Typed functions: 180/186
+```
+
+#### Example 2
+
+```
+Coverage: 96.8%
+```
+
+---
+
+## Research Citations
+
+_No citations available_
+
+---
+
+## Metrics
+
+- **Confidence Score**: 100.0% - How confident we are this is a valid pattern
+- **Impact Score**: 50.0 pts - Expected score improvement from applying this skill
+- **Reusability Score**: 100.0% - How often this pattern applies across projects
+
+---
+
+**Generated by**: AgentReady Skill Generator
+**Source**: Pattern extracted from type_annotations assessment
diff --git a/.skills-proposals/implement-type-annotations/SKILL.md b/.skills-proposals/implement-type-annotations/SKILL.md
new file mode 100644
index 0000000..39b320e
--- /dev/null
+++ b/.skills-proposals/implement-type-annotations/SKILL.md
@@ -0,0 +1,49 @@
+---
+name: implement-type-annotations
+description: Add comprehensive type hints to Python/TypeScript code to improve IDE support, catch errors early, and enable better AI code understanding
+---
+# Implement Type Annotations
+
+## When to Use This Skill
+
+Type hints in function signatures
+
+## Instructions
+
+1. Review the pattern summary above
+2. Apply the pattern to your repository
+3. Verify the implementation matches the examples below
+
+
+## Examples
+
+### Example 1
+
+```
+Typed functions: 180/186
+```
+
+### Example 2
+
+```
+Coverage: 96.8%
+```
+
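+A concrete before/after sketch of the pattern (hypothetical function, not taken from the assessed repository):
+
+```python
+# Before: no annotations; callers and tools must guess types
+def score_attribute(findings, weight):
+    return sum(f.score for f in findings) * weight
+
+# After: parameter and return types declared
+def score_attribute(findings: list["Finding"], weight: float) -> float:
+    return sum(f.score for f in findings) * weight
+```
+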
+## Best Practices
+
+- Follow the pattern consistently across your codebase
+- Refer to the citations below for authoritative guidance
+- Test the implementation after applying the pattern
+
+
+## Citations
+
+_No citations available_
+
+---
+
+**Generated by**: AgentReady Skill Generator
+**Confidence**: 100.0%
+**Source Attribute**: type_annotations
+**Reusability**: 100.0%
+**Impact**: +50.0 pts
diff --git a/.skills-proposals/setup-claude-md-report.md b/.skills-proposals/setup-claude-md-report.md
new file mode 100644
index 0000000..2e36fb6
--- /dev/null
+++ b/.skills-proposals/setup-claude-md-report.md
@@ -0,0 +1,57 @@
+# Skill Report: Setup CLAUDE.md Configuration
+
+## Overview
+
+**Skill ID**: `setup-claude-md`
+**Confidence**: 100.0%
+**Impact**: +50.0 pts
+**Reusability**: 100.0%
+**Source Attribute**: claude_md_file
+
+---
+
+## Description
+
+Create comprehensive CLAUDE.md files with tech stack, standard commands, repository structure, and boundaries to optimize repositories for AI-assisted development
+
+---
+
+## Pattern Summary
+
+Project-specific configuration for Claude Code
+
+---
+
+## Implementation Guidance
+
+### When to Use This Skill
+
+Use this skill when you need to apply the pattern described above to your repository.
+
+### Code Examples
+
+
+#### Example 1
+
+```
+CLAUDE.md found at /Users/jeder/repos/agentready/CLAUDE.md
+```
+
+---
+
+## Research Citations
+
+_No citations available_
+
+---
+
+## Metrics
+
+- **Confidence Score**: 100.0% - How confident we are this is a valid pattern
+- **Impact Score**: 50.0 pts - Expected score improvement from applying this skill
+- **Reusability Score**: 100.0% - How often this pattern applies across projects
+
+---
+
+**Generated by**: AgentReady Skill Generator
+**Source**: Pattern extracted from claude_md_file assessment
diff --git a/.skills-proposals/setup-claude-md/SKILL.md b/.skills-proposals/setup-claude-md/SKILL.md
new file mode 100644
index 0000000..f3f1b35
--- /dev/null
+++ b/.skills-proposals/setup-claude-md/SKILL.md
@@ -0,0 +1,43 @@
+---
+name: setup-claude-md
+description: Create comprehensive CLAUDE.md files with tech stack, standard commands, repository structure, and boundaries to optimize repositories for AI-assisted development
+---
+# Setup CLAUDE.md Configuration
+
+## When to Use This Skill
+
+Project-specific configuration for Claude Code
+
+## Instructions
+
+1. Review the pattern summary above
+2. Apply the pattern to your repository
+3. Verify the implementation matches the examples below
+
+
+## Examples
+
+### Example 1
+
+```
+CLAUDE.md found at /Users/jeder/repos/agentready/CLAUDE.md
+```
+
+## Best Practices
+
+- Follow the pattern consistently across your codebase
+- Refer to the citations below for authoritative guidance
+- Test the implementation after applying the pattern
+
+
+## Citations
+
+_No citations available_
+
+---
+
+**Generated by**: AgentReady Skill Generator
+**Confidence**: 100.0%
+**Source Attribute**: claude_md_file
+**Reusability**: 100.0%
+**Impact**: +50.0 pts
diff --git a/.skills-proposals/skill-implement-type-annotations.md b/.skills-proposals/skill-implement-type-annotations.md
new file mode 100644
index 0000000..00bbf95
--- /dev/null
+++ b/.skills-proposals/skill-implement-type-annotations.md
@@ -0,0 +1,123 @@
+---
+name: Skill Proposal - Implement Type Annotations
+about: Automatically generated skill proposal from AgentReady continuous learning
+title: 'Skill Proposal: Implement Type Annotations'
+labels: 'skill-proposal, enhancement, ai-agent'
+assignees: ''
+---
+
+## Skill Proposal: Implement Type Annotations
+
+**Skill ID**: `implement-type-annotations`
+**Confidence**: 100.0%
+**Impact**: +50.0 pts
+**Reusability**: 100.0%
+**Source Attribute**: type_annotations
+
+---
+
+## Description
+
+Add comprehensive type hints to Python/TypeScript code to improve IDE support, catch errors early, and enable better AI code understanding
+
+---
+
+## Pattern Summary
+
+Type hints in function signatures
+
+---
+
+## Proposed SKILL.md
+
+````markdown
+---
+name: implement-type-annotations
+description: Add comprehensive type hints to Python/TypeScript code to improve IDE support, catch errors early, and enable better AI code understanding
+---
+# Implement Type Annotations
+
+## When to Use This Skill
+
+Type hints in function signatures
+
+## Instructions
+
+1. Review the pattern summary above
+2. Apply the pattern to your repository
+3. Verify the implementation matches the examples below
+
+
+## Examples
+
+### Example 1
+
+```
+Typed functions: 180/186
+```
+
+### Example 2
+
+```
+Coverage: 96.8%
+```
+
+## Best Practices
+
+- Follow the pattern consistently across your codebase
+- Refer to the citations below for authoritative guidance
+- Test the implementation after applying the pattern
+
+
+## Citations
+
+_No citations available_
+
+---
+
+**Generated by**: AgentReady Skill Generator
+**Confidence**: 100.0%
+**Source Attribute**: type_annotations
+**Reusability**: 100.0%
+**Impact**: +50.0 pts
+
+````
+
+---
+
+## Implementation Plan
+
+- [ ] Review proposed skill for accuracy
+- [ ] Test skill on 3-5 repositories
+- [ ] Refine instructions based on testing
+- [ ] Create final SKILL.md file
+- [ ] Add to `~/.claude/skills/` or `.claude/skills/`
+- [ ] Document skill in AgentReady catalog
+- [ ] Update skill generator with learnings
+
+---
+
+## Code Examples from Assessment
+
+### Example 1
+
+```
+Typed functions: 180/186
+```
+
+### Example 2
+
+```
+Coverage: 96.8%
+```
+
+---
+
+## Research Citations
+
+_No citations available_
+
+---
+
+**Auto-generated by**: AgentReady Continuous Learning Loop
+**Assessment Date**: 2025-11-21T10:02:46.428125
diff --git a/.skills-proposals/skill-setup-claude-md.md b/.skills-proposals/skill-setup-claude-md.md
new file mode 100644
index 0000000..a031d54
--- /dev/null
+++ b/.skills-proposals/skill-setup-claude-md.md
@@ -0,0 +1,111 @@
+---
+name: Skill Proposal - Setup CLAUDE.md Configuration
+about: Automatically generated skill proposal from AgentReady continuous learning
+title: 'Skill Proposal: Setup CLAUDE.md Configuration'
+labels: 'skill-proposal, enhancement, ai-agent'
+assignees: ''
+---
+
+## Skill Proposal: Setup CLAUDE.md Configuration
+
+**Skill ID**: `setup-claude-md`
+**Confidence**: 100.0%
+**Impact**: +50.0 pts
+**Reusability**: 100.0%
+**Source Attribute**: claude_md_file
+
+---
+
+## Description
+
+Create comprehensive CLAUDE.md files with tech stack, standard commands, repository structure, and boundaries to optimize repositories for AI-assisted development
+
+---
+
+## Pattern Summary
+
+Project-specific configuration for Claude Code
+
+---
+
+## Proposed SKILL.md
+
+````markdown
+---
+name: setup-claude-md
+description: Create comprehensive CLAUDE.md files with tech stack, standard commands, repository structure, and boundaries to optimize repositories for AI-assisted development
+---
+# Setup CLAUDE.md Configuration
+
+## When to Use This Skill
+
+Use this skill when a repository needs project-specific configuration for Claude Code, i.e., it lacks a CLAUDE.md describing its tech stack, commands, structure, and boundaries.
+
+## Instructions
+
+1. Review the skill description above
+2. Apply the pattern to your repository
+3. Verify the implementation matches the examples below
+
+
+## Examples
+
+### Example 1
+
+```
+CLAUDE.md found at /Users/jeder/repos/agentready/CLAUDE.md
+```
+
+## Best Practices
+
+- Follow the pattern consistently across your codebase
+- Refer to the citations below for authoritative guidance
+- Test the implementation after applying the pattern
+
+
+## Citations
+
+_No citations available_
+
+---
+
+**Generated by**: AgentReady Skill Generator
+**Confidence**: 100.0%
+**Source Attribute**: claude_md_file
+**Reusability**: 100.0%
+**Impact**: +50.0 pts
+
+````
+
+---
+
+## Implementation Plan
+
+- [ ] Review proposed skill for accuracy
+- [ ] Test skill on 3-5 repositories
+- [ ] Refine instructions based on testing
+- [ ] Create final SKILL.md file
+- [ ] Add to `~/.claude/skills/` or `.claude/skills/`
+- [ ] Document skill in AgentReady catalog
+- [ ] Update skill generator with learnings
+
+---
+
+## Code Examples from Assessment
+
+### Example 1
+
+```
+CLAUDE.md found at /Users/jeder/repos/agentready/CLAUDE.md
+```
+
+---
+
+## Research Citations
+
+_No citations available_
+
+---
+
+**Auto-generated by**: AgentReady Continuous Learning Loop
+**Assessment Date**: 2025-11-21T10:02:46.427730
diff --git a/CLAUDE.md b/CLAUDE.md
index 0cb2bac..c1b1064 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -40,6 +40,80 @@ agentready assess /path/to/repo --output-dir ./reports
---
+## Continuous Learning Loop (LLM-Powered)
+
+**NEW in v1.1**: Extract high-quality skills from assessments using the Claude API
+
+The `learn` command analyzes assessment results to identify successful patterns and generates Claude Code skills. With `--enable-llm`, it uses Claude Sonnet 4.5 to create detailed, context-aware skill documentation.
+
+### Basic Usage (Heuristic)
+
+```bash
+# Extract skills using heuristic pattern extraction
+agentready learn .
+
+# Generate SKILL.md files
+agentready learn . --output-format skill_md
+
+# Create GitHub issue templates
+agentready learn . --output-format github_issues
+```
+
+### LLM-Powered Enrichment
+
+```bash
+# Set API key
+export ANTHROPIC_API_KEY=sk-ant-api03-...
+
+# Extract skills with LLM enrichment (top 5 skills)
+agentready learn . --enable-llm
+
+# Enrich more skills with custom budget
+agentready learn . --enable-llm --llm-budget 10
+
+# Bypass cache for fresh analysis
+agentready learn . --enable-llm --llm-no-cache
+
+# Generate all formats with LLM enrichment
+agentready learn . --enable-llm --output-format all
+```
+
+### LLM Enrichment Features
+
+**What it does**:
+- Analyzes repository code samples for real examples
+- Generates 5-10 step detailed instructions
+- Extracts file paths and code snippets from actual implementation
+- Derives best practices from high-scoring attributes
+- Identifies anti-patterns to avoid
+
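+The enrichment payload returned by Claude follows the JSON schema defined in `specs/llm-pattern-extraction.md` (abridged excerpt):
+
+```json
+{
+  "skill_description": "One sentence explaining what and when",
+  "instructions": ["Step 1: Specific action with command", "..."],
+  "code_examples": [
+    {"file_path": "relative/path/to/file.py", "code": "...", "explanation": "..."}
+  ],
+  "best_practices": ["Principle derived from this repository"],
+  "anti_patterns": ["Common mistake this repo avoided"]
+}
+```
+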
+**How it works**:
+1. Heuristics extract basic skills from assessment findings
+2. Top N skills (default: 5) are sent to Claude API
+3. Code sampler provides relevant files from repository
+4. Claude analyzes patterns and generates structured JSON
+5. Enriched skills merged with detailed instructions/examples
+6. Results cached for 7 days to reduce API costs
+
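+In code, the loop looks roughly like this (a minimal sketch using the component names from `specs/llm-pattern-extraction.md`; error handling and fallbacks omitted):
+
+```python
+import os
+
+from anthropic import Anthropic
+
+from agentready.learners.llm_enricher import LLMEnricher
+
+client = Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
+enricher = LLMEnricher(client)
+
+enriched = []
+for skill in skills[:llm_budget]:
+    # Match each heuristic skill back to the finding that produced it,
+    # then let the enricher sample code, check the cache, and call Claude
+    finding = next(
+        f for f in assessment.findings if f.attribute.id == skill.source_attribute_id
+    )
+    enriched.append(enricher.enrich_skill(skill, assessment.repository, finding))
+enriched.extend(skills[llm_budget:])  # beyond the budget, keep heuristic skills
+```
+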
+**Caching**:
+- Responses cached in `.agentready/llm-cache/`
+- 7-day TTL (time-to-live)
+- Cache key based on attribute + score + evidence hash
+- Use `--llm-no-cache` to force fresh API calls
+
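+Cache keys are derived deterministically, so re-running `learn` on unchanged findings reuses prior responses. The derivation (mirroring `LLMCache.generate_key` in the spec):
+
+```python
+import hashlib
+
+# evidence_hash fingerprints the finding's evidence list
+evidence_hash = hashlib.sha256("".join(finding.evidence).encode()).hexdigest()[:16]
+key_data = f"{attribute_id}_{score}_{evidence_hash}"
+cache_key = hashlib.sha256(key_data.encode()).hexdigest()[:16]
+```
+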
+**Cost Control**:
+- `--llm-budget N` limits enrichment to top N skills
+- Default: 5 skills (approximately 5-10 API calls)
+- Each enrichment: ~2-6 seconds, ~2000-4000 tokens
+- Caching prevents redundant calls on repeated assessments
+
+**Graceful Fallback**:
+- Missing API key → falls back to heuristic skills
+- API errors → uses original heuristic skill
+- Rate limits → retries with exponential backoff
+
+---
+
## Architecture
### Core Components
@@ -48,6 +122,7 @@ agentready assess /path/to/repo --output-dir ./reports
src/agentready/
├── models/ # Data models (Repository, Attribute, Finding, Assessment)
├── services/ # Scanner orchestration and language detection
+│ └── llm_cache.py # LLM response caching (7-day TTL)
├── assessors/ # Attribute assessment implementations
│ ├── base.py # BaseAssessor abstract class
│ ├── documentation.py # CLAUDE.md, README assessors
@@ -55,13 +130,20 @@ src/agentready/
│ ├── testing.py # Test coverage, pre-commit hooks
│ ├── structure.py # Standard layout, gitignore
│ └── stub_assessors.py # 15 not-yet-implemented assessors
+├── learners/ # Pattern extraction and LLM enrichment
+│ ├── pattern_extractor.py # Heuristic skill extraction
+│ ├── skill_generator.py # SKILL.md generation
+│ ├── code_sampler.py # Repository code sampling
+│ ├── llm_enricher.py # Claude API integration
+│ └── prompt_templates.py # LLM prompt engineering
├── reporters/ # Report generation (HTML, Markdown, JSON)
│ ├── html.py # Interactive HTML with Jinja2
│ └── markdown.py # GitHub-Flavored Markdown
├── templates/ # Jinja2 templates
│ └── report.html.j2 # Self-contained HTML report (73KB)
└── cli/ # Click-based CLI
- └── main.py # assess, research-version, generate-config commands
+ ├── main.py # assess, research-version, generate-config commands
+ └── learn.py # Continuous learning loop with LLM enrichment
```
### Data Flow
@@ -192,6 +274,7 @@ agentready/
- **Python 3.11+** (only N and N-1 versions supported)
- **Click** - CLI framework
- **Jinja2** - HTML template engine
+- **Anthropic** - Claude API client (for LLM enrichment)
- **Pytest** - Testing framework
- **Black** - Code formatter
- **isort** - Import sorter
@@ -290,6 +373,7 @@ See `BACKLOG.md` for full feature list.
## Related Documents
+- **.github/CLAUDE_INTEGRATION.md** - Dual Claude integration guide (automated + interactive)
- **BACKLOG.md** - Future features and enhancements (11 items)
- **GITHUB_ISSUES.md** - GitHub-ready issue templates
- **README.md** - User-facing documentation
diff --git a/pyproject.toml b/pyproject.toml
index b77bd79..f28919b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -22,6 +22,7 @@ dependencies = [
"gitpython>=3.1.0",
"radon>=6.0.0",
"lizard>=1.17.0",
+ "anthropic>=0.74.0",
]
[project.optional-dependencies]
@@ -42,7 +43,7 @@ build-backend = "setuptools.build_meta"
[tool.setuptools]
package-dir = {"" = "src"}
-packages = ["agentready", "agentready.cli", "agentready.assessors", "agentready.models", "agentready.services", "agentready.reporters"]
+packages = ["agentready", "agentready.cli", "agentready.assessors", "agentready.models", "agentready.services", "agentready.reporters", "agentready.learners"]
[tool.setuptools.package-data]
agentready = [
diff --git a/specs/llm-pattern-extraction.md b/specs/llm-pattern-extraction.md
new file mode 100644
index 0000000..6925073
--- /dev/null
+++ b/specs/llm-pattern-extraction.md
@@ -0,0 +1,1141 @@
+# LLM-Powered Pattern Extraction - Implementation Specification
+
+**Status**: Ready for Implementation
+**Priority**: P1 (High Value)
+**Estimated Effort**: 1-2 weeks
+**Dependencies**: `anthropic>=0.74.0`
+
+---
+
+## Overview
+
+Enhance AgentReady's continuous learning loop with Claude API integration to generate high-quality, context-aware skills instead of relying solely on hardcoded heuristics.
+
+**Current State**: Pattern extraction uses hardcoded skill descriptions, generic 3-step instructions, and evidence strings as "code examples"
+
+**Target State**: LLM analyzes actual repository code to generate detailed instructions, real code examples with file paths, best practices, and anti-patterns
+
+**Architecture**: Hybrid approach - heuristics for discovery, optional LLM enrichment for top N skills
+
+---
+
+## Requirements
+
+### Functional Requirements
+
+1. **Opt-in LLM Enrichment**: Users must explicitly enable with `--enable-llm` flag
+2. **API Key Management**: Use `ANTHROPIC_API_KEY` environment variable
+3. **Selective Enrichment**: Enrich only top N skills (default: 5) to control cost
+4. **Caching**: Cache LLM responses for 7 days to avoid redundant API calls
+5. **Graceful Fallback**: If LLM enrichment fails, fall back to heuristic-generated skill
+6. **Code Sample Extraction**: Read relevant files from repository for analysis
+7. **Structured Output**: LLM returns JSON matching expected schema
+
+### Non-Functional Requirements
+
+1. **Performance**: 2-6 seconds per skill enrichment, parallelizable
+2. **Reliability**: Handle API rate limits with exponential backoff
+3. **Maintainability**: Prompts stored in separate templates file
+4. **Testability**: Mock Anthropic client in unit tests
+
+---
+
+## Architecture
+
+### Component Diagram
+
+```
+┌─────────────────────────────────────────────────────────────┐
+│ LearningService │
+│ │
+│ 1. extract_patterns_from_file() │
+│ ├─> PatternExtractor (heuristics) │
+│ │ └─> Returns list[DiscoveredSkill] │
+│ │ │
+│ └─> If --enable-llm: │
+│ ├─> LLMEnricher.enrich_skill() for top N │
+│ │ ├─> CodeSampler.get_relevant_code() │
+│ │ ├─> LLMCache.get() [check cache] │
+│ │ ├─> Anthropic API call [if cache miss] │
+│ │ └─> LLMCache.set() [save response] │
+│ │ │
+│ └─> Merge enriched skills back into list │
+│ │
+│ 2. generate_skills() [unchanged] │
+└─────────────────────────────────────────────────────────────┘
+```
+
+### Data Flow
+
+```
+Assessment JSON
+ ↓
+PatternExtractor (heuristic extraction)
+ ↓
+List[DiscoveredSkill] (basic skills with placeholders)
+ ↓
+[IF --enable-llm]
+ ↓
+LLMEnricher
+ ├─> CodeSampler → Read relevant .py files from repo
+ ├─> PromptTemplates → Build analysis prompt
+ ├─> LLMCache.get() → Check cache
+ ├─> Anthropic API → Call Claude Sonnet 4.5
+ ├─> Parse JSON response
+ ├─> Merge into DiscoveredSkill
+ └─> LLMCache.set() → Save for 7 days
+ ↓
+List[DiscoveredSkill] (enriched with LLM content)
+ ↓
+SkillGenerator → Generate SKILL.md files
+```
+
+---
+
+## Implementation Plan
+
+### Phase 1: Core Infrastructure
+
+**File**: `src/agentready/learners/prompt_templates.py`
+
+```python
+"""Prompt templates for LLM-powered pattern extraction."""
+
+PATTERN_EXTRACTION_PROMPT = """You are analyzing a high-scoring repository to extract a reusable pattern as a Claude Code skill.
+
+## Context
+Repository: {repo_name}
+Attribute: {attribute_name} ({attribute_description})
+Tier: {tier} (1=Essential, 4=Advanced)
+Score: {score}/100
+Primary Language: {primary_language}
+
+## Evidence from Assessment
+{evidence}
+
+## Code Samples from Repository
+{code_samples}
+
+---
+
+## Task
+
+Extract this pattern as a Claude Code skill with the following components:
+
+### 1. Skill Description (1-2 sentences)
+Write an invocation-optimized description that helps Claude Code decide when to use this skill.
+Focus on WHAT problem it solves and WHEN to apply it.
+
+### 2. Step-by-Step Instructions (5-10 steps)
+Provide concrete, actionable steps. Each step should:
+- Start with an action verb
+- Include specific commands or code where applicable
+- Define success criteria for that step
+
+Be explicit. Do not assume prior knowledge.
+
+### 3. Code Examples (2-3 examples)
+Extract real code snippets from the repository that demonstrate this pattern.
+For EACH example:
+- Include the file path
+- Show the relevant code (10-50 lines)
+- Explain WHY this demonstrates the pattern
+
+### 4. Best Practices (3-5 principles)
+Derive best practices from the successful implementation you analyzed.
+What made this repository score {score}/100?
+
+### 5. Anti-Patterns to Avoid (2-3 mistakes)
+What common mistakes did this repository avoid?
+What would have reduced the score?
+
+---
+
+## Output Format
+
+Return ONLY valid JSON matching this schema:
+
+{{
+ "skill_description": "One sentence explaining what and when",
+ "instructions": [
+ "Step 1: Specific action with command",
+ "Step 2: Next action with success criteria",
+ ...
+ ],
+ "code_examples": [
+ {{
+ "file_path": "relative/path/to/file.py",
+ "code": "actual code snippet",
+ "explanation": "Why this demonstrates the pattern"
+ }},
+ ...
+ ],
+ "best_practices": [
+ "Principle 1 derived from this repository",
+ ...
+ ],
+ "anti_patterns": [
+ "Common mistake this repo avoided",
+ ...
+ ]
+}}
+
+## Rules
+
+1. NEVER invent code - only use code from the samples provided
+2. Be specific - use exact file paths, line numbers, command syntax
+3. Focus on actionable guidance, not theory
+4. Derive insights from THIS repository, not general knowledge
+5. Return ONLY the JSON object, no markdown formatting
+"""
+
+CODE_SAMPLING_GUIDANCE = """When selecting code samples to analyze:
+
+1. For `claude_md_file`: Include the CLAUDE.md file itself
+2. For `type_annotations`: Sample 3-5 .py files with type hints
+3. For `pre_commit_hooks`: Include .pre-commit-config.yaml
+4. For `standard_project_layout`: Show directory tree + key files
+5. For `lock_files`: Include requirements.txt, poetry.lock, or go.sum
+
+Limit to 3-5 files, max 100 lines per file to stay under token limits.
+"""
+```
+
+---
+
+**File**: `src/agentready/learners/code_sampler.py`
+
+```python
+"""Smart code sampling from repositories for LLM analysis."""
+
+import logging
+from pathlib import Path
+from agentready.models import Repository, Finding
+
+logger = logging.getLogger(__name__)
+
+class CodeSampler:
+ """Extracts relevant code samples from repository for LLM analysis."""
+
+ # Mapping of attribute IDs to file patterns to sample
+ ATTRIBUTE_FILE_PATTERNS = {
+ "claude_md_file": ["CLAUDE.md"],
+ "readme_file": ["README.md"],
+ "type_annotations": ["**/*.py"], # Sample Python files
+ "pre_commit_hooks": [".pre-commit-config.yaml", ".github/workflows/*.yml"],
+ "standard_project_layout": ["**/", "src/", "tests/", "docs/"], # Directory structure
+ "lock_files": ["requirements.txt", "poetry.lock", "package-lock.json", "go.sum", "Cargo.lock"],
+ "test_coverage": ["pytest.ini", "pyproject.toml", ".coveragerc"],
+ "conventional_commits": [".github/workflows/*.yml"], # CI configs
+ "gitignore": [".gitignore"],
+ }
+
+ def __init__(self, repository: Repository, max_files: int = 5, max_lines_per_file: int = 100):
+ """Initialize code sampler.
+
+ Args:
+ repository: Repository to sample from
+ max_files: Maximum number of files to include
+ max_lines_per_file: Maximum lines per file to prevent token overflow
+ """
+ self.repository = repository
+ self.max_files = max_files
+ self.max_lines_per_file = max_lines_per_file
+
+ def get_relevant_code(self, finding: Finding) -> str:
+ """Get relevant code samples for a finding.
+
+ Args:
+ finding: The finding to get code for
+
+ Returns:
+ Formatted string with code samples
+ """
+ attribute_id = finding.attribute.id
+ patterns = self.ATTRIBUTE_FILE_PATTERNS.get(attribute_id, [])
+
+ if not patterns:
+ logger.warning(f"No file patterns defined for {attribute_id}")
+ return "No code samples available"
+
+ # Collect files matching patterns
+ files_to_sample = []
+ for pattern in patterns:
+            if pattern.endswith("/"):
+                # Directory listing; skip directories that do not exist so an
+                # empty tree never reaches _format_code_samples
+                tree = self._get_directory_tree(pattern)
+                if tree:
+                    files_to_sample.append(tree)
+ else:
+ # File pattern
+ matching_files = list(self.repository.path.glob(pattern))
+ files_to_sample.extend(matching_files[:self.max_files])
+
+ # Format as string
+ return self._format_code_samples(files_to_sample)
+
+ def _get_directory_tree(self, dir_pattern: str) -> dict:
+ """Get directory tree structure."""
+ base_path = self.repository.path / dir_pattern.rstrip("/")
+ if not base_path.exists():
+ return {}
+
+ tree = {"type": "directory", "path": str(base_path.relative_to(self.repository.path)), "children": []}
+
+ for item in base_path.iterdir():
+ if item.is_file():
+ tree["children"].append({"type": "file", "name": item.name})
+ elif item.is_dir() and not item.name.startswith("."):
+ tree["children"].append({"type": "directory", "name": item.name})
+
+ return tree
+
+ def _format_code_samples(self, files: list) -> str:
+ """Format files as readable code samples."""
+ samples = []
+
+ for file_item in files[:self.max_files]:
+ if isinstance(file_item, dict):
+ # Directory tree
+ samples.append(f"## Directory Structure: {file_item['path']}\n")
+ samples.append(self._format_tree(file_item))
+ elif isinstance(file_item, Path):
+ # Regular file
+ try:
+ rel_path = file_item.relative_to(self.repository.path)
+ content = file_item.read_text(encoding="utf-8", errors="ignore")
+
+ # Truncate if too long
+ lines = content.splitlines()
+ if len(lines) > self.max_lines_per_file:
+ lines = lines[:self.max_lines_per_file]
+ lines.append("... (truncated)")
+
+ samples.append(f"## File: {rel_path}\n")
+ samples.append("```\n" + "\n".join(lines) + "\n```\n")
+
+ except Exception as e:
+ logger.warning(f"Could not read {file_item}: {e}")
+
+ return "\n".join(samples) if samples else "No code samples available"
+
+ def _format_tree(self, tree: dict, indent: int = 0) -> str:
+ """Format directory tree as text."""
+ lines = []
+ prefix = " " * indent
+
+ for child in tree.get("children", []):
+ if child["type"] == "file":
+ lines.append(f"{prefix}├── {child['name']}")
+ elif child["type"] == "directory":
+ lines.append(f"{prefix}├── {child['name']}/")
+
+ return "\n".join(lines)
+```
+
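+For reference, the enricher drives this class as follows (usage sketch matching `LLMEnricher.enrich_skill` below; `repository` and `finding` come from the assessment):
+
+```python
+# Sample up to 5 relevant files (100 lines each) for the LLM prompt
+sampler = CodeSampler(repository, max_files=5, max_lines_per_file=100)
+code_samples = sampler.get_relevant_code(finding)
+```
+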
+---
+
+**File**: `src/agentready/services/llm_cache.py`
+
+```python
+"""LLM response caching to avoid redundant API calls."""
+
+import json
+import hashlib
+import logging
+from datetime import datetime, timedelta
+from pathlib import Path
+from agentready.models import DiscoveredSkill
+
+logger = logging.getLogger(__name__)
+
+class LLMCache:
+ """Caches LLM enrichment responses."""
+
+ def __init__(self, cache_dir: Path, ttl_days: int = 7):
+ """Initialize cache.
+
+ Args:
+ cache_dir: Directory to store cache files
+ ttl_days: Time-to-live in days (default: 7)
+ """
+ self.cache_dir = cache_dir
+ self.cache_dir.mkdir(parents=True, exist_ok=True)
+ self.ttl_days = ttl_days
+
+ def get(self, cache_key: str) -> DiscoveredSkill | None:
+ """Get cached skill if exists and not expired.
+
+ Args:
+ cache_key: Unique cache key
+
+ Returns:
+ Cached DiscoveredSkill or None if miss/expired
+ """
+ cache_file = self.cache_dir / f"{cache_key}.json"
+
+ if not cache_file.exists():
+ logger.debug(f"Cache miss: {cache_key}")
+ return None
+
+ try:
+ with open(cache_file, "r", encoding="utf-8") as f:
+ data = json.load(f)
+
+ # Check expiration
+ cached_at = datetime.fromisoformat(data["cached_at"])
+ if datetime.now() - cached_at > timedelta(days=self.ttl_days):
+ logger.info(f"Cache expired: {cache_key}")
+ cache_file.unlink() # Delete expired cache
+ return None
+
+ logger.info(f"Cache hit: {cache_key}")
+ return DiscoveredSkill(**data["skill"])
+
+ except Exception as e:
+ logger.warning(f"Cache read error for {cache_key}: {e}")
+ return None
+
+ def set(self, cache_key: str, skill: DiscoveredSkill):
+ """Save skill to cache.
+
+ Args:
+ cache_key: Unique cache key
+ skill: DiscoveredSkill to cache
+ """
+ cache_file = self.cache_dir / f"{cache_key}.json"
+
+ try:
+ data = {
+ "cached_at": datetime.now().isoformat(),
+ "skill": skill.to_dict(),
+ }
+
+ with open(cache_file, "w", encoding="utf-8") as f:
+ json.dump(data, f, indent=2)
+
+ logger.debug(f"Cached: {cache_key}")
+
+ except Exception as e:
+ logger.warning(f"Cache write error for {cache_key}: {e}")
+
+ @staticmethod
+ def generate_key(attribute_id: str, score: float, evidence_hash: str) -> str:
+ """Generate cache key from finding attributes.
+
+ Args:
+ attribute_id: Attribute ID (e.g., "claude_md_file")
+ score: Finding score
+ evidence_hash: Hash of evidence list
+
+ Returns:
+ Cache key string
+ """
+ key_data = f"{attribute_id}_{score}_{evidence_hash}"
+ return hashlib.sha256(key_data.encode()).hexdigest()[:16]
+```
+
+---
+
+**File**: `src/agentready/learners/llm_enricher.py`
+
+```python
+"""LLM-powered skill enrichment using Claude API."""
+
+import json
+import logging
+import hashlib
+from pathlib import Path
+from anthropic import Anthropic, APIError, RateLimitError
+from time import sleep
+
+from agentready.models import DiscoveredSkill, Repository, Finding
+from .code_sampler import CodeSampler
+from .prompt_templates import PATTERN_EXTRACTION_PROMPT
+from ..services.llm_cache import LLMCache
+
+logger = logging.getLogger(__name__)
+
+class LLMEnricher:
+ """Enriches discovered skills using Claude API."""
+
+ def __init__(
+ self,
+ client: Anthropic,
+ cache_dir: Path | None = None,
+ model: str = "claude-sonnet-4-5-20250929"
+ ):
+ """Initialize LLM enricher.
+
+ Args:
+ client: Anthropic API client
+ cache_dir: Cache directory (default: .agentready/llm-cache)
+ model: Claude model to use
+ """
+ self.client = client
+ self.model = model
+ self.cache = LLMCache(cache_dir or Path(".agentready/llm-cache"))
+ self.code_sampler = None # Set per-repository
+
+ def enrich_skill(
+ self,
+ skill: DiscoveredSkill,
+ repository: Repository,
+ finding: Finding,
+ use_cache: bool = True
+ ) -> DiscoveredSkill:
+ """Enrich skill with LLM-generated content.
+
+ Args:
+ skill: Basic skill from heuristic extraction
+ repository: Repository being assessed
+ finding: Finding that generated this skill
+ use_cache: Whether to use cached responses
+
+ Returns:
+ Enriched DiscoveredSkill with LLM-generated content
+ """
+ # Generate cache key
+ evidence_str = "".join(finding.evidence) if finding.evidence else ""
+ evidence_hash = hashlib.sha256(evidence_str.encode()).hexdigest()[:16]
+ cache_key = LLMCache.generate_key(skill.skill_id, finding.score, evidence_hash)
+
+ # Check cache first
+ if use_cache:
+ cached = self.cache.get(cache_key)
+ if cached:
+ logger.info(f"Using cached enrichment for {skill.skill_id}")
+ return cached
+
+ # Initialize code sampler for this repository
+ self.code_sampler = CodeSampler(repository)
+
+ # Get relevant code samples
+ code_samples = self.code_sampler.get_relevant_code(finding)
+
+ # Call Claude API
+ try:
+ enrichment_data = self._call_claude_api(skill, finding, repository, code_samples)
+
+ # Merge enrichment into skill
+ enriched_skill = self._merge_enrichment(skill, enrichment_data)
+
+ # Cache result
+ if use_cache:
+ self.cache.set(cache_key, enriched_skill)
+
+ logger.info(f"Successfully enriched {skill.skill_id}")
+ return enriched_skill
+
+        except RateLimitError as e:
+            logger.warning(f"Rate limit hit for {skill.skill_id}: {e}")
+            # Bounded exponential backoff; unbounded recursion could loop forever
+            for attempt in range(3):
+                sleep(min(60, 5 * 2**attempt))
+                try:
+                    data = self._call_claude_api(skill, finding, repository, code_samples)
+                    return self._merge_enrichment(skill, data)
+                except RateLimitError:
+                    logger.info(f"Still rate limited (attempt {attempt + 1}/3)")
+                except Exception as retry_err:
+                    logger.error(f"Retry failed for {skill.skill_id}: {retry_err}")
+                    return skill
+            return skill  # Give up; fall back to the heuristic skill
+
+ except APIError as e:
+ logger.error(f"API error enriching {skill.skill_id}: {e}")
+ return skill # Fallback to original heuristic skill
+
+ except Exception as e:
+ logger.error(f"Unexpected error enriching {skill.skill_id}: {e}")
+ return skill # Fallback to original heuristic skill
+
+ def _call_claude_api(
+ self,
+ skill: DiscoveredSkill,
+ finding: Finding,
+ repository: Repository,
+ code_samples: str
+ ) -> dict:
+ """Call Claude API for pattern extraction.
+
+ Args:
+ skill: Basic skill
+ finding: Associated finding
+ repository: Repository context
+ code_samples: Code samples from repository
+
+ Returns:
+ Parsed JSON response from Claude
+ """
+ # Build prompt
+ prompt = PATTERN_EXTRACTION_PROMPT.format(
+ repo_name=repository.name,
+ attribute_name=finding.attribute.name,
+ attribute_description=finding.attribute.description,
+ tier=finding.attribute.tier,
+ score=finding.score,
+ primary_language=getattr(repository, 'primary_language', 'Unknown'),
+ evidence="\n".join(finding.evidence) if finding.evidence else "No evidence available",
+ code_samples=code_samples
+ )
+
+ # Call API
+ response = self.client.messages.create(
+ model=self.model,
+ max_tokens=4096,
+ messages=[
+ {
+ "role": "user",
+ "content": prompt
+ }
+ ]
+ )
+
+ # Parse response
+ response_text = response.content[0].text
+
+ # Extract JSON (handle markdown code blocks if present)
+ if "```json" in response_text:
+ json_start = response_text.find("```json") + 7
+ json_end = response_text.find("```", json_start)
+ response_text = response_text[json_start:json_end].strip()
+ elif "```" in response_text:
+ json_start = response_text.find("```") + 3
+ json_end = response_text.find("```", json_start)
+ response_text = response_text[json_start:json_end].strip()
+
+ try:
+ return json.loads(response_text)
+ except json.JSONDecodeError as e:
+ logger.error(f"Failed to parse LLM JSON response: {e}")
+ logger.debug(f"Response text: {response_text}")
+ return {}
+
+ def _merge_enrichment(self, skill: DiscoveredSkill, enrichment: dict) -> DiscoveredSkill:
+ """Merge LLM enrichment data into DiscoveredSkill.
+
+ Args:
+ skill: Original skill
+ enrichment: LLM response data
+
+ Returns:
+ New DiscoveredSkill with enriched content
+ """
+ if not enrichment:
+ return skill
+
+ # Update description if provided
+ description = enrichment.get("skill_description", skill.description)
+
+ # Update pattern summary (from instructions or keep original)
+ instructions = enrichment.get("instructions", [])
+ pattern_summary = skill.pattern_summary
+ if instructions:
+ pattern_summary = f"{skill.pattern_summary}\n\nDetailed implementation steps provided by LLM analysis."
+
+ # Format code examples
+ code_examples = []
+ for example in enrichment.get("code_examples", []):
+ if isinstance(example, dict):
+ formatted = f"File: {example.get('file_path', 'unknown')}\n{example.get('code', '')}\n\nExplanation: {example.get('explanation', '')}"
+ code_examples.append(formatted)
+ elif isinstance(example, str):
+ code_examples.append(example)
+
+ # If no LLM examples, keep original
+ if not code_examples:
+ code_examples = skill.code_examples
+
+ # Create new skill with enriched data
+ # Store enrichment in code_examples for now (can extend DiscoveredSkill model later)
+ enriched_examples = code_examples.copy()
+
+ # Append best practices and anti-patterns as additional "examples"
+ best_practices = enrichment.get("best_practices", [])
+ if best_practices:
+ enriched_examples.append("=== BEST PRACTICES ===\n" + "\n".join(f"- {bp}" for bp in best_practices))
+
+ anti_patterns = enrichment.get("anti_patterns", [])
+ if anti_patterns:
+ enriched_examples.append("=== ANTI-PATTERNS TO AVOID ===\n" + "\n".join(f"- {ap}" for ap in anti_patterns))
+
+ # Add instructions as first example
+ if instructions:
+ enriched_examples.insert(0, "=== INSTRUCTIONS ===\n" + "\n".join(f"{i+1}. {step}" for i, step in enumerate(instructions)))
+
+ return DiscoveredSkill(
+ skill_id=skill.skill_id,
+ name=skill.name,
+ description=description,
+ confidence=skill.confidence,
+ source_attribute_id=skill.source_attribute_id,
+ reusability_score=skill.reusability_score,
+ impact_score=skill.impact_score,
+ pattern_summary=pattern_summary,
+ code_examples=enriched_examples,
+ citations=skill.citations,
+ )
+```
+
+---
+
+### Phase 2: Service Integration
+
+**File**: `src/agentready/services/learning_service.py` (modifications)
+
+```python
+# Add imports at top
+import os
+from anthropic import Anthropic
+
+# Modify extract_patterns_from_file method signature
+def extract_patterns_from_file(
+ self,
+ assessment_file: Path,
+ attribute_ids: list[str] | None = None,
+ enable_llm: bool = False,
+    llm_budget: int = 5,
+    llm_use_cache: bool = True,
+) -> list[DiscoveredSkill]:
+ """Extract patterns from an assessment file.
+
+ Args:
+ assessment_file: Path to assessment JSON
+ attribute_ids: Optional list of specific attributes to extract
+ enable_llm: Enable LLM enrichment
+        llm_budget: Max number of skills to enrich with LLM
+        llm_use_cache: Whether to reuse cached LLM responses (False for --llm-no-cache)
+
+ Returns:
+ List of discovered skills meeting confidence threshold
+ """
+ # ... existing code to load assessment and create Assessment object ...
+
+ # Extract patterns using heuristics
+ extractor = PatternExtractor(assessment, min_score=self.min_confidence)
+
+ if attribute_ids:
+ discovered_skills = extractor.extract_specific_patterns(attribute_ids)
+ else:
+ discovered_skills = extractor.extract_all_patterns()
+
+ # Filter by min confidence
+ discovered_skills = [s for s in discovered_skills if s.confidence >= self.min_confidence]
+
+ # Optionally enrich with LLM
+ if enable_llm and discovered_skills:
+ discovered_skills = self._enrich_with_llm(
+ discovered_skills,
+ assessment,
+            llm_budget,
+            llm_use_cache,
+ )
+
+ return discovered_skills
+
+def _enrich_with_llm(
+ self,
+ skills: list[DiscoveredSkill],
+ assessment: Assessment,
+    budget: int,
+    use_cache: bool = True,
+) -> list[DiscoveredSkill]:
+ """Enrich top N skills with LLM analysis.
+
+ Args:
+ skills: List of discovered skills
+ assessment: Full assessment with findings
+ budget: Max skills to enrich
+
+ Returns:
+ List with top skills enriched
+ """
+    # Anthropic is already imported at module top; defer only the enricher import
+    from agentready.learners.llm_enricher import LLMEnricher
+
+ # Get API key
+ api_key = os.environ.get("ANTHROPIC_API_KEY")
+ if not api_key:
+ logger.warning("LLM enrichment enabled but ANTHROPIC_API_KEY not set")
+ return skills
+
+ # Initialize LLM enricher
+ client = Anthropic(api_key=api_key)
+ enricher = LLMEnricher(client)
+
+ # Enrich top N skills
+ enriched_skills = []
+ for i, skill in enumerate(skills):
+ if i < budget:
+ # Find the finding for this skill
+ finding = self._find_finding_for_skill(assessment, skill)
+ if finding:
+ try:
+                    enriched = enricher.enrich_skill(
+                        skill, assessment.repository, finding, use_cache=use_cache
+                    )
+ enriched_skills.append(enriched)
+ except Exception as e:
+ logger.warning(f"Enrichment failed for {skill.skill_id}: {e}")
+ enriched_skills.append(skill) # Fallback to original
+ else:
+ enriched_skills.append(skill)
+ else:
+ # Beyond budget, keep original
+ enriched_skills.append(skill)
+
+ return enriched_skills
+
+def _find_finding_for_skill(self, assessment: Assessment, skill: DiscoveredSkill) -> Finding | None:
+ """Find the Finding that generated a skill."""
+ for finding in assessment.findings:
+ if finding.attribute.id == skill.source_attribute_id:
+ return finding
+ return None
+
+# Modify run_full_workflow to pass through LLM params
+def run_full_workflow(
+ self,
+ assessment_file: Path,
+ output_format: str = "all",
+ attribute_ids: list[str] | None = None,
+ enable_llm: bool = False,
+    llm_budget: int = 5,
+    llm_use_cache: bool = True,
+) -> dict:
+ """Run complete learning workflow: extract + generate.
+
+ Args:
+ assessment_file: Path to assessment JSON
+ output_format: Format for generated skills
+ attribute_ids: Optional specific attributes to extract
+ enable_llm: Enable LLM enrichment
+        llm_budget: Max skills to enrich with LLM
+        llm_use_cache: Whether to reuse cached LLM responses
+
+ Returns:
+ Dictionary with workflow results
+ """
+ # Extract patterns
+ skills = self.extract_patterns_from_file(
+ assessment_file,
+ attribute_ids,
+ enable_llm=enable_llm,
+        llm_budget=llm_budget,
+        llm_use_cache=llm_use_cache,
+ )
+
+ # ... rest of method unchanged ...
+```
+
+---
+
+### Phase 3: CLI Enhancement
+
+**File**: `src/agentready/cli/learn.py` (modifications)
+
+```python
+# Add new options to learn command
+@click.option(
+ "--enable-llm",
+ is_flag=True,
+ help="Enable LLM-powered skill enrichment (requires ANTHROPIC_API_KEY)",
+)
+@click.option(
+ "--llm-budget",
+ type=int,
+ default=5,
+ help="Maximum number of skills to enrich with LLM (default: 5)",
+)
+@click.option(
+ "--llm-no-cache",
+ is_flag=True,
+ help="Bypass LLM response cache (always call API)",
+)
+def learn(repository, output_format, output_dir, attribute, min_confidence, verbose, enable_llm, llm_budget, llm_no_cache):
+ """Extract reusable patterns and generate Claude Code skills.
+
+ ... existing docstring ...
+ """
+
+ # ... existing setup code ...
+
+ # Display LLM status in header
+ if enable_llm:
+ api_key = os.environ.get("ANTHROPIC_API_KEY")
+ if api_key:
+ click.echo(f"LLM enrichment: ENABLED (budget: {llm_budget} skills)")
+ if llm_no_cache:
+ click.echo("LLM cache: DISABLED")
+ else:
+ click.echo("⚠️ LLM enrichment: DISABLED (ANTHROPIC_API_KEY not set)")
+ enable_llm = False
+
+ # Run learning workflow with LLM params
+ try:
+ results = learning_service.run_full_workflow(
+ assessment_file=assessment_file,
+ output_format=output_format,
+ attribute_ids=list(attribute) if attribute else None,
+ enable_llm=enable_llm,
+            llm_budget=llm_budget,
+            llm_use_cache=not llm_no_cache,
+ )
+ except Exception as e:
+ click.echo(f"\nError during learning: {str(e)}", err=True)
+ if verbose:
+ import traceback
+ traceback.print_exc()
+ sys.exit(1)
+
+ # ... existing results display ...
+
+ # Show LLM info if used
+ if enable_llm and results["skills_discovered"] > 0:
+ enriched_count = min(llm_budget, results["skills_discovered"])
+ click.echo(f"\n🤖 LLM-enriched {enriched_count} skill(s)")
+```
+
+---
+
+### Phase 4: Testing
+
+**File**: `tests/unit/learners/test_llm_enricher.py`
+
+```python
+"""Tests for LLM enrichment functionality."""
+
+import json
+from unittest.mock import Mock
+
+import pytest
+from anthropic import Anthropic
+
+from agentready.learners.llm_enricher import LLMEnricher
+from agentready.models import Attribute, DiscoveredSkill, Finding, Repository
+
+@pytest.fixture
+def mock_anthropic_client():
+ """Mock Anthropic client."""
+ client = Mock(spec=Anthropic)
+
+ # Mock response
+ mock_response = Mock()
+ mock_response.content = [Mock(text=json.dumps({
+ "skill_description": "Enhanced description from LLM",
+ "instructions": [
+ "Step 1: Do something specific",
+ "Step 2: Verify it worked",
+ "Step 3: Commit the changes"
+ ],
+ "code_examples": [
+ {
+ "file_path": "src/example.py",
+ "code": "def example():\n pass",
+ "explanation": "This shows the pattern"
+ }
+ ],
+ "best_practices": [
+ "Always use type hints",
+ "Test your code"
+ ],
+ "anti_patterns": [
+ "Don't use global variables",
+ "Avoid mutable defaults"
+ ]
+ }))]
+
+ client.messages.create.return_value = mock_response
+ return client
+
+@pytest.fixture
+def basic_skill():
+ """Basic skill from heuristic extraction."""
+ return DiscoveredSkill(
+ skill_id="test-skill",
+ name="Test Skill",
+ description="Basic description",
+ confidence=95.0,
+ source_attribute_id="test_attribute",
+ reusability_score=100.0,
+ impact_score=50.0,
+ pattern_summary="Test pattern",
+ code_examples=["Basic example"],
+ citations=[]
+ )
+
+@pytest.fixture
+def sample_repository(tmp_path):
+ """Sample repository."""
+ repo_path = tmp_path / "test-repo"
+ repo_path.mkdir()
+
+ # Create .git directory
+ (repo_path / ".git").mkdir()
+
+ # Create a sample file
+ (repo_path / "test.py").write_text("def test():\n pass")
+
+ return Repository(
+ path=repo_path,
+ name="test-repo",
+ url=None,
+ branch="main",
+ commit_hash="abc123",
+ languages={"Python": 1},
+ total_files=1,
+ total_lines=2
+ )
+
+@pytest.fixture
+def sample_finding():
+ """Sample finding."""
+ attr = Attribute(
+ id="test_attribute",
+ name="Test Attribute",
+ category="Testing",
+ tier=1,
+ description="A test attribute",
+ criteria="Must pass",
+ default_weight=1.0
+ )
+
+ return Finding(
+ attribute=attr,
+ status="pass",
+ score=95.0,
+ measured_value="passing",
+ threshold="pass",
+ evidence=["Test evidence 1", "Test evidence 2"],
+ remediation=None,
+ error_message=None
+ )
+
+def test_enrich_skill_success(mock_anthropic_client, basic_skill, sample_repository, sample_finding, tmp_path):
+ """Test successful skill enrichment."""
+ cache_dir = tmp_path / "cache"
+ enricher = LLMEnricher(mock_anthropic_client, cache_dir=cache_dir)
+
+ enriched = enricher.enrich_skill(basic_skill, sample_repository, sample_finding)
+
+ # Verify API was called
+ assert mock_anthropic_client.messages.create.called
+
+ # Verify enrichment
+ assert enriched.description == "Enhanced description from LLM"
+ assert len(enriched.code_examples) > len(basic_skill.code_examples)
+
+def test_enrich_skill_uses_cache(mock_anthropic_client, basic_skill, sample_repository, sample_finding, tmp_path):
+ """Test that second enrichment uses cache."""
+ cache_dir = tmp_path / "cache"
+ enricher = LLMEnricher(mock_anthropic_client, cache_dir=cache_dir)
+
+ # First call
+ enriched1 = enricher.enrich_skill(basic_skill, sample_repository, sample_finding)
+ first_call_count = mock_anthropic_client.messages.create.call_count
+
+ # Second call (should use cache)
+ enriched2 = enricher.enrich_skill(basic_skill, sample_repository, sample_finding)
+ second_call_count = mock_anthropic_client.messages.create.call_count
+
+    # Verify cache was used and returned the same enriched content
+    assert second_call_count == first_call_count
+    assert enriched2.description == enriched1.description
+
+def test_enrich_skill_api_error_fallback(basic_skill, sample_repository, sample_finding, tmp_path):
+ """Test fallback to original skill on API error."""
+ client = Mock(spec=Anthropic)
+ client.messages.create.side_effect = Exception("API Error")
+
+ cache_dir = tmp_path / "cache"
+ enricher = LLMEnricher(client, cache_dir=cache_dir)
+
+ enriched = enricher.enrich_skill(basic_skill, sample_repository, sample_finding)
+
+ # Should return original skill
+ assert enriched.skill_id == basic_skill.skill_id
+ assert enriched.description == basic_skill.description
+```
+
+---
+
+## Dependencies
+
+Add to `pyproject.toml`:
+
+```toml
+dependencies = [
+ "anthropic>=0.74.0",
+ # ... existing dependencies
+]
+```
+
+---
+
+## Success Criteria
+
+1. **LLM Integration Works**:
+ - `agentready learn . --enable-llm` successfully calls Claude API
+ - Enriched skills have detailed instructions (5-10 steps)
+ - Code examples include real file paths from repository
+
+2. **Caching Works**:
+ - First run calls API
+ - Second run uses cache (verify no API calls)
+ - Cache respects 7-day TTL
+
+3. **Graceful Fallback**:
+ - Works without `ANTHROPIC_API_KEY` (uses heuristics)
+ - API errors don't crash, fallback to heuristic skills
+ - Rate limit errors retry with backoff
+
+4. **CLI Integration**:
+ - `--enable-llm` flag works
+ - `--llm-budget` limits enrichment count
+ - Verbose output shows which skills were enriched
+
+5. **Test Coverage**:
+ - Unit tests pass with mocked Anthropic client
+ - Integration test enriches at least 1 skill
+ - Tests cover cache hit/miss, API errors, fallback
+
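+The 7-day TTL criterion is not covered by the Phase 4 tests above. A sketch of such a test (hypothetical, not part of this spec's test file; it backdates the stored `cached_at` timestamp directly and reuses the `basic_skill` fixture):
+
+```python
+import json
+from datetime import datetime, timedelta
+
+from agentready.services.llm_cache import LLMCache
+
+def test_cache_respects_ttl(tmp_path, basic_skill):
+    """Expired entries read back as cache misses."""
+    cache = LLMCache(tmp_path, ttl_days=7)
+    cache.set("abc123", basic_skill)
+
+    # Backdate the entry to 8 days ago, past the 7-day TTL
+    cache_file = tmp_path / "abc123.json"
+    data = json.loads(cache_file.read_text())
+    data["cached_at"] = (datetime.now() - timedelta(days=8)).isoformat()
+    cache_file.write_text(json.dumps(data))
+
+    assert cache.get("abc123") is None
+```
+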
+---
+
+## Example Usage
+
+```bash
+# Set API key
+export ANTHROPIC_API_KEY=sk-ant-api03-...
+
+# Run assessment
+agentready assess .
+
+# Extract skills with LLM enrichment
+agentready learn . --enable-llm --llm-budget 5 --verbose
+
+# Expected output:
+# 🧠 AgentReady Learning Loop
+# ==================================================
+# Repository: /Users/jeder/repos/agentready
+# LLM enrichment: ENABLED (budget: 5 skills)
+#
+# Enriching skill 1/5: setup-claude-md... ✓
+# Enriching skill 2/5: implement-type-annotations... ✓ (cached)
+#
+# ==================================================
+# ✅ Discovered 5 skill(s) with confidence ≥70%
+# 🤖 LLM-enriched 5 skill(s)
+```
+
+---
+
+## Implementation Checklist
+
+- [ ] Add `anthropic>=0.74.0` to pyproject.toml
+- [ ] Create `src/agentready/learners/prompt_templates.py`
+- [ ] Create `src/agentready/learners/code_sampler.py`
+- [ ] Create `src/agentready/services/llm_cache.py`
+- [ ] Create `src/agentready/learners/llm_enricher.py`
+- [ ] Modify `src/agentready/services/learning_service.py`
+- [ ] Modify `src/agentready/cli/learn.py`
+- [ ] Create `tests/unit/learners/test_llm_enricher.py`
+- [ ] Run linters (black, isort, ruff)
+- [ ] Test on AgentReady repository (dogfooding)
+- [ ] Update CLAUDE.md with LLM enrichment documentation
+- [ ] Update README.md with API key setup instructions
+
+---
+
+## Notes
+
+- **Cost**: Not tracking tokens/cost in this version to simplify implementation
+- **Model**: Using `claude-sonnet-4-5-20250929` (latest Sonnet 4.5)
+- **Rate Limiting**: Basic exponential backoff on RateLimitError
+- **Caching**: Simple file-based cache with 7-day TTL
+- **Code Sampling**: Limits to 5 files, 100 lines per file to manage token usage
+- **Fallback**: Always preserves heuristic behavior if LLM fails
+
+---
+
+**Ready for Implementation**: This spec provides all necessary code, architecture decisions, and implementation steps to add Claude API support to AgentReady's continuous learning loop.
diff --git a/src/agentready/cli/demo.py b/src/agentready/cli/demo.py
new file mode 100644
index 0000000..dea0475
--- /dev/null
+++ b/src/agentready/cli/demo.py
@@ -0,0 +1,562 @@
+"""Demo command for showcasing AgentReady capabilities."""
+
+import sys
+import tempfile
+import time
+import webbrowser
+from pathlib import Path
+
+import click
+
+from ..services.scanner import Scanner
+
+
+def create_demo_repository(demo_path: Path, language: str = "python") -> None:
+ """Create a sample repository for demonstration.
+
+ Args:
+ demo_path: Path where demo repo should be created
+ language: Language for demo repo (python, javascript, go)
+ """
+ demo_path.mkdir(parents=True, exist_ok=True)
+
+ if language == "python":
+ # Create basic Python project structure
+ src_dir = demo_path / "src" / "demoapp"
+ src_dir.mkdir(parents=True, exist_ok=True)
+
+ tests_dir = demo_path / "tests"
+ tests_dir.mkdir(parents=True, exist_ok=True)
+
+ # Create README.md
+ readme_content = """# Demo Python Project
+
+A sample Python application demonstrating AgentReady assessment.
+
+## Overview
+
+This is a minimal Python project created to showcase the AgentReady tool's
+capabilities in assessing repository quality for AI-assisted development.
+
+## Features
+
+- Basic Python package structure
+- Simple module with type annotations
+- Test coverage
+- Git repository
+
+## Installation
+
+```bash
+pip install -e .
+```
+
+## Usage
+
+```python
+from demoapp import greet
+
+greet("World")
+```
+
+## Testing
+
+```bash
+pytest
+```
+"""
+ (demo_path / "README.md").write_text(readme_content)
+
+ # Create CLAUDE.md (for high score on that attribute)
+ claude_md_content = """# Demo Python Project - AI Assistant Guide
+
+## Overview
+
+This is a demonstration project for the AgentReady assessment tool.
+
+## Project Structure
+
+```
+demo-repo/
+├── src/demoapp/ # Main application code
+├── tests/ # Test suite
+├── README.md # User documentation
+└── pyproject.toml # Python package configuration
+```
+
+## Development
+
+### Setup
+
+```bash
+# Create virtual environment
+python -m venv venv
+source venv/bin/activate
+
+# Install dependencies
+pip install -e .
+```
+
+### Running Tests
+
+```bash
+pytest
+```
+
+## Architecture
+
+The project uses a simple module structure with type-annotated functions
+for better IDE support and AI code generation.
+"""
+ (demo_path / "CLAUDE.md").write_text(claude_md_content)
+
+ # Create main module with type annotations
+ main_py_content = '''"""Main module for demo application."""
+
+
+def greet(name: str) -> str:
+ """Generate a greeting message.
+
+ Args:
+ name: Name of the person to greet
+
+ Returns:
+ Greeting message
+ """
+ return f"Hello, {name}!"
+
+
+def add_numbers(a: int, b: int) -> int:
+ """Add two numbers together.
+
+ Args:
+ a: First number
+ b: Second number
+
+ Returns:
+ Sum of a and b
+ """
+ return a + b
+
+
+def main() -> None:
+ """Main entry point."""
+ print(greet("World"))
+ print(f"2 + 2 = {add_numbers(2, 2)}")
+
+
+if __name__ == "__main__":
+ main()
+'''
+ (src_dir / "__init__.py").write_text(
+ '"""Demo application package."""\n\nfrom .main import greet, add_numbers\n\n__all__ = ["greet", "add_numbers"]\n'
+ )
+ (src_dir / "main.py").write_text(main_py_content)
+
+ # Create test file
+ test_content = '''"""Tests for demo application."""
+
+import pytest
+
+from demoapp import greet, add_numbers
+
+
+def test_greet():
+ """Test greet function."""
+ assert greet("Alice") == "Hello, Alice!"
+ assert greet("Bob") == "Hello, Bob!"
+
+
+def test_add_numbers():
+ """Test add_numbers function."""
+ assert add_numbers(2, 2) == 4
+ assert add_numbers(-1, 1) == 0
+ assert add_numbers(0, 0) == 0
+'''
+ (tests_dir / "test_main.py").write_text(test_content)
+ (tests_dir / "__init__.py").write_text("")
+
+ # Create pyproject.toml
+ pyproject_content = """[project]
+name = "demoapp"
+version = "0.1.0"
+description = "Demo application for AgentReady"
+requires-python = ">=3.11"
+
+[build-system]
+requires = ["setuptools>=68.0.0"]
+build-backend = "setuptools.build_meta"
+
+[tool.setuptools]
+package-dir = {"" = "src"}
+packages = ["demoapp"]
+"""
+ (demo_path / "pyproject.toml").write_text(pyproject_content)
+
+ # Create .gitignore
+ gitignore_content = """# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Virtual environments
+venv/
+ENV/
+env/
+
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+
+# Testing
+.pytest_cache/
+.coverage
+htmlcov/
+
+# OS
+.DS_Store
+Thumbs.db
+"""
+ (demo_path / ".gitignore").write_text(gitignore_content)
+
+ elif language == "javascript":
+ # Create basic JavaScript/Node.js project
+ src_dir = demo_path / "src"
+ src_dir.mkdir(parents=True, exist_ok=True)
+
+ # README
+ readme = """# Demo JavaScript Project
+
+A sample Node.js application for AgentReady demonstration.
+
+## Installation
+
+```bash
+npm install
+```
+
+## Usage
+
+```javascript
+const { greet } = require('./src/index');
+console.log(greet('World'));
+```
+"""
+ (demo_path / "README.md").write_text(readme)
+
+ # package.json
+ package_json = """{
+ "name": "demo-js-app",
+ "version": "0.1.0",
+ "description": "Demo JavaScript app for AgentReady",
+ "main": "src/index.js",
+ "scripts": {
+ "test": "echo \\"No tests yet\\""
+ },
+ "keywords": ["demo"],
+ "author": "",
+ "license": "MIT"
+}
+"""
+ (demo_path / "package.json").write_text(package_json)
+
+ # Main JS file
+ index_js = """/**
+ * Demo JavaScript application
+ */
+
+function greet(name) {
+ return `Hello, ${name}!`;
+}
+
+function addNumbers(a, b) {
+ return a + b;
+}
+
+module.exports = { greet, addNumbers };
+"""
+ (src_dir / "index.js").write_text(index_js)
+
+ # .gitignore
+ gitignore = """node_modules/
+.DS_Store
+*.log
+"""
+ (demo_path / ".gitignore").write_text(gitignore)
+
+ # Initialize git repository
+ import git
+
+ repo = git.Repo.init(demo_path)
+ repo.index.add(["*"])
+ repo.index.commit("Initial commit - Demo repository for AgentReady")
+
+
+@click.command()
+@click.option(
+ "--language",
+ type=click.Choice(["python", "javascript"], case_sensitive=False),
+ default="python",
+ help="Language for demo repository (default: python)",
+)
+@click.option(
+ "--no-browser",
+ is_flag=True,
+ help="Don't open HTML report in browser automatically",
+)
+@click.option(
+ "--keep-repo",
+ is_flag=True,
+ help="Keep demo repository after assessment (for debugging)",
+)
+def demo(language, no_browser, keep_repo):
+ """Run an automated demonstration of AgentReady.
+
+ Creates a sample repository, runs a full assessment, generates reports,
+ and displays the results. Perfect for presentations, demos, and onboarding.
+
+ Examples:
+
+ \b
+ # Run Python demo (default)
+ agentready demo
+
+ \b
+ # Run JavaScript demo
+ agentready demo --language javascript
+
+ \b
+ # Run without opening browser
+ agentready demo --no-browser
+ """
+ click.echo("🤖 AgentReady Demo")
+ click.echo("=" * 60)
+ click.echo()
+
+ # Create temporary directory for demo repo
+ temp_dir = tempfile.mkdtemp(prefix="agentready-demo-")
+ demo_repo_path = Path(temp_dir) / "demo-repo"
+
+ try:
+ # Step 1: Create sample repository
+ click.echo("📦 Creating sample repository...")
+ time.sleep(0.3) # Dramatic pause
+ create_demo_repository(demo_repo_path, language)
+ click.echo(f" ✓ Sample {language} project created")
+ click.echo()
+
+ # Step 2: Initialize scanner
+ click.echo("🔍 Analyzing repository structure...")
+ time.sleep(0.3)
+ scanner = Scanner(demo_repo_path, config=None)
+ click.echo(" ✓ Repository validated")
+ click.echo()
+
+ # Step 3: Run assessment
+ click.echo("⚙️ Running 25 attribute assessments...")
+ click.echo()
+
+ # Import assessors here to avoid circular import
+ from ..assessors.code_quality import (
+ CyclomaticComplexityAssessor,
+ TypeAnnotationsAssessor,
+ )
+ from ..assessors.documentation import CLAUDEmdAssessor, READMEAssessor
+ from ..assessors.structure import StandardLayoutAssessor
+ from ..assessors.stub_assessors import (
+ ConventionalCommitsAssessor,
+ GitignoreAssessor,
+ LockFilesAssessor,
+ create_stub_assessors,
+ )
+ from ..assessors.testing import PreCommitHooksAssessor, TestCoverageAssessor
+
+ # Create all 25 assessors
+ assessors = [
+ # Tier 1 Essential (5 assessors)
+ CLAUDEmdAssessor(),
+ READMEAssessor(),
+ TypeAnnotationsAssessor(),
+ StandardLayoutAssessor(),
+ LockFilesAssessor(),
+ # Tier 2 Critical (10 assessors - 3 implemented, 7 stubs)
+ TestCoverageAssessor(),
+ PreCommitHooksAssessor(),
+ ConventionalCommitsAssessor(),
+ GitignoreAssessor(),
+ CyclomaticComplexityAssessor(), # Actually Tier 3, but including here
+ ]
+ # Add remaining stub assessors
+ assessors.extend(create_stub_assessors())
+
+ # Show progress with actual assessor execution
+ start_time = time.time()
+
+ # Build repository model
+ repository = scanner._build_repository_model(verbose=False)
+
+ # Execute assessors with live progress
+ findings = []
+ for i, assessor in enumerate(assessors, 1):
+ attr_id = assessor.attribute_id
+ click.echo(f" [{i:2d}/25] {attr_id:30s} ", nl=False)
+
+ finding = scanner._execute_assessor(assessor, repository, verbose=False)
+ findings.append(finding)
+
+ # Show result with color
+ if finding.status == "pass":
+ click.secho(f"✓ PASS ({finding.score:.0f})", fg="green")
+ elif finding.status == "fail":
+ click.secho(f"✗ FAIL ({finding.score:.0f})", fg="red")
+ elif finding.status == "skipped":
+ click.secho("⊘ SKIP", fg="yellow")
+ elif finding.status == "not_applicable":
+ click.secho("- N/A", fg="bright_black")
+ else:
+ click.secho(f"? {finding.status.upper()}", fg="yellow")
+
+ time.sleep(0.05) # Small delay for visual effect
+
+ duration = time.time() - start_time
+
+ # Step 4: Calculate scores
+ click.echo()
+ click.echo("📊 Calculating scores...")
+ time.sleep(0.2)
+
+ from ..services.scorer import Scorer
+
+ scorer = Scorer()
+ overall_score = scorer.calculate_overall_score(findings, None)
+ certification_level = scorer.determine_certification_level(overall_score)
+ assessed, skipped = scorer.count_assessed_attributes(findings)
+
+ click.echo()
+ click.echo("=" * 60)
+ click.echo()
+ click.echo("Assessment Complete!")
+ click.echo()
+
+ # Display score with color based on level
+ score_color = "green" if overall_score >= 75 else "yellow" if overall_score >= 60 else "red"
+ click.echo(f" Overall Score: ", nl=False)
+ click.secho(f"{overall_score:.1f}/100", fg=score_color, bold=True)
+ click.echo(f" Certification: ", nl=False)
+ click.secho(certification_level, fg=score_color, bold=True)
+ click.echo(f" Assessed: {assessed}/25 attributes")
+ click.echo(f" Skipped: {skipped} attributes")
+ click.echo(f" Duration: {duration:.1f}s")
+ click.echo()
+
+ # Step 5: Generate reports
+ click.echo("📄 Generating reports...")
+ time.sleep(0.3)
+
+ from datetime import datetime
+
+ from ..models.assessment import Assessment
+ from ..reporters.html import HTMLReporter
+ from ..reporters.markdown import MarkdownReporter
+
+ # Create assessment object
+ assessment = Assessment(
+ repository=repository,
+ timestamp=datetime.now(),
+ overall_score=overall_score,
+ certification_level=certification_level,
+ attributes_assessed=assessed,
+ attributes_skipped=skipped,
+ attributes_total=len(findings),
+ findings=findings,
+ config=None,
+ duration_seconds=round(duration, 1),
+ )
+
+ # Create output directory in current directory
+ output_dir = Path.cwd() / ".agentready-demo"
+ output_dir.mkdir(parents=True, exist_ok=True)
+
+ # Generate reports
+ timestamp = assessment.timestamp.strftime("%Y%m%d-%H%M%S")
+
+ html_reporter = HTMLReporter()
+ html_file = output_dir / f"demo-report-{timestamp}.html"
+ html_reporter.generate(assessment, html_file)
+ click.echo(f" ✓ HTML report: {html_file}")
+
+ markdown_reporter = MarkdownReporter()
+ md_file = output_dir / f"demo-report-{timestamp}.md"
+ markdown_reporter.generate(assessment, md_file)
+ click.echo(f" ✓ Markdown report: {md_file}")
+
+ import json
+
+ json_file = output_dir / f"demo-assessment-{timestamp}.json"
+ with open(json_file, "w", encoding="utf-8") as f:
+ json.dump(assessment.to_dict(), f, indent=2)
+ click.echo(f" ✓ JSON assessment: {json_file}")
+
+ click.echo()
+ click.echo("=" * 60)
+ click.echo()
+
+ # Step 6: Open browser
+ if not no_browser:
+ click.echo("🌐 Opening HTML report in browser...")
+ time.sleep(0.2)
+ try:
+ webbrowser.open(html_file.as_uri())
+ click.echo(" ✓ Browser opened")
+ except Exception as e:
+ click.echo(f" ⚠ Could not open browser: {e}", err=True)
+ click.echo(f" Open manually: {html_file}")
+
+ click.echo()
+ click.secho("✅ Demo complete!", fg="green", bold=True)
+ click.echo()
+ click.echo("Next steps:")
+ click.echo(f" • View HTML report: {html_file}")
+ click.echo(f" • View Markdown report: {md_file}")
+ click.echo(f" • Assess your own repo: agentready assess /path/to/repo")
+ click.echo()
+
+ if keep_repo:
+ click.echo(f"Demo repository saved at: {demo_repo_path}")
+
+ except Exception as e:
+ click.echo()
+ click.secho(f"❌ Error during demo: {str(e)}", fg="red", err=True)
+ import traceback
+
+ traceback.print_exc()
+ sys.exit(1)
+
+ finally:
+ # Clean up temporary directory unless --keep-repo
+ if not keep_repo:
+ import shutil
+
+ try:
+ shutil.rmtree(temp_dir)
+ except Exception:
+ pass # Best effort cleanup
diff --git a/src/agentready/cli/learn.py b/src/agentready/cli/learn.py
new file mode 100644
index 0000000..b46cb5e
--- /dev/null
+++ b/src/agentready/cli/learn.py
@@ -0,0 +1,241 @@
+"""Learn command for extracting patterns and generating skills."""
+
+import os
+import sys
+from pathlib import Path
+
+import click
+
+from ..services.learning_service import LearningService
+
+
+@click.command()
+@click.argument("repository", type=click.Path(exists=True), default=".")
+@click.option(
+ "--output-format",
+ type=click.Choice(
+ ["json", "skill_md", "github_issues", "markdown", "all"], case_sensitive=False
+ ),
+ default="json",
+ help="Output format for discovered skills (default: json)",
+)
+@click.option(
+ "--output-dir",
+ type=click.Path(),
+ default=".skills-proposals",
+ help="Directory for generated skill files (default: .skills-proposals)",
+)
+@click.option(
+ "--attribute",
+ multiple=True,
+ help="Specific attribute(s) to extract (can be specified multiple times)",
+)
+@click.option(
+ "--min-confidence",
+ type=int,
+ default=70,
+ help="Minimum confidence score to include skills (default: 70)",
+)
+@click.option(
+ "--verbose",
+ "-v",
+ is_flag=True,
+ help="Enable verbose output with detailed skill information",
+)
+@click.option(
+ "--enable-llm",
+ is_flag=True,
+ help="Enable LLM-powered skill enrichment (requires ANTHROPIC_API_KEY)",
+)
+@click.option(
+ "--llm-budget",
+ type=int,
+ default=5,
+ help="Maximum number of skills to enrich with LLM (default: 5)",
+)
+@click.option(
+ "--llm-no-cache",
+ is_flag=True,
+ help="Bypass LLM response cache (always call API)",
+)
+def learn(
+ repository,
+ output_format,
+ output_dir,
+ attribute,
+ min_confidence,
+ verbose,
+ enable_llm,
+ llm_budget,
+ llm_no_cache,
+):
+ """Extract reusable patterns and generate Claude Code skills.
+
+ Analyzes assessment results to identify successful patterns that could
+ be extracted as reusable Claude Code skills for other repositories.
+
+ This command looks for the most recent assessment in .agentready/ and
+ extracts skills from high-scoring attributes (default: ≥70% confidence).
+
+ REPOSITORY: Path to repository (default: current directory)
+
+ Examples:
+
+ \b
+ # Discover skills from current repository
+ agentready learn .
+
+ \b
+ # Generate SKILL.md files
+ agentready learn . --output-format skill_md
+
+ \b
+ # Create GitHub issue templates
+ agentready learn . --output-format github_issues
+
+ \b
+ # Extract specific attributes only
+ agentready learn . --attribute claude_md_file --attribute type_annotations
+
+ \b
+ # Generate all formats with higher confidence threshold
+ agentready learn . --output-format all --min-confidence 85
+ """
+ repo_path = Path(repository).resolve()
+
+ # Validate repository exists
+ if not repo_path.exists():
+ click.echo(f"Error: Repository not found: {repo_path}", err=True)
+ sys.exit(1)
+
+ # Find latest assessment file
+ agentready_dir = repo_path / ".agentready"
+ if not agentready_dir.exists():
+ click.echo(
+ "Error: No assessment found in .agentready/\n"
+ "Run 'agentready assess .' first to generate an assessment.",
+ err=True,
+ )
+ sys.exit(1)
+
+ # Look for assessment files
+ assessment_files = sorted(agentready_dir.glob("assessment-*.json"))
+ if not assessment_files:
+ click.echo(
+ "Error: No assessment files found in .agentready/\n"
+ "Run 'agentready assess .' first to generate an assessment.",
+ err=True,
+ )
+ sys.exit(1)
+
+ # Use most recent assessment
+ assessment_file = assessment_files[-1]
+
+ # Display header
+ click.echo("🧠 AgentReady Learning Loop")
+ click.echo("=" * 50)
+ click.echo(f"\nRepository: {repo_path}")
+ click.echo(f"Assessment: {assessment_file.name}")
+ click.echo(f"Output format: {output_format}")
+ click.echo(f"Min confidence: {min_confidence}%")
+ if attribute:
+ click.echo(f"Filtering attributes: {', '.join(attribute)}")
+
+ # Display LLM status
+ if enable_llm:
+ api_key = os.environ.get("ANTHROPIC_API_KEY")
+ if api_key:
+ click.echo(f"LLM enrichment: ENABLED (budget: {llm_budget} skills)")
+ if llm_no_cache:
+ click.echo("LLM cache: DISABLED")
+ else:
+ click.echo("⚠️ LLM enrichment: DISABLED (ANTHROPIC_API_KEY not set)")
+ enable_llm = False
+ click.echo()
+
+ # Create learning service
+ learning_service = LearningService(
+ min_confidence=min_confidence,
+ output_dir=output_dir,
+ )
+
+ # Run learning workflow
+ try:
+ results = learning_service.run_full_workflow(
+ assessment_file=assessment_file,
+ output_format=output_format,
+ attribute_ids=list(attribute) if attribute else None,
+ enable_llm=enable_llm,
+ llm_budget=llm_budget,
+ )
+ except Exception as e:
+ click.echo(f"\nError during learning: {str(e)}", err=True)
+ if verbose:
+ import traceback
+
+ traceback.print_exc()
+ sys.exit(1)
+
+ # Display results
+ skills_count = results["skills_discovered"]
+ generated_files = results["generated_files"]
+
+ click.echo("=" * 50)
+ click.echo(
+ f"\n✅ Discovered {skills_count} skill(s) with confidence ≥{min_confidence}%\n"
+ )
+
+ # Show LLM info if used
+ if enable_llm and skills_count > 0:
+ enriched_count = min(llm_budget, skills_count)
+ click.echo(f"🤖 LLM-enriched {enriched_count} skill(s)\n")
+
+ if skills_count == 0:
+ click.echo("No skills met the confidence threshold.")
+ click.echo(
+ f"Try lowering --min-confidence (current: {min_confidence}) "
+ "or run assessment on a higher-scoring repository."
+ )
+ return
+
+ # Display discovered skills
+ if verbose:
+ click.echo("Discovered Skills:")
+ click.echo("-" * 50)
+ for skill in results["skills"]:
+ click.echo(f"\n📚 {skill.name}")
+ click.echo(f" ID: {skill.skill_id}")
+ click.echo(f" Confidence: {skill.confidence}%")
+ click.echo(f" Impact: +{skill.impact_score} pts")
+ click.echo(f" Reusability: {skill.reusability_score}%")
+ click.echo(f" Source: {skill.source_attribute_id}")
+ click.echo(f"\n {skill.pattern_summary}")
+ click.echo()
+
+ # Display generated files
+ click.echo("\nGenerated Files:")
+ click.echo("-" * 50)
+ for file_path in generated_files:
+ click.echo(f" ✓ {file_path}")
+
+ # Next steps
+ click.echo("\n" + "=" * 50)
+ click.echo("\n📖 Next Steps:\n")
+
+ if output_format in ["skill_md", "all"]:
+ click.echo(" 1. Review generated SKILL.md files in " + output_dir)
+ click.echo(" 2. Test skills on 3-5 repositories")
+ click.echo(" 3. Refine instructions based on testing")
+ click.echo(" 4. Copy to ~/.claude/skills/ or .claude/skills/")
+
+ if output_format in ["github_issues", "all"]:
+ click.echo(f" 1. Review issue templates in {output_dir}")
+ click.echo(" 2. Create GitHub issues:")
+ click.echo(" gh issue create --body-file .skills-proposals/skill-*.md")
+
+ if output_format == "json":
+ click.echo(f" 1. Review discovered-skills.json in {output_dir}")
+ click.echo(" 2. Generate other formats:")
+ click.echo(" agentready learn . --output-format all")
+
+ click.echo()
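A minimal sketch of driving the new command from a test, assuming the package is installed and using click's own `CliRunner`; the flag names match the options defined above:

```python
# Sketch: invoking `agentready learn` in-process via click's test runner.
from click.testing import CliRunner

from agentready.cli.learn import learn

runner = CliRunner()
result = runner.invoke(
    learn, [".", "--output-format", "skill_md", "--min-confidence", "85"]
)
print(result.output)
# exit_code is 1 when no .agentready/assessment-*.json exists yet
```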
diff --git a/src/agentready/cli/main.py b/src/agentready/cli/main.py
index f5bb1dc..582e8d4 100644
--- a/src/agentready/cli/main.py
+++ b/src/agentready/cli/main.py
@@ -27,6 +27,8 @@
from ..services.research_loader import ResearchLoader
from ..services.scanner import Scanner
from .bootstrap import bootstrap
+from .demo import demo
+from .learn import learn
def create_all_assessors():
@@ -117,7 +119,7 @@ def run_assessment(repository_path, verbose, output_dir, config_path):
repo_path = Path(repository_path).resolve()
if verbose:
- click.echo(f"AgentReady Repository Scorer")
+ click.echo("AgentReady Repository Scorer")
click.echo(f"{'=' * 50}\n")
# Load configuration if provided
@@ -200,7 +202,7 @@ def run_assessment(repository_path, verbose, output_dir, config_path):
if verbose:
click.echo(f"\n{'=' * 50}")
- click.echo(f"\nAssessment complete!")
+ click.echo("\nAssessment complete!")
click.echo(
f" Score: {assessment.overall_score:.1f}/100 ({assessment.certification_level})"
)
@@ -209,7 +211,7 @@ def run_assessment(repository_path, verbose, output_dir, config_path):
)
click.echo(f" Skipped: {assessment.attributes_skipped}")
click.echo(f" Duration: {assessment.duration_seconds:.1f}s")
- click.echo(f"\nReports generated:")
+ click.echo("\nReports generated:")
click.echo(f" JSON: {json_file}")
click.echo(f" HTML: {html_file}")
click.echo(f" Markdown: {markdown_file}")
@@ -280,8 +282,10 @@ def generate_config():
click.echo("Edit this file to customize weights and behavior.")
-# Register bootstrap command
+# Register commands
cli.add_command(bootstrap)
+cli.add_command(demo)
+cli.add_command(learn)
def show_version():
diff --git a/src/agentready/learners/__init__.py b/src/agentready/learners/__init__.py
new file mode 100644
index 0000000..28db74c
--- /dev/null
+++ b/src/agentready/learners/__init__.py
@@ -0,0 +1,16 @@
+"""LLM-powered pattern extraction and skill enrichment."""
+
+from .code_sampler import CodeSampler
+from .llm_enricher import LLMEnricher
+from .pattern_extractor import PatternExtractor
+from .prompt_templates import CODE_SAMPLING_GUIDANCE, PATTERN_EXTRACTION_PROMPT
+from .skill_generator import SkillGenerator
+
+__all__ = [
+ "CodeSampler",
+ "LLMEnricher",
+ "PatternExtractor",
+ "SkillGenerator",
+ "PATTERN_EXTRACTION_PROMPT",
+ "CODE_SAMPLING_GUIDANCE",
+]
diff --git a/src/agentready/learners/code_sampler.py b/src/agentready/learners/code_sampler.py
new file mode 100644
index 0000000..db5a2a8
--- /dev/null
+++ b/src/agentready/learners/code_sampler.py
@@ -0,0 +1,142 @@
+"""Smart code sampling from repositories for LLM analysis."""
+
+import logging
+from pathlib import Path
+
+from agentready.models import Finding, Repository
+
+logger = logging.getLogger(__name__)
+
+
+class CodeSampler:
+ """Extracts relevant code samples from repository for LLM analysis."""
+
+ # Mapping of attribute IDs to file patterns to sample
+ ATTRIBUTE_FILE_PATTERNS = {
+ "claude_md_file": ["CLAUDE.md"],
+ "readme_file": ["README.md"],
+ "type_annotations": ["**/*.py"], # Sample Python files
+ "pre_commit_hooks": [".pre-commit-config.yaml", ".github/workflows/*.yml"],
+ "standard_project_layout": [
+ "**/",
+ "src/",
+ "tests/",
+ "docs/",
+ ], # Directory structure
+ "lock_files": [
+ "requirements.txt",
+ "poetry.lock",
+ "package-lock.json",
+ "go.sum",
+ "Cargo.lock",
+ ],
+ "test_coverage": ["pytest.ini", "pyproject.toml", ".coveragerc"],
+ "conventional_commits": [".github/workflows/*.yml"], # CI configs
+ "gitignore": [".gitignore"],
+ }
+
+ def __init__(
+ self, repository: Repository, max_files: int = 5, max_lines_per_file: int = 100
+ ):
+ """Initialize code sampler.
+
+ Args:
+ repository: Repository to sample from
+ max_files: Maximum number of files to include
+ max_lines_per_file: Maximum lines per file to prevent token overflow
+ """
+ self.repository = repository
+ self.max_files = max_files
+ self.max_lines_per_file = max_lines_per_file
+
+ def get_relevant_code(self, finding: Finding) -> str:
+ """Get relevant code samples for a finding.
+
+ Args:
+ finding: The finding to get code for
+
+ Returns:
+ Formatted string with code samples
+ """
+ attribute_id = finding.attribute.id
+ patterns = self.ATTRIBUTE_FILE_PATTERNS.get(attribute_id, [])
+
+ if not patterns:
+ logger.warning(f"No file patterns defined for {attribute_id}")
+ return "No code samples available"
+
+ # Collect files matching patterns
+ files_to_sample = []
+ for pattern in patterns:
+ if pattern.endswith("/"):
+ # Directory listing; skip empty trees so _format_code_samples
+ # never sees a dict without a "path" key
+ tree = self._get_directory_tree(pattern)
+ if tree:
+ files_to_sample.append(tree)
+ else:
+ # File pattern
+ matching_files = list(self.repository.path.glob(pattern))
+ files_to_sample.extend(matching_files[: self.max_files])
+
+ # Format as string
+ return self._format_code_samples(files_to_sample)
+
+ def _get_directory_tree(self, dir_pattern: str) -> dict:
+ """Get directory tree structure."""
+ base_path = self.repository.path / dir_pattern.rstrip("/")
+ if not base_path.exists():
+ return {}
+
+ tree = {
+ "type": "directory",
+ "path": str(base_path.relative_to(self.repository.path)),
+ "children": [],
+ }
+
+ for item in base_path.iterdir():
+ if item.is_file():
+ tree["children"].append({"type": "file", "name": item.name})
+ elif item.is_dir() and not item.name.startswith("."):
+ tree["children"].append({"type": "directory", "name": item.name})
+
+ return tree
+
+ def _format_code_samples(self, files: list) -> str:
+ """Format files as readable code samples."""
+ samples = []
+
+ for file_item in files[: self.max_files]:
+ if isinstance(file_item, dict):
+ # Directory tree
+ samples.append(f"## Directory Structure: {file_item['path']}\n")
+ samples.append(self._format_tree(file_item))
+ elif isinstance(file_item, Path):
+ # Regular file
+ try:
+ rel_path = file_item.relative_to(self.repository.path)
+ content = file_item.read_text(encoding="utf-8", errors="ignore")
+
+ # Truncate if too long
+ lines = content.splitlines()
+ if len(lines) > self.max_lines_per_file:
+ lines = lines[: self.max_lines_per_file]
+ lines.append("... (truncated)")
+
+ samples.append(f"## File: {rel_path}\n")
+ samples.append("```\n" + "\n".join(lines) + "\n```\n")
+
+ except Exception as e:
+ logger.warning(f"Could not read {file_item}: {e}")
+
+ return "\n".join(samples) if samples else "No code samples available"
+
+ def _format_tree(self, tree: dict, indent: int = 0) -> str:
+ """Format directory tree as text."""
+ lines = []
+ prefix = " " * indent
+
+ for child in tree.get("children", []):
+ if child["type"] == "file":
+ lines.append(f"{prefix}├── {child['name']}")
+ elif child["type"] == "directory":
+ lines.append(f"{prefix}├── {child['name']}/")
+
+ return "\n".join(lines)
diff --git a/src/agentready/learners/llm_enricher.py b/src/agentready/learners/llm_enricher.py
new file mode 100644
index 0000000..a5c1b2b
--- /dev/null
+++ b/src/agentready/learners/llm_enricher.py
@@ -0,0 +1,245 @@
+"""LLM-powered skill enrichment using Claude API."""
+
+import hashlib
+import json
+import logging
+from pathlib import Path
+from time import sleep
+
+from anthropic import Anthropic, APIError, RateLimitError
+
+from agentready.models import DiscoveredSkill, Finding, Repository
+from agentready.services.llm_cache import LLMCache
+
+from .code_sampler import CodeSampler
+from .prompt_templates import PATTERN_EXTRACTION_PROMPT
+
+logger = logging.getLogger(__name__)
+
+
+class LLMEnricher:
+ """Enriches discovered skills using Claude API."""
+
+ def __init__(
+ self,
+ client: Anthropic,
+ cache_dir: Path | None = None,
+ model: str = "claude-sonnet-4-5-20250929",
+ ):
+ """Initialize LLM enricher.
+
+ Args:
+ client: Anthropic API client
+ cache_dir: Cache directory (default: .agentready/llm-cache)
+ model: Claude model to use
+ """
+ self.client = client
+ self.model = model
+ self.cache = LLMCache(cache_dir or Path(".agentready/llm-cache"))
+ self.code_sampler = None # Set per-repository
+
+ def enrich_skill(
+ self,
+ skill: DiscoveredSkill,
+ repository: Repository,
+ finding: Finding,
+ use_cache: bool = True,
+ ) -> DiscoveredSkill:
+ """Enrich skill with LLM-generated content.
+
+ Args:
+ skill: Basic skill from heuristic extraction
+ repository: Repository being assessed
+ finding: Finding that generated this skill
+ use_cache: Whether to use cached responses
+
+ Returns:
+ Enriched DiscoveredSkill with LLM-generated content
+ """
+ # Generate cache key
+ evidence_str = "".join(finding.evidence) if finding.evidence else ""
+ evidence_hash = hashlib.sha256(evidence_str.encode()).hexdigest()[:16]
+ cache_key = LLMCache.generate_key(skill.skill_id, finding.score, evidence_hash)
+
+ # Check cache first
+ if use_cache:
+ cached = self.cache.get(cache_key)
+ if cached:
+ logger.info(f"Using cached enrichment for {skill.skill_id}")
+ return cached
+
+ # Initialize code sampler for this repository
+ self.code_sampler = CodeSampler(repository)
+
+ # Get relevant code samples
+ code_samples = self.code_sampler.get_relevant_code(finding)
+
+ # Call Claude API
+ try:
+ enrichment_data = self._call_claude_api(
+ skill, finding, repository, code_samples
+ )
+
+ # Merge enrichment into skill
+ enriched_skill = self._merge_enrichment(skill, enrichment_data)
+
+ # Cache result
+ if use_cache:
+ self.cache.set(cache_key, enriched_skill)
+
+ logger.info(f"Successfully enriched {skill.skill_id}")
+ return enriched_skill
+
+ except RateLimitError as e:
+ logger.warning(f"Rate limit hit for {skill.skill_id}: {e}")
+ # Fixed backoff with a single retry; fall back to the heuristic
+ # skill if the retry also fails, instead of recursing unbounded
+ sleep(int(getattr(e, "retry_after", 60)))
+ try:
+ enrichment_data = self._call_claude_api(
+ skill, finding, repository, code_samples
+ )
+ return self._merge_enrichment(skill, enrichment_data)
+ except Exception as retry_error:
+ logger.error(f"Retry failed for {skill.skill_id}: {retry_error}")
+ return skill
+
+ except APIError as e:
+ logger.error(f"API error enriching {skill.skill_id}: {e}")
+ return skill # Fallback to original heuristic skill
+
+ except Exception as e:
+ logger.error(f"Unexpected error enriching {skill.skill_id}: {e}")
+ return skill # Fallback to original heuristic skill
+
+ def _call_claude_api(
+ self,
+ skill: DiscoveredSkill,
+ finding: Finding,
+ repository: Repository,
+ code_samples: str,
+ ) -> dict:
+ """Call Claude API for pattern extraction.
+
+ Args:
+ skill: Basic skill
+ finding: Associated finding
+ repository: Repository context
+ code_samples: Code samples from repository
+
+ Returns:
+ Parsed JSON response from Claude
+ """
+ # Build prompt
+ prompt = PATTERN_EXTRACTION_PROMPT.format(
+ repo_name=repository.name,
+ attribute_name=finding.attribute.name,
+ attribute_description=finding.attribute.description,
+ tier=finding.attribute.tier,
+ score=finding.score,
+ primary_language=getattr(repository, "primary_language", "Unknown"),
+ evidence=(
+ "\n".join(finding.evidence)
+ if finding.evidence
+ else "No evidence available"
+ ),
+ code_samples=code_samples,
+ )
+
+ # Call API
+ response = self.client.messages.create(
+ model=self.model,
+ max_tokens=4096,
+ messages=[{"role": "user", "content": prompt}],
+ )
+
+ # Parse response
+ response_text = response.content[0].text
+
+ # Extract JSON (handle markdown code blocks if present)
+ if "```json" in response_text:
+ json_start = response_text.find("```json") + 7
+ json_end = response_text.find("```", json_start)
+ response_text = response_text[json_start:json_end].strip()
+ elif "```" in response_text:
+ json_start = response_text.find("```") + 3
+ json_end = response_text.find("```", json_start)
+ response_text = response_text[json_start:json_end].strip()
+
+ try:
+ return json.loads(response_text)
+ except json.JSONDecodeError as e:
+ logger.error(f"Failed to parse LLM JSON response: {e}")
+ logger.debug(f"Response text: {response_text}")
+ return {}
+
+ def _merge_enrichment(
+ self, skill: DiscoveredSkill, enrichment: dict
+ ) -> DiscoveredSkill:
+ """Merge LLM enrichment data into DiscoveredSkill.
+
+ Args:
+ skill: Original skill
+ enrichment: LLM response data
+
+ Returns:
+ New DiscoveredSkill with enriched content
+ """
+ if not enrichment:
+ return skill
+
+ # Update description if provided
+ description = enrichment.get("skill_description", skill.description)
+
+ # Update pattern summary (from instructions or keep original)
+ instructions = enrichment.get("instructions", [])
+ pattern_summary = skill.pattern_summary
+ if instructions:
+ pattern_summary = f"{skill.pattern_summary}\n\nDetailed implementation steps provided by LLM analysis."
+
+ # Format code examples
+ code_examples = []
+ for example in enrichment.get("code_examples", []):
+ if isinstance(example, dict):
+ formatted = f"File: {example.get('file_path', 'unknown')}\n{example.get('code', '')}\n\nExplanation: {example.get('explanation', '')}"
+ code_examples.append(formatted)
+ elif isinstance(example, str):
+ code_examples.append(example)
+
+ # If no LLM examples, keep original
+ if not code_examples:
+ code_examples = skill.code_examples
+
+ # Create new skill with enriched data
+ # Store enrichment in code_examples for now (can extend DiscoveredSkill model later)
+ enriched_examples = code_examples.copy()
+
+ # Append best practices and anti-patterns as additional "examples"
+ best_practices = enrichment.get("best_practices", [])
+ if best_practices:
+ enriched_examples.append(
+ "=== BEST PRACTICES ===\n"
+ + "\n".join(f"- {bp}" for bp in best_practices)
+ )
+
+ anti_patterns = enrichment.get("anti_patterns", [])
+ if anti_patterns:
+ enriched_examples.append(
+ "=== ANTI-PATTERNS TO AVOID ===\n"
+ + "\n".join(f"- {ap}" for ap in anti_patterns)
+ )
+
+ # Add instructions as first example
+ if instructions:
+ enriched_examples.insert(
+ 0,
+ "=== INSTRUCTIONS ===\n"
+ + "\n".join(f"{i+1}. {step}" for i, step in enumerate(instructions)),
+ )
+
+ return DiscoveredSkill(
+ skill_id=skill.skill_id,
+ name=skill.name,
+ description=description,
+ confidence=skill.confidence,
+ source_attribute_id=skill.source_attribute_id,
+ reusability_score=skill.reusability_score,
+ impact_score=skill.impact_score,
+ pattern_summary=pattern_summary,
+ code_examples=enriched_examples,
+ citations=skill.citations,
+ )
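A minimal wiring sketch for the enricher; `ANTHROPIC_API_KEY` must be set, and on any API failure `enrich_skill` falls back to the unmodified heuristic skill:

```python
# Sketch: constructing the enricher against a real client.
import os
from pathlib import Path

from anthropic import Anthropic

from agentready.learners.llm_enricher import LLMEnricher

client = Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
enricher = LLMEnricher(client, cache_dir=Path(".agentready/llm-cache"))
# skill, repository, and finding come from PatternExtractor / the assessment:
# enriched = enricher.enrich_skill(skill, repository, finding, use_cache=True)
```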
diff --git a/src/agentready/learners/pattern_extractor.py b/src/agentready/learners/pattern_extractor.py
new file mode 100644
index 0000000..8a33058
--- /dev/null
+++ b/src/agentready/learners/pattern_extractor.py
@@ -0,0 +1,223 @@
+"""Pattern extraction from assessment findings."""
+
+from agentready.models import Assessment, DiscoveredSkill, Finding
+
+
+class PatternExtractor:
+ """Extracts reusable patterns from high-scoring assessment findings.
+
+ Uses heuristic-based analysis to identify successful implementations
+ that could be extracted as Claude Code skills.
+ """
+
+ # Minimum score threshold for pattern extraction
+ MIN_SCORE_THRESHOLD = 80.0
+
+ # Tier-based impact scores (how much each tier contributes to overall score)
+ TIER_IMPACT_SCORES = {
+ 1: 50.0, # Tier 1 (Essential) - highest impact
+ 2: 30.0, # Tier 2 (Critical)
+ 3: 15.0, # Tier 3 (Important)
+ 4: 5.0, # Tier 4 (Advanced) - lowest impact
+ }
+
+ # Skill ID to human-readable name mapping for top tier-1 skills
+ SKILL_NAMES = {
+ "claude_md_file": {
+ "skill_id": "setup-claude-md",
+ "name": "Setup CLAUDE.md Configuration",
+ "description": "Create comprehensive CLAUDE.md files with tech stack, standard commands, repository structure, and boundaries to optimize repositories for AI-assisted development",
+ },
+ "type_annotations": {
+ "skill_id": "implement-type-annotations",
+ "name": "Implement Type Annotations",
+ "description": "Add comprehensive type hints to Python/TypeScript code to improve IDE support, catch errors early, and enable better AI code understanding",
+ },
+ "pre_commit_hooks": {
+ "skill_id": "setup-pre-commit-hooks",
+ "name": "Setup Pre-commit Hooks",
+ "description": "Configure pre-commit hooks with formatters and linters to automatically enforce code quality standards before each commit",
+ },
+ "standard_project_layout": {
+ "skill_id": "structure-repository-layout",
+ "name": "Structure Repository Layout",
+ "description": "Organize code according to language-specific standard project layouts to improve navigation and AI code understanding",
+ },
+ "lock_files": {
+ "skill_id": "create-dependency-lock-files",
+ "name": "Create Dependency Lock Files",
+ "description": "Generate lock files to pin exact dependency versions for reproducible builds and consistent development environments",
+ },
+ }
+
+ def __init__(self, assessment: Assessment, min_score: float = MIN_SCORE_THRESHOLD):
+ """Initialize pattern extractor.
+
+ Args:
+ assessment: The assessment to extract patterns from
+ min_score: Minimum finding score to consider (default: 80.0)
+ """
+ self.assessment = assessment
+ self.min_score = min_score
+
+ def extract_all_patterns(self) -> list[DiscoveredSkill]:
+ """Extract all reusable patterns from the assessment.
+
+ Returns:
+ List of discovered skills, sorted by confidence (highest first)
+ """
+ discovered_skills = []
+
+ for finding in self.assessment.findings:
+ if self._should_extract_pattern(finding):
+ skill = self._create_skill_from_finding(finding)
+ if skill:
+ discovered_skills.append(skill)
+
+ # Sort by confidence descending
+ discovered_skills.sort(key=lambda s: s.confidence, reverse=True)
+
+ return discovered_skills
+
+ def extract_specific_patterns(
+ self, attribute_ids: list[str]
+ ) -> list[DiscoveredSkill]:
+ """Extract patterns only from specific attributes.
+
+ Args:
+ attribute_ids: List of attribute IDs to extract patterns from
+
+ Returns:
+ List of discovered skills for specified attributes
+ """
+ discovered_skills = []
+
+ for finding in self.assessment.findings:
+ if (
+ finding.attribute.id in attribute_ids
+ and self._should_extract_pattern(finding)
+ ):
+ skill = self._create_skill_from_finding(finding)
+ if skill:
+ discovered_skills.append(skill)
+
+ # Sort by confidence descending
+ discovered_skills.sort(key=lambda s: s.confidence, reverse=True)
+
+ return discovered_skills
+
+ def _should_extract_pattern(self, finding: Finding) -> bool:
+ """Determine if a finding should have its pattern extracted.
+
+ Args:
+ finding: The finding to evaluate
+
+ Returns:
+ True if pattern should be extracted
+ """
+ # Only extract from passing findings with high scores
+ if finding.status != "pass":
+ return False
+
+ if finding.score < self.min_score:
+ return False
+
+ # Skip if attribute not in our known skills mapping
+ if finding.attribute.id not in self.SKILL_NAMES:
+ return False
+
+ return True
+
+ def _create_skill_from_finding(self, finding: Finding) -> DiscoveredSkill | None:
+ """Create a DiscoveredSkill from a high-scoring finding.
+
+ Args:
+ finding: The finding to convert to a skill
+
+ Returns:
+ DiscoveredSkill object or None if skill info not found
+ """
+ attribute_id = finding.attribute.id
+ skill_info = self.SKILL_NAMES.get(attribute_id)
+
+ if not skill_info:
+ return None
+
+ # Calculate confidence (directly from score)
+ confidence = finding.score
+
+ # Calculate impact based on tier
+ tier = finding.attribute.tier
+ impact_score = self.TIER_IMPACT_SCORES.get(tier, 5.0)
+
+ # Calculate reusability (for now, use a simple heuristic based on tier)
+ # Tier 1 attributes are more reusable across projects
+ reusability_score = 100.0 - (tier - 1) * 20.0 # T1=100, T2=80, T3=60, T4=40
+
+ # Extract code examples from finding details
+ code_examples = self._extract_code_examples(finding)
+
+ # Create pattern summary from finding
+ pattern_summary = self._create_pattern_summary(finding)
+
+ # Citations are not stored in current Attribute model, use empty list
+ citations = []
+
+ return DiscoveredSkill(
+ skill_id=skill_info["skill_id"],
+ name=skill_info["name"],
+ description=skill_info["description"],
+ confidence=confidence,
+ source_attribute_id=attribute_id,
+ reusability_score=reusability_score,
+ impact_score=impact_score,
+ pattern_summary=pattern_summary,
+ code_examples=code_examples,
+ citations=citations,
+ )
+
+ def _extract_code_examples(self, finding: Finding) -> list[str]:
+ """Extract code examples from finding details.
+
+ Args:
+ finding: The finding to extract examples from
+
+ Returns:
+ List of code example strings
+ """
+ examples = []
+
+ # Use evidence as examples
+ if finding.evidence:
+ for item in finding.evidence:
+ if item and item.strip():
+ examples.append(item)
+
+ # Add remediation steps as examples if available
+ if finding.remediation and finding.remediation.steps:
+ for step in finding.remediation.steps:
+ if step.strip():
+ examples.append(step)
+
+ return examples[:3] # Limit to 3 examples
+
+ def _create_pattern_summary(self, finding: Finding) -> str:
+ """Create a human-readable pattern summary from a finding.
+
+ Args:
+ finding: The finding to summarize
+
+ Returns:
+ Pattern summary string
+ """
+ # Use the attribute's description as the pattern summary
+ if finding.attribute.description:
+ return finding.attribute.description
+
+ # Fallback to finding evidence
+ if finding.evidence and len(finding.evidence) > 0:
+ evidence_str = "; ".join(finding.evidence[:2])
+ return f"This repository successfully implements {finding.attribute.name}. {evidence_str}"
+
+ # Final fallback
+ return f"This repository successfully implements {finding.attribute.name} at a high level ({finding.score:.1f}/100)."
diff --git a/src/agentready/learners/prompt_templates.py b/src/agentready/learners/prompt_templates.py
new file mode 100644
index 0000000..41d16e5
--- /dev/null
+++ b/src/agentready/learners/prompt_templates.py
@@ -0,0 +1,100 @@
+"""Prompt templates for LLM-powered pattern extraction."""
+
+PATTERN_EXTRACTION_PROMPT = """You are analyzing a high-scoring repository to extract a reusable pattern as a Claude Code skill.
+
+## Context
+Repository: {repo_name}
+Attribute: {attribute_name} ({attribute_description})
+Tier: {tier} (1=Essential, 4=Advanced)
+Score: {score}/100
+Primary Language: {primary_language}
+
+## Evidence from Assessment
+{evidence}
+
+## Code Samples from Repository
+{code_samples}
+
+---
+
+## Task
+
+Extract this pattern as a Claude Code skill with the following components:
+
+### 1. Skill Description (1-2 sentences)
+Write an invocation-optimized description that helps Claude Code decide when to use this skill.
+Focus on WHAT problem it solves and WHEN to apply it.
+
+### 2. Step-by-Step Instructions (5-10 steps)
+Provide concrete, actionable steps. Each step should:
+- Start with an action verb
+- Include specific commands or code where applicable
+- Define success criteria for that step
+
+Be explicit. Do not assume prior knowledge.
+
+### 3. Code Examples (2-3 examples)
+Extract real code snippets from the repository that demonstrate this pattern.
+For EACH example:
+- Include the file path
+- Show the relevant code (10-50 lines)
+- Explain WHY this demonstrates the pattern
+
+### 4. Best Practices (3-5 principles)
+Derive best practices from the successful implementation you analyzed.
+What made this repository score {score}/100?
+
+### 5. Anti-Patterns to Avoid (2-3 mistakes)
+What common mistakes did this repository avoid?
+What would have reduced the score?
+
+---
+
+## Output Format
+
+Return ONLY valid JSON matching this schema:
+
+{{
+ "skill_description": "One sentence explaining what and when",
+ "instructions": [
+ "Step 1: Specific action with command",
+ "Step 2: Next action with success criteria",
+ ...
+ ],
+ "code_examples": [
+ {{
+ "file_path": "relative/path/to/file.py",
+ "code": "actual code snippet",
+ "explanation": "Why this demonstrates the pattern"
+ }},
+ ...
+ ],
+ "best_practices": [
+ "Principle 1 derived from this repository",
+ ...
+ ],
+ "anti_patterns": [
+ "Common mistake this repo avoided",
+ ...
+ ]
+}}
+
+## Rules
+
+1. NEVER invent code - only use code from the samples provided
+2. Be specific - use exact file paths, line numbers, command syntax
+3. Focus on actionable guidance, not theory
+4. Derive insights from THIS repository, not general knowledge
+5. Return ONLY the JSON object, no markdown formatting
+"""
+
+CODE_SAMPLING_GUIDANCE = """When selecting code samples to analyze:
+
+1. For `claude_md_file`: Include the CLAUDE.md file itself
+2. For `type_annotations`: Sample 3-5 .py files with type hints
+3. For `pre_commit_hooks`: Include .pre-commit-config.yaml
+4. For `standard_project_layout`: Show directory tree + key files
+5. For `lock_files`: Include requirements.txt, poetry.lock, or go.sum
+
+Limit to 3-5 files, max 100 lines per file to stay under token limits.
+"""
diff --git a/src/agentready/learners/skill_generator.py b/src/agentready/learners/skill_generator.py
new file mode 100644
index 0000000..ec36f15
--- /dev/null
+++ b/src/agentready/learners/skill_generator.py
@@ -0,0 +1,202 @@
+"""Skill generation from discovered patterns."""
+
+from pathlib import Path
+
+from agentready.models import DiscoveredSkill
+
+
+class SkillGenerator:
+ """Generates Claude Code skills from discovered patterns.
+
+ Handles file I/O and format conversion for skill proposals.
+ """
+
+ def __init__(self, output_dir: Path | str = ".skills-proposals"):
+ """Initialize skill generator.
+
+ Args:
+ output_dir: Directory to write generated skills
+ """
+ self.output_dir = Path(output_dir)
+
+ def generate_skill_file(self, skill: DiscoveredSkill) -> Path:
+ """Generate a SKILL.md file from a discovered skill.
+
+ Args:
+ skill: The discovered skill to generate
+
+ Returns:
+ Path to the generated SKILL.md file
+ """
+ # Create skill directory
+ skill_dir = self.output_dir / skill.skill_id
+ skill_dir.mkdir(parents=True, exist_ok=True)
+
+ # Generate SKILL.md content
+ skill_content = skill.to_skill_md()
+
+ # Write to file
+ skill_file = skill_dir / "SKILL.md"
+ skill_file.write_text(skill_content, encoding="utf-8")
+
+ return skill_file
+
+ def generate_github_issue(self, skill: DiscoveredSkill) -> Path:
+ """Generate a GitHub issue template from a discovered skill.
+
+ Args:
+ skill: The discovered skill to generate
+
+ Returns:
+ Path to the generated issue template file
+ """
+ # Create output directory
+ self.output_dir.mkdir(parents=True, exist_ok=True)
+
+ # Generate issue content
+ issue_content = skill.to_github_issue()
+
+ # Write to file
+ issue_file = self.output_dir / f"skill-{skill.skill_id}.md"
+ issue_file.write_text(issue_content, encoding="utf-8")
+
+ return issue_file
+
+ def generate_markdown_report(self, skill: DiscoveredSkill) -> Path:
+ """Generate a detailed markdown report for a skill.
+
+ Args:
+ skill: The discovered skill to document
+
+ Returns:
+ Path to the generated markdown report
+ """
+ # Create output directory
+ self.output_dir.mkdir(parents=True, exist_ok=True)
+
+ # Generate markdown content
+ markdown_content = self._create_markdown_report(skill)
+
+ # Write to file
+ report_file = self.output_dir / f"{skill.skill_id}-report.md"
+ report_file.write_text(markdown_content, encoding="utf-8")
+
+ return report_file
+
+ def generate_all_formats(self, skill: DiscoveredSkill) -> dict[str, Path]:
+ """Generate all output formats for a skill.
+
+ Args:
+ skill: The discovered skill to generate
+
+ Returns:
+ Dictionary mapping format name to file path
+ """
+ return {
+ "skill_md": self.generate_skill_file(skill),
+ "github_issue": self.generate_github_issue(skill),
+ "markdown_report": self.generate_markdown_report(skill),
+ }
+
+ def generate_batch(
+ self, skills: list[DiscoveredSkill], output_format: str = "skill_md"
+ ) -> list[Path]:
+ """Generate multiple skills in batch.
+
+ Args:
+ skills: List of discovered skills to generate
+ output_format: Format to generate (skill_md, github_issue, markdown_report, all)
+
+ Returns:
+ List of generated file paths
+ """
+ generated_files = []
+
+ for skill in skills:
+ if output_format == "skill_md":
+ generated_files.append(self.generate_skill_file(skill))
+ elif output_format == "github_issue":
+ generated_files.append(self.generate_github_issue(skill))
+ elif output_format == "markdown_report":
+ generated_files.append(self.generate_markdown_report(skill))
+ elif output_format == "all":
+ results = self.generate_all_formats(skill)
+ generated_files.extend(results.values())
+
+ return generated_files
+
+ def _create_markdown_report(self, skill: DiscoveredSkill) -> str:
+ """Create a detailed markdown report for a skill.
+
+ Args:
+ skill: The skill to document
+
+ Returns:
+ Markdown report content
+ """
+ report = f"""# Skill Report: {skill.name}
+
+## Overview
+
+**Skill ID**: `{skill.skill_id}`
+**Confidence**: {skill.confidence}%
+**Impact**: +{skill.impact_score} pts
+**Reusability**: {skill.reusability_score}%
+**Source Attribute**: {skill.source_attribute_id}
+
+---
+
+## Description
+
+{skill.description}
+
+---
+
+## Pattern Summary
+
+{skill.pattern_summary}
+
+---
+
+## Implementation Guidance
+
+### When to Use This Skill
+
+Use this skill when a repository has not yet implemented the pattern described above and you want to improve its {skill.source_attribute_id} score.
+
+### Code Examples
+
+"""
+
+ if skill.code_examples:
+ for idx, example in enumerate(skill.code_examples, 1):
+ report += f"\n#### Example {idx}\n\n```\n{example}\n```\n"
+ else:
+ report += "_No code examples available_\n"
+
+ report += "\n---\n\n## Research Citations\n\n"
+
+ if skill.citations:
+ for citation in skill.citations:
+ url_part = f" - [Link]({citation.url})" if citation.url else ""
+ report += f"### {citation.source}: {citation.title}{url_part}\n\n"
+ report += f"**Relevance**: {citation.relevance}\n\n"
+ else:
+ report += "_No citations available_\n"
+
+ report += f"""
+---
+
+## Metrics
+
+- **Confidence Score**: {skill.confidence}% - How confident we are this is a valid pattern
+- **Impact Score**: {skill.impact_score} pts - Expected score improvement from applying this skill
+- **Reusability Score**: {skill.reusability_score}% - How often this pattern applies across projects
+
+---
+
+**Generated by**: AgentReady Skill Generator
+**Source**: Pattern extracted from {skill.source_attribute_id} assessment
+"""
+
+ return report
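A minimal end-to-end sketch for the generator with a hand-built skill; the field values mirror the `pattern_extractor` heuristics and the output directory is illustrative:

```python
# Sketch: generating every output format for one DiscoveredSkill.
from agentready.learners.skill_generator import SkillGenerator
from agentready.models import DiscoveredSkill

skill = DiscoveredSkill(
    skill_id="setup-claude-md",
    name="Setup CLAUDE.md Configuration",
    description="Create a CLAUDE.md file so agents know the tech stack and commands.",
    confidence=100.0,
    source_attribute_id="claude_md_file",
    reusability_score=100.0,
    impact_score=50.0,
    pattern_summary="Project-specific configuration for Claude Code",
    code_examples=["CLAUDE.md found at repository root"],
)
generator = SkillGenerator(output_dir=".skills-proposals")
for fmt, path in generator.generate_all_formats(skill).items():
    print(f"{fmt}: {path}")
```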
diff --git a/src/agentready/models/__init__.py b/src/agentready/models/__init__.py
index e69de29..58d17ed 100644
--- a/src/agentready/models/__init__.py
+++ b/src/agentready/models/__init__.py
@@ -0,0 +1,19 @@
+"""Data models for AgentReady assessment system."""
+
+from agentready.models.assessment import Assessment
+from agentready.models.attribute import Attribute
+from agentready.models.citation import Citation
+from agentready.models.config import Config
+from agentready.models.discovered_skill import DiscoveredSkill
+from agentready.models.finding import Finding
+from agentready.models.repository import Repository
+
+__all__ = [
+ "Assessment",
+ "Attribute",
+ "Citation",
+ "Config",
+ "DiscoveredSkill",
+ "Finding",
+ "Repository",
+]
diff --git a/src/agentready/models/assessment.py b/src/agentready/models/assessment.py
index 21e08d4..9158ce3 100644
--- a/src/agentready/models/assessment.py
+++ b/src/agentready/models/assessment.py
@@ -1,9 +1,10 @@
"""Assessment model representing complete repository evaluation."""
-from dataclasses import dataclass
+from dataclasses import dataclass, field
from datetime import datetime
from .config import Config
+from .discovered_skill import DiscoveredSkill
from .finding import Finding
from .repository import Repository
@@ -23,6 +24,7 @@ class Assessment:
findings: Individual attribute results
config: Custom configuration used (if any)
duration_seconds: Time taken for assessment
+ discovered_skills: Patterns extracted from this assessment (optional)
"""
repository: Repository
@@ -35,6 +37,7 @@ class Assessment:
findings: list[Finding]
config: Config | None
duration_seconds: float
+ discovered_skills: list[DiscoveredSkill] = field(default_factory=list)
VALID_LEVELS = {"Platinum", "Gold", "Silver", "Bronze", "Needs Improvement"}
@@ -77,6 +80,7 @@ def to_dict(self) -> dict:
"findings": [f.to_dict() for f in self.findings],
"config": self.config.to_dict() if self.config else None,
"duration_seconds": self.duration_seconds,
+ "discovered_skills": [s.to_dict() for s in self.discovered_skills],
}
@staticmethod
diff --git a/src/agentready/models/discovered_skill.py b/src/agentready/models/discovered_skill.py
new file mode 100644
index 0000000..a7e3bc3
--- /dev/null
+++ b/src/agentready/models/discovered_skill.py
@@ -0,0 +1,281 @@
+"""DiscoveredSkill model for patterns extracted from assessments."""
+
+import re
+from dataclasses import dataclass, field
+
+from .citation import Citation
+
+
+@dataclass
+class DiscoveredSkill:
+ """Represents a pattern that could become a Claude Code skill.
+
+ Attributes:
+ skill_id: Unique identifier (lowercase-hyphen format, e.g., "setup-claude-md")
+ name: Human-readable name (e.g., "Setup CLAUDE.md Configuration")
+ description: Invocation-optimized description for SKILL.md frontmatter
+ confidence: How confident we are in this pattern (0-100)
+ source_attribute_id: ID of attribute this pattern came from
+ reusability_score: How often this pattern appears (0-100)
+ impact_score: Potential score improvement from applying this skill (0-100)
+ pattern_summary: Human-readable pattern description
+ code_examples: List of example implementations
+ citations: Research sources supporting this pattern
+ """
+
+ skill_id: str
+ name: str
+ description: str
+ confidence: float
+ source_attribute_id: str
+ reusability_score: float
+ impact_score: float
+ pattern_summary: str
+ code_examples: list[str] = field(default_factory=list)
+ citations: list[Citation] = field(default_factory=list)
+
+ def __post_init__(self):
+ """Validate discovered skill data after initialization."""
+ if not self.skill_id:
+ raise ValueError("Skill ID must be non-empty")
+
+ # Validate skill_id format (lowercase alphanumeric with hyphens)
+ if not re.fullmatch(r"[a-z0-9]+(?:-[a-z0-9]+)*", self.skill_id):
+ raise ValueError(
+ f"Skill ID must be lowercase alphanumeric with hyphens: {self.skill_id}"
+ )
+
+ if not self.name:
+ raise ValueError("Skill name must be non-empty")
+
+ if not self.description:
+ raise ValueError("Skill description must be non-empty")
+
+ if len(self.description) > 1024:
+ raise ValueError(
+ f"Skill description too long ({len(self.description)} chars, max 1024)"
+ )
+
+ if not 0.0 <= self.confidence <= 100.0:
+ raise ValueError(
+ f"Confidence must be in range [0.0, 100.0]: {self.confidence}"
+ )
+
+ if not 0.0 <= self.reusability_score <= 100.0:
+ raise ValueError(
+ f"Reusability score must be in range [0.0, 100.0]: {self.reusability_score}"
+ )
+
+ if not 0.0 <= self.impact_score <= 100.0:
+ raise ValueError(
+ f"Impact score must be in range [0.0, 100.0]: {self.impact_score}"
+ )
+
+ if not self.source_attribute_id:
+ raise ValueError("Source attribute ID must be non-empty")
+
+ if not self.pattern_summary:
+ raise ValueError("Pattern summary must be non-empty")
+
+ def to_dict(self) -> dict:
+ """Convert to dictionary for JSON serialization."""
+ return {
+ "skill_id": self.skill_id,
+ "name": self.name,
+ "description": self.description,
+ "confidence": self.confidence,
+ "source_attribute_id": self.source_attribute_id,
+ "reusability_score": self.reusability_score,
+ "impact_score": self.impact_score,
+ "pattern_summary": self.pattern_summary,
+ "code_examples": self.code_examples,
+ "citations": [c.to_dict() for c in self.citations],
+ }
+
+ def to_skill_md(self) -> str:
+ """Generate SKILL.md content from this discovered skill.
+
+ Returns:
+ Complete SKILL.md file content with frontmatter
+ """
+ # Build frontmatter
+ frontmatter = f"""---
+name: {self.skill_id}
+description: {self.description}
+---
+"""
+
+ # Build main content
+ content = f"""# {self.name}
+
+## When to Use This Skill
+
+{self.pattern_summary}
+
+## Instructions
+
+{self._generate_instructions()}
+
+## Examples
+
+{self._generate_examples()}
+
+## Best Practices
+
+{self._generate_best_practices()}
+
+## Citations
+
+{self._generate_citations()}
+
+---
+
+**Generated by**: AgentReady Skill Generator
+**Confidence**: {self.confidence}%
+**Source Attribute**: {self.source_attribute_id}
+**Reusability**: {self.reusability_score}%
+**Impact**: +{self.impact_score} pts
+"""
+
+ return frontmatter + content
+
+ def _generate_instructions(self) -> str:
+ """Generate instructions section from pattern summary."""
+ # This is a placeholder - will be enhanced with templates
+ return """1. Review the pattern summary above
+2. Apply the pattern to your repository
+3. Verify the implementation matches the examples below
+"""
+
+ def _generate_examples(self) -> str:
+ """Generate examples section from code_examples."""
+ if not self.code_examples:
+ return "_No examples available_"
+
+ examples_md = ""
+ for idx, example in enumerate(self.code_examples, 1):
+ examples_md += f"\n### Example {idx}\n\n```\n{example}\n```\n"
+
+ return examples_md.strip()
+
+ def _generate_best_practices(self) -> str:
+ """Generate best practices section."""
+ # This is a placeholder - will be enhanced with templates
+ return """- Follow the pattern consistently across your codebase
+- Refer to the citations below for authoritative guidance
+- Test the implementation after applying the pattern
+"""
+
+ def _generate_citations(self) -> str:
+ """Generate citations section from citations list."""
+ if not self.citations:
+ return "_No citations available_"
+
+ citations_md = ""
+ for citation in self.citations:
+ url_part = f" ([link]({citation.url}))" if citation.url else ""
+ citations_md += f'- {citation.source}: "{citation.title}"{url_part}\n'
+ citations_md += f" - {citation.relevance}\n\n"
+
+ return citations_md.strip()
+
+ def to_github_issue(self) -> str:
+ """Generate GitHub issue template content for this skill proposal.
+
+ Returns:
+ Markdown content for GitHub issue
+ """
+ return f"""---
+name: Skill Proposal - {self.name}
+about: Automatically generated skill proposal from AgentReady continuous learning
+title: 'Skill Proposal: {self.name}'
+labels: 'skill-proposal, enhancement, ai-agent'
+assignees: ''
+---
+
+## Skill Proposal: {self.name}
+
+**Skill ID**: `{self.skill_id}`
+**Confidence**: {self.confidence}%
+**Impact**: +{self.impact_score} pts
+**Reusability**: {self.reusability_score}%
+**Source Attribute**: {self.source_attribute_id}
+
+---
+
+## Description
+
+{self.description}
+
+---
+
+## Pattern Summary
+
+{self.pattern_summary}
+
+---
+
+## Proposed SKILL.md
+
+```markdown
+{self.to_skill_md()}
+```
+
+---
+
+## Implementation Plan
+
+- [ ] Review proposed skill for accuracy
+- [ ] Test skill on 3-5 repositories
+- [ ] Refine instructions based on testing
+- [ ] Create final SKILL.md file
+- [ ] Add to `~/.claude/skills/` or `.claude/skills/`
+- [ ] Document skill in AgentReady catalog
+- [ ] Update skill generator with learnings
+
+---
+
+## Code Examples from Assessment
+
+{self._format_code_examples_for_issue()}
+
+---
+
+## Research Citations
+
+{self._format_citations_for_issue()}
+
+---
+
+**Auto-generated by**: AgentReady Continuous Learning Loop
+**Generated At**: {self._get_timestamp()}
+"""
+
+ def _format_code_examples_for_issue(self) -> str:
+ """Format code examples for GitHub issue."""
+ if not self.code_examples:
+ return "_No code examples available_"
+
+ examples_md = ""
+ for idx, example in enumerate(self.code_examples, 1):
+ examples_md += f"\n### Example {idx}\n\n```\n{example}\n```\n"
+
+ return examples_md.strip()
+
+ def _format_citations_for_issue(self) -> str:
+ """Format citations with relevance for GitHub issue."""
+ if not self.citations:
+ return "_No citations available_"
+
+ citations_md = ""
+ for citation in self.citations:
+ url_part = f" - [Link]({citation.url})" if citation.url else ""
+ citations_md += f"\n### {citation.source}: {citation.title}{url_part}\n\n"
+ citations_md += f"**Relevance**: {citation.relevance}\n"
+
+ return citations_md.strip()
+
+ def _get_timestamp(self) -> str:
+ """Get current timestamp for issue metadata."""
+ from datetime import datetime
+
+ return datetime.now().isoformat()
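The `__post_init__` checks make invalid skills fail fast; a minimal sketch of the failure mode, with deliberately out-of-range values:

```python
# Sketch: construction fails on a malformed skill_id before any
# file generation can run.
from agentready.models import DiscoveredSkill

try:
    DiscoveredSkill(
        skill_id="Bad ID!",          # rejected by the format check
        name="X",
        description="Y",
        confidence=150.0,            # also outside [0, 100]
        source_attribute_id="a",
        reusability_score=50.0,
        impact_score=50.0,
        pattern_summary="Z",
    )
except ValueError as e:
    print(e)  # "Skill ID must be lowercase alphanumeric with hyphens: Bad ID!"
```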
diff --git a/src/agentready/services/learning_service.py b/src/agentready/services/learning_service.py
new file mode 100644
index 0000000..f4c77c7
--- /dev/null
+++ b/src/agentready/services/learning_service.py
@@ -0,0 +1,334 @@
+"""Learning service for extracting patterns and generating skills."""
+
+import json
+import logging
+import os
+from datetime import datetime
+from pathlib import Path
+
+from agentready.learners import PatternExtractor, SkillGenerator
+from agentready.models import Assessment, DiscoveredSkill, Finding
+
+logger = logging.getLogger(__name__)
+
+
+class LearningService:
+ """Orchestrates continuous learning workflow for skill extraction.
+
+ Coordinates pattern extraction from assessments and skill generation
+ in various output formats.
+ """
+
+ def __init__(
+ self,
+ min_confidence: float = 70.0,
+ output_dir: Path | str = ".skills-proposals",
+ ):
+ """Initialize learning service.
+
+ Args:
+ min_confidence: Minimum confidence score to include skills (0-100)
+ output_dir: Directory for generated skill files
+ """
+ self.min_confidence = min_confidence
+ self.output_dir = Path(output_dir)
+ self.skill_generator = SkillGenerator(output_dir=self.output_dir)
+
+ def load_assessment(self, assessment_file: Path) -> dict:
+ """Load assessment from JSON file.
+
+ Args:
+ assessment_file: Path to assessment JSON file
+
+ Returns:
+ Parsed assessment dict (full Assessment reconstruction happens in the caller)
+
+ Raises:
+ FileNotFoundError: If assessment file doesn't exist
+ ValueError: If assessment file is invalid JSON
+ """
+ if not assessment_file.exists():
+ raise FileNotFoundError(f"Assessment file not found: {assessment_file}")
+
+ with open(assessment_file, encoding="utf-8") as f:
+ try:
+ data = json.load(f)
+ except json.JSONDecodeError as e:
+ raise ValueError(f"Invalid JSON in assessment file: {e}")
+
+ # For now, we work with the dict directly
+ # In future, could deserialize to Assessment object
+ return data
+
+ def extract_patterns_from_file(
+ self,
+ assessment_file: Path,
+ attribute_ids: list[str] | None = None,
+ enable_llm: bool = False,
+ llm_budget: int = 5,
+ ) -> list[DiscoveredSkill]:
+ """Extract patterns from an assessment file.
+
+ Args:
+ assessment_file: Path to assessment JSON file
+ attribute_ids: Optional list of specific attributes to extract
+ enable_llm: Enable LLM enrichment
+ llm_budget: Max number of skills to enrich with LLM
+
+ Returns:
+ List of discovered skills meeting confidence threshold
+ """
+ # Load assessment (returns dict for now)
+ assessment_data = self.load_assessment(assessment_file)
+
+ # Convert to Assessment object for pattern extraction
+ # For MVP, we'll work with the dict and create Finding objects manually
+ # In future, add proper deserialization
+ from agentready.models import Attribute, Finding, Repository
+
+ # Reconstruct Assessment object from dict
+ repo_data = assessment_data["repository"]
+
+ # Use the parent directory of the assessment file as the actual repo path
+ # This handles cases where the assessment was from a different path
+ actual_repo_path = assessment_file.parent.parent
+
+ repo = Repository(
+ path=actual_repo_path,
+ name=repo_data.get("name", "unknown"),
+ url=repo_data.get("url"),
+ branch=repo_data.get("branch", "unknown"),
+ commit_hash=repo_data.get("commit_hash", "unknown"),
+ languages=repo_data.get("languages", {}),
+ total_files=repo_data["total_files"],
+ total_lines=repo_data["total_lines"],
+ )
+
+ findings = []
+ for finding_data in assessment_data["findings"]:
+ # Reconstruct Attribute
+ attr_data = finding_data["attribute"]
+
+ attribute = Attribute(
+ id=attr_data["id"],
+ name=attr_data["name"],
+ category=attr_data.get("category", "Unknown"),
+ tier=attr_data["tier"],
+ description=attr_data["description"],
+ criteria=attr_data.get("criteria", ""),
+ default_weight=attr_data.get("default_weight", 1.0),
+ )
+
+ # Reconstruct Finding
+ finding = Finding(
+ attribute=attribute,
+ status=finding_data["status"],
+ score=finding_data.get("score"),
+ measured_value=finding_data.get("measured_value"),
+ threshold=finding_data.get("threshold"),
+ evidence=finding_data.get("evidence", []),
+ remediation=None, # Skip complex Remediation reconstruction for now
+ error_message=finding_data.get("error_message"),
+ )
+ findings.append(finding)
+
+ assessment = Assessment(
+ repository=repo,
+ timestamp=datetime.fromisoformat(assessment_data["timestamp"]),
+ overall_score=assessment_data["overall_score"],
+ certification_level=assessment_data["certification_level"],
+ attributes_assessed=assessment_data["attributes_assessed"],
+ attributes_skipped=assessment_data["attributes_skipped"],
+ attributes_total=assessment_data["attributes_total"],
+ findings=findings,
+ config=None, # Skip config for now
+ duration_seconds=assessment_data["duration_seconds"],
+ )
+
+ # Extract patterns
+ extractor = PatternExtractor(assessment, min_score=self.min_confidence)
+
+ if attribute_ids:
+ discovered_skills = extractor.extract_specific_patterns(attribute_ids)
+ else:
+ discovered_skills = extractor.extract_all_patterns()
+
+ # Filter by min confidence
+ discovered_skills = [
+ s for s in discovered_skills if s.confidence >= self.min_confidence
+ ]
+
+ # Optionally enrich with LLM
+ if enable_llm and discovered_skills:
+ discovered_skills = self._enrich_with_llm(
+ discovered_skills, assessment, llm_budget
+ )
+
+ return discovered_skills
+
+ def generate_skills(
+ self, skills: list[DiscoveredSkill], output_format: str = "json"
+ ) -> list[Path]:
+ """Generate skill files in specified format.
+
+ Args:
+ skills: List of discovered skills
+ output_format: Format to generate (json, skill_md, github_issues, all)
+
+ Returns:
+ List of generated file paths
+ """
+ generated_files = []
+
+ if output_format == "json":
+ json_file = self._generate_json(skills)
+ generated_files.append(json_file)
+
+ elif output_format == "skill_md":
+ for skill in skills:
+ skill_file = self.skill_generator.generate_skill_file(skill)
+ generated_files.append(skill_file)
+
+ elif output_format == "github_issues":
+ for skill in skills:
+ issue_file = self.skill_generator.generate_github_issue(skill)
+ generated_files.append(issue_file)
+
+ elif output_format == "markdown":
+ for skill in skills:
+ report_file = self.skill_generator.generate_markdown_report(skill)
+ generated_files.append(report_file)
+
+ elif output_format == "all":
+ # Generate JSON summary
+ json_file = self._generate_json(skills)
+ generated_files.append(json_file)
+
+ # Generate all formats for each skill
+ for skill in skills:
+ results = self.skill_generator.generate_all_formats(skill)
+ generated_files.extend(results.values())
+
+ return generated_files
+
+ def _generate_json(self, skills: list[DiscoveredSkill]) -> Path:
+ """Generate JSON file with discovered skills.
+
+ Args:
+ skills: List of discovered skills
+
+ Returns:
+ Path to generated JSON file
+ """
+ self.output_dir.mkdir(parents=True, exist_ok=True)
+
+ data = {
+ "generated_at": datetime.now().isoformat(),
+ "skill_count": len(skills),
+ "min_confidence": self.min_confidence,
+ "discovered_skills": [skill.to_dict() for skill in skills],
+ }
+
+ json_file = self.output_dir / "discovered-skills.json"
+ with open(json_file, "w", encoding="utf-8") as f:
+ json.dump(data, f, indent=2)
+
+ return json_file
+
+ def _enrich_with_llm(
+ self, skills: list[DiscoveredSkill], assessment: Assessment, budget: int
+ ) -> list[DiscoveredSkill]:
+ """Enrich top N skills with LLM analysis.
+
+ Args:
+ skills: List of discovered skills
+ assessment: Full assessment with findings
+ budget: Max skills to enrich
+
+ Returns:
+ List with top skills enriched
+ """
+ from anthropic import Anthropic
+
+ from agentready.learners.llm_enricher import LLMEnricher
+
+ # Get API key
+ api_key = os.environ.get("ANTHROPIC_API_KEY")
+ if not api_key:
+ logger.warning("LLM enrichment enabled but ANTHROPIC_API_KEY not set")
+ return skills
+
+ # Initialize LLM enricher
+ client = Anthropic(api_key=api_key)
+ enricher = LLMEnricher(client)
+
+ # Enrich top N skills
+ enriched_skills = []
+ for i, skill in enumerate(skills):
+ if i < budget:
+ # Find the finding for this skill
+ finding = self._find_finding_for_skill(assessment, skill)
+ if finding:
+ try:
+ enriched = enricher.enrich_skill(
+ skill, assessment.repository, finding
+ )
+ enriched_skills.append(enriched)
+ except Exception as e:
+ logger.warning(f"Enrichment failed for {skill.skill_id}: {e}")
+ enriched_skills.append(skill) # Fallback to original
+ else:
+ enriched_skills.append(skill)
+ else:
+ # Beyond budget, keep original
+ enriched_skills.append(skill)
+
+ return enriched_skills
+
+ def _find_finding_for_skill(
+ self, assessment: Assessment, skill: DiscoveredSkill
+ ) -> Finding | None:
+ """Find the Finding that generated a skill."""
+ for finding in assessment.findings:
+ if finding.attribute.id == skill.source_attribute_id:
+ return finding
+ return None
+
+ def run_full_workflow(
+ self,
+ assessment_file: Path,
+ output_format: str = "all",
+ attribute_ids: list[str] | None = None,
+ enable_llm: bool = False,
+ llm_budget: int = 5,
+ ) -> dict:
+ """Run complete learning workflow: extract + generate.
+
+ Args:
+ assessment_file: Path to assessment JSON
+ output_format: Format for generated skills
+ attribute_ids: Optional specific attributes to extract
+ enable_llm: Enable LLM enrichment
+ llm_budget: Max skills to enrich with LLM
+
+ Returns:
+ Dictionary with workflow results
+ """
+ # Extract patterns
+ skills = self.extract_patterns_from_file(
+ assessment_file,
+ attribute_ids,
+ enable_llm=enable_llm,
+ llm_budget=llm_budget,
+ )
+
+ # Generate output files
+ generated_files = self.generate_skills(skills, output_format)
+
+ return {
+ "skills_discovered": len(skills),
+ "min_confidence": self.min_confidence,
+ "output_format": output_format,
+ "generated_files": [str(f) for f in generated_files],
+ "skills": skills,
+ }
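A minimal sketch of driving the workflow directly, bypassing the CLI; it assumes an assessment JSON already exists under `.agentready/`:

```python
# Sketch: extract + generate in one call, mirroring what `agentready learn` does.
from pathlib import Path

from agentready.services.learning_service import LearningService

service = LearningService(min_confidence=80.0, output_dir=".skills-proposals")
assessments = sorted(Path(".agentready").glob("assessment-*.json"))
results = service.run_full_workflow(
    assessment_file=assessments[-1],
    output_format="json",
)
print(results["skills_discovered"], results["generated_files"])
```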
diff --git a/src/agentready/services/llm_cache.py b/src/agentready/services/llm_cache.py
new file mode 100644
index 0000000..3ce2db7
--- /dev/null
+++ b/src/agentready/services/llm_cache.py
@@ -0,0 +1,97 @@
+"""LLM response caching to avoid redundant API calls."""
+
+import hashlib
+import json
+import logging
+from datetime import datetime, timedelta
+from pathlib import Path
+
+from agentready.models import Citation, DiscoveredSkill
+
+logger = logging.getLogger(__name__)
+
+
+class LLMCache:
+ """Caches LLM enrichment responses."""
+
+ def __init__(self, cache_dir: Path, ttl_days: int = 7):
+ """Initialize cache.
+
+ Args:
+ cache_dir: Directory to store cache files
+ ttl_days: Time-to-live in days (default: 7)
+ """
+ self.cache_dir = cache_dir
+ self.cache_dir.mkdir(parents=True, exist_ok=True)
+ self.ttl_days = ttl_days
+
+ def get(self, cache_key: str) -> DiscoveredSkill | None:
+ """Get cached skill if exists and not expired.
+
+ Args:
+ cache_key: Unique cache key
+
+ Returns:
+ Cached DiscoveredSkill or None if miss/expired
+ """
+ cache_file = self.cache_dir / f"{cache_key}.json"
+
+ if not cache_file.exists():
+ logger.debug(f"Cache miss: {cache_key}")
+ return None
+
+ try:
+ with open(cache_file, "r", encoding="utf-8") as f:
+ data = json.load(f)
+
+ # Check expiration
+ cached_at = datetime.fromisoformat(data["cached_at"])
+ if datetime.now() - cached_at > timedelta(days=self.ttl_days):
+ logger.info(f"Cache expired: {cache_key}")
+ cache_file.unlink() # Delete expired cache
+ return None
+
+ logger.info(f"Cache hit: {cache_key}")
+ return DiscoveredSkill(**data["skill"])
+
+ except Exception as e:
+ logger.warning(f"Cache read error for {cache_key}: {e}")
+ return None
+
+ def set(self, cache_key: str, skill: DiscoveredSkill):
+ """Save skill to cache.
+
+ Args:
+ cache_key: Unique cache key
+ skill: DiscoveredSkill to cache
+ """
+ cache_file = self.cache_dir / f"{cache_key}.json"
+
+ try:
+ data = {
+ "cached_at": datetime.now().isoformat(),
+ "skill": skill.to_dict(),
+ }
+
+ with open(cache_file, "w", encoding="utf-8") as f:
+ json.dump(data, f, indent=2)
+
+ logger.debug(f"Cached: {cache_key}")
+
+ except Exception as e:
+ logger.warning(f"Cache write error for {cache_key}: {e}")
+
+ @staticmethod
+ def generate_key(attribute_id: str, score: float, evidence_hash: str) -> str:
+ """Generate cache key from finding attributes.
+
+ Args:
+ attribute_id: Attribute ID (e.g., "claude_md_file")
+ score: Finding score
+ evidence_hash: Hash of evidence list
+
+ Returns:
+ Cache key string
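+
+        Example (a sketch; assumes a ``Finding`` named ``finding``, and
+        deriving ``evidence_hash`` this way is an assumption rather than
+        something this module prescribes)::
+
+            evidence_hash = hashlib.sha256(
+                json.dumps(finding.evidence, sort_keys=True).encode()
+            ).hexdigest()
+            key = LLMCache.generate_key(
+                finding.attribute.id, finding.score, evidence_hash
+            )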
+ """
+ key_data = f"{attribute_id}_{score}_{evidence_hash}"
+ return hashlib.sha256(key_data.encode()).hexdigest()[:16]
diff --git a/tests/unit/learners/test_llm_enricher.py b/tests/unit/learners/test_llm_enricher.py
new file mode 100644
index 0000000..1ae3de4
--- /dev/null
+++ b/tests/unit/learners/test_llm_enricher.py
@@ -0,0 +1,167 @@
+"""Tests for LLM enrichment functionality."""
+
+import json
+from unittest.mock import Mock
+
+import pytest
+from anthropic import Anthropic
+
+from agentready.learners.llm_enricher import LLMEnricher
+from agentready.models import Attribute, DiscoveredSkill, Finding, Repository
+
+
+@pytest.fixture
+def mock_anthropic_client():
+ """Mock Anthropic client."""
+ client = Mock(spec=Anthropic)
+
+ # Mock response
+ mock_response = Mock()
+ mock_response.content = [
+ Mock(
+ text=json.dumps(
+ {
+ "skill_description": "Enhanced description from LLM",
+ "instructions": [
+ "Step 1: Do something specific",
+ "Step 2: Verify it worked",
+ "Step 3: Commit the changes",
+ ],
+ "code_examples": [
+ {
+ "file_path": "src/example.py",
+ "code": "def example():\n pass",
+ "explanation": "This shows the pattern",
+ }
+ ],
+ "best_practices": ["Always use type hints", "Test your code"],
+ "anti_patterns": [
+ "Don't use global variables",
+ "Avoid mutable defaults",
+ ],
+ }
+ )
+ )
+ ]
+
+ client.messages.create.return_value = mock_response
+ return client
+
+
+@pytest.fixture
+def basic_skill():
+ """Basic skill from heuristic extraction."""
+ return DiscoveredSkill(
+ skill_id="test-skill",
+ name="Test Skill",
+ description="Basic description",
+ confidence=95.0,
+ source_attribute_id="test_attribute",
+ reusability_score=100.0,
+ impact_score=50.0,
+ pattern_summary="Test pattern",
+ code_examples=["Basic example"],
+ citations=[],
+ )
+
+
+@pytest.fixture
+def sample_repository(tmp_path):
+ """Sample repository."""
+ repo_path = tmp_path / "test-repo"
+ repo_path.mkdir()
+
+ # Create .git directory
+ (repo_path / ".git").mkdir()
+
+ # Create a sample file
+ (repo_path / "test.py").write_text("def test():\n pass")
+
+ return Repository(
+ path=repo_path,
+ name="test-repo",
+ url=None,
+ branch="main",
+ commit_hash="abc123",
+ languages={"Python": 1},
+ total_files=1,
+ total_lines=2,
+ )
+
+
+@pytest.fixture
+def sample_finding():
+ """Sample finding."""
+ attr = Attribute(
+ id="test_attribute",
+ name="Test Attribute",
+ category="Testing",
+ tier=1,
+ description="A test attribute",
+ criteria="Must pass",
+ default_weight=1.0,
+ )
+
+ return Finding(
+ attribute=attr,
+ status="pass",
+ score=95.0,
+ measured_value="passing",
+ threshold="pass",
+ evidence=["Test evidence 1", "Test evidence 2"],
+ remediation=None,
+ error_message=None,
+ )
+
+
+def test_enrich_skill_success(
+ mock_anthropic_client, basic_skill, sample_repository, sample_finding, tmp_path
+):
+ """Test successful skill enrichment."""
+ cache_dir = tmp_path / "cache"
+ enricher = LLMEnricher(mock_anthropic_client, cache_dir=cache_dir)
+
+ enriched = enricher.enrich_skill(basic_skill, sample_repository, sample_finding)
+
+ # Verify API was called
+ assert mock_anthropic_client.messages.create.called
+
+ # Verify enrichment
+ assert enriched.description == "Enhanced description from LLM"
+ assert len(enriched.code_examples) > len(basic_skill.code_examples)
+
+
+def test_enrich_skill_uses_cache(
+ mock_anthropic_client, basic_skill, sample_repository, sample_finding, tmp_path
+):
+    """Test that a second enrichment call is served from the cache."""
+ cache_dir = tmp_path / "cache"
+ enricher = LLMEnricher(mock_anthropic_client, cache_dir=cache_dir)
+
+ # First call
+ enricher.enrich_skill(basic_skill, sample_repository, sample_finding)
+ first_call_count = mock_anthropic_client.messages.create.call_count
+
+ # Second call (should use cache)
+ enricher.enrich_skill(basic_skill, sample_repository, sample_finding)
+ second_call_count = mock_anthropic_client.messages.create.call_count
+
+    # Verify the cache was used (no additional API call)
+    assert second_call_count == first_call_count, "second call should hit the cache"
+
+
+def test_enrich_skill_api_error_fallback(
+ basic_skill, sample_repository, sample_finding, tmp_path
+):
+ """Test fallback to original skill on API error."""
+ client = Mock(spec=Anthropic)
+ client.messages.create.side_effect = Exception("API Error")
+
+ cache_dir = tmp_path / "cache"
+ enricher = LLMEnricher(client, cache_dir=cache_dir)
+
+ enriched = enricher.enrich_skill(basic_skill, sample_repository, sample_finding)
+
+ # Should return original skill
+ assert enriched.skill_id == basic_skill.skill_id
+ assert enriched.description == basic_skill.description
diff --git a/tests/unit/test_demo.py b/tests/unit/test_demo.py
new file mode 100644
index 0000000..09067de
--- /dev/null
+++ b/tests/unit/test_demo.py
@@ -0,0 +1,131 @@
+"""Tests for demo command."""
+
+import tempfile
+from pathlib import Path
+
+from click.testing import CliRunner
+
+from agentready.cli.demo import create_demo_repository, demo
+
+
+def test_create_demo_repository_python():
+ """Test creating a Python demo repository."""
+ with tempfile.TemporaryDirectory() as temp_dir:
+ demo_path = Path(temp_dir) / "test-demo"
+ create_demo_repository(demo_path, "python")
+
+ # Check basic structure
+ assert demo_path.exists()
+ assert (demo_path / ".git").exists()
+ assert (demo_path / "README.md").exists()
+ assert (demo_path / "CLAUDE.md").exists()
+ assert (demo_path / "pyproject.toml").exists()
+ assert (demo_path / ".gitignore").exists()
+
+ # Check source files
+ assert (demo_path / "src" / "demoapp" / "__init__.py").exists()
+ assert (demo_path / "src" / "demoapp" / "main.py").exists()
+
+ # Check tests
+ assert (demo_path / "tests" / "test_main.py").exists()
+
+ # Verify content
+ readme_content = (demo_path / "README.md").read_text()
+ assert "Demo Python Project" in readme_content
+
+ claude_content = (demo_path / "CLAUDE.md").read_text()
+ assert "AI Assistant Guide" in claude_content
+
+
+def test_create_demo_repository_javascript():
+ """Test creating a JavaScript demo repository."""
+ with tempfile.TemporaryDirectory() as temp_dir:
+ demo_path = Path(temp_dir) / "test-demo-js"
+ create_demo_repository(demo_path, "javascript")
+
+ # Check basic structure
+ assert demo_path.exists()
+ assert (demo_path / ".git").exists()
+ assert (demo_path / "README.md").exists()
+ assert (demo_path / "package.json").exists()
+ assert (demo_path / ".gitignore").exists()
+
+ # Check source files
+ assert (demo_path / "src" / "index.js").exists()
+
+ # Verify content
+ package_content = (demo_path / "package.json").read_text()
+ assert "demo-js-app" in package_content
+
+
+def test_demo_command_help():
+ """Test demo command help output."""
+ runner = CliRunner()
+ result = runner.invoke(demo, ["--help"])
+ assert result.exit_code == 0
+ assert "automated demonstration" in result.output.lower()
+ assert "--language" in result.output
+ assert "--no-browser" in result.output
+ assert "--keep-repo" in result.output
+
+
+def test_demo_command_python():
+ """Test running demo command with Python language."""
+ runner = CliRunner()
+
+    # Run with --no-browser to avoid opening a browser during tests
+ # Use isolated filesystem for cleaner testing
+ with runner.isolated_filesystem():
+ result = runner.invoke(demo, ["--language", "python", "--no-browser"])
+
+ # Check exit code
+ assert result.exit_code == 0, f"Command failed: {result.output}"
+
+ # Check output contains expected messages
+ assert "AgentReady Demo" in result.output
+ assert "Creating sample repository" in result.output
+ assert "Running 25 attribute assessments" in result.output
+ assert "Assessment Complete!" in result.output
+ assert "Overall Score:" in result.output
+ assert "Certification:" in result.output
+ assert "Generating reports" in result.output
+ assert "Demo complete!" in result.output
+
+ # Check reports were generated
+ demo_output = Path(".agentready-demo")
+ assert demo_output.exists()
+
+ # Find generated reports (with timestamp)
+ html_files = list(demo_output.glob("demo-report-*.html"))
+ md_files = list(demo_output.glob("demo-report-*.md"))
+ json_files = list(demo_output.glob("demo-assessment-*.json"))
+
+ assert len(html_files) == 1, "HTML report should be generated"
+ assert len(md_files) == 1, "Markdown report should be generated"
+ assert len(json_files) == 1, "JSON assessment should be generated"
+
+
+def test_demo_command_javascript():
+ """Test running demo command with JavaScript language."""
+ runner = CliRunner()
+
+ with runner.isolated_filesystem():
+ result = runner.invoke(demo, ["--language", "javascript", "--no-browser"])
+
+ assert result.exit_code == 0, f"Command failed: {result.output}"
+ assert "sample javascript project created" in result.output.lower()
+ assert "Demo complete!" in result.output
+
+
+def test_demo_command_keep_repo():
+ """Test demo command with --keep-repo flag."""
+ runner = CliRunner()
+
+ with runner.isolated_filesystem():
+ result = runner.invoke(
+ demo, ["--language", "python", "--no-browser", "--keep-repo"]
+ )
+
+ assert result.exit_code == 0
+ assert "Demo repository saved at:" in result.output
diff --git a/uv.lock b/uv.lock
index 5a4251d..d9a6083 100644
--- a/uv.lock
+++ b/uv.lock
@@ -4,7 +4,7 @@ requires-python = ">=3.11"
[[package]]
name = "agentready"
-version = "1.0.0"
+version = "1.1.1"
source = { editable = "." }
dependencies = [
{ name = "click" },