#!/usr/bin/env python3
"""Analyze a CPU profile using the GitHub Models API for performance insights."""

import os
import sys
from pathlib import Path


def analyze_profile_with_github_models(profile_path: str) -> str:
    """Analyze a CPU profile using the GitHub Models API."""
    try:
        import requests
    except ImportError:
        return "⚠️ requests package not available for LLM analysis"

    github_token = os.getenv("GITHUB_TOKEN")
    if not github_token:
        return "⚠️ GITHUB_TOKEN not set"

    # Read the profile, truncating to the first 50,000 characters to stay
    # within the model's token limits.
    profile_content = Path(profile_path).read_text(encoding="utf-8")
    if len(profile_content) > 50_000:
        profile_content = profile_content[:50_000] + "\n... (truncated)"

    prompt = f"""Analyze this CPU profiling data from py-spy and provide actionable performance insights.

The profile is in "collapsed stack trace" format where each line shows:
- A semicolon-separated call stack (deepest function last)
- Followed by a space and a sample count (e.g. "main;run;parse 120" means 120 samples landed in parse)

Focus on:
1. **Hotspots**: Which functions consume the most CPU time?
2. **Patterns**: Are there inefficiencies such as excessive I/O, tight loops, or import overhead?
3. **Recommendations**: Specific, actionable suggestions to improve performance

Profile data:
```
{profile_content}
```

Provide a concise analysis (2-3 paragraphs max) with the most important findings."""

    # Use GitHub Models API (available in GitHub Actions)
    # Reference: https://docs.github.com/en/github-models
    endpoint = "https://models.inference.ai.azure.com/chat/completions"

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {github_token}",
    }

    payload = {
        "model": "gpt-4o",  # GitHub Models supports gpt-4o, gpt-4o-mini, etc.
        "messages": [
            {"role": "user", "content": prompt}
        ],
        "max_tokens": 1024,
        "temperature": 0.7,
    }

    # Errors are returned as warning strings rather than raised, so a failed
    # analysis does not break the calling workflow.
    try:
        response = requests.post(endpoint, headers=headers, json=payload, timeout=30)
        response.raise_for_status()
        result = response.json()
        return result["choices"][0]["message"]["content"]
    except requests.exceptions.RequestException as e:
        return f"⚠️ Failed to analyze with GitHub Models: {e}"
    except (KeyError, IndexError) as e:
        return f"⚠️ Unexpected response format from GitHub Models: {e}"


def main() -> None:
    if len(sys.argv) != 2:
        print("Usage: analyze_profile.py <profile.txt>", file=sys.stderr)
        sys.exit(1)

    profile_path = sys.argv[1]
    if not Path(profile_path).exists():
        print(f"Error: {profile_path} not found", file=sys.stderr)
        sys.exit(1)

    analysis = analyze_profile_with_github_models(profile_path)
    print(analysis)


if __name__ == "__main__":
    main()
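
In CI, the analysis printed to stdout can also be published to the workflow run page. A minimal sketch of that glue, assuming this file is importable as `analyze_profile` and the step runs under GitHub Actions (where `GITHUB_STEP_SUMMARY` points at a markdown file rendered on the run page); the `profile.txt` path is illustrative:

```python
# Hypothetical wrapper: append the analysis to the Actions job summary.
import os

from analyze_profile import analyze_profile_with_github_models

analysis = analyze_profile_with_github_models("profile.txt")  # assumed profile path
summary_path = os.getenv("GITHUB_STEP_SUMMARY")  # set by GitHub Actions
if summary_path:
    with open(summary_path, "a", encoding="utf-8") as fh:
        fh.write("## CPU profile analysis\n\n" + analysis + "\n")
else:
    print(analysis)  # fallback for local runs
```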