
Commit aa567a7

Changes to make it work with Azure-hosted OpenAI models
1 parent 5878857 commit aa567a7

File tree

2 files changed: +130 −2 lines changed


config.yaml

Lines changed: 112 additions & 0 deletions
@@ -0,0 +1,112 @@
# OpenEvolve Default Configuration
# This file contains all available configuration options with sensible defaults
# You can use this as a template for your own configuration

# General settings
max_iterations: 1000          # Maximum number of evolution iterations
checkpoint_interval: 50       # Save checkpoints every N iterations
log_level: "INFO"             # Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
log_dir: null                 # Custom directory for logs (default: output_dir/logs)
random_seed: null             # Random seed for reproducibility (null = random)

# Evolution settings
diff_based_evolution: true    # Use diff-based evolution (true) or full rewrites (false)
allow_full_rewrites: false    # Allow occasional full rewrites even in diff-based mode
max_code_length: 10000        # Maximum allowed code length in characters
# LLM configuration
llm:
  models:
    - name: "o1"
      weight: 1.0

  evaluator_models:
    - name: "o1"
      weight: 1.0

  # Azure endpoint including the deployment path – no query string
  # (the api-version query parameter is added by the client code, see openai.py below)
  api_base: "https://<YOUR_BASE>.openai.azure.com/openai/deployments/<YOUR_DEPLOYMENT>"  # e.g. <YOUR_DEPLOYMENT> = o1

  # Tell the SDK which API flavour and version to use
  # api_type: "azure"
  # api_version: "2025-01-01-preview"
  api_key: YOUR_API_KEY       # Or provide it directly here
  temperature: 0.7
  top_p: 0.95
  max_tokens: 4096
  timeout: 60
  retries: 3
  retry_delay: 5

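To see what these Azure settings resolve to at request time, here is a small illustrative sketch (not part of the repo; resource and deployment names are placeholders). The SDK appends the route, e.g. /chat/completions, to api_base; the client code further down supplies the api-version query parameter and the api-key header:

    # Illustrative only – placeholder resource and deployment names
    api_base = "https://myresource.openai.azure.com/openai/deployments/o1"
    api_version = "2025-01-01-preview"
    api_key = "YOUR_API_KEY"

    request_url = f"{api_base}/chat/completions?api-version={api_version}"
    headers = {"api-key": api_key}
    # -> https://myresource.openai.azure.com/openai/deployments/o1/chat/completions?api-version=2025-01-01-preview
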
# Prompt configuration
prompt:
  template_dir: null          # Custom directory for prompt templates
  system_message: "You are an expert coder helping to improve programs through evolution."
  evaluator_system_message: "You are an expert code reviewer."

  # Number of examples to include in the prompt
  num_top_programs: 3         # Number of top-performing programs to include
  num_diverse_programs: 2     # Number of diverse programs to include

  # Template stochasticity
  use_template_stochasticity: true  # Use random variations in templates for diversity
  template_variations:        # Different phrasings for parts of the template
    improvement_suggestion:
      - "Here's how we could improve this code:"
      - "I suggest the following improvements:"
      - "We can enhance this code by:"

  # Note: meta-prompting features are not yet implemented

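A minimal sketch of what template stochasticity amounts to, assuming the variations are sampled uniformly at prompt-build time (function and variable names here are illustrative, not OpenEvolve's API):

    import random

    template_variations = {
        "improvement_suggestion": [
            "Here's how we could improve this code:",
            "I suggest the following improvements:",
            "We can enhance this code by:",
        ],
    }

    def pick_variation(key: str) -> str:
        # Sample one phrasing so consecutive prompts are not word-for-word identical
        return random.choice(template_variations[key])

    header = pick_variation("improvement_suggestion")
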
# Database configuration
database:
  # General settings
  db_path: null               # Path to persist database (null = in-memory only)
  in_memory: true             # Keep database in memory for faster access

  # Evolutionary parameters
  population_size: 1000       # Maximum number of programs to keep in memory
  archive_size: 100           # Size of elite archive
  num_islands: 5              # Number of islands for island model (separate populations)

  # Island-based evolution parameters
  # Islands provide diversity by maintaining separate populations that evolve independently.
  # Migration periodically shares the best solutions between adjacent islands (see the sketch below).
  migration_interval: 50      # Migrate between islands every N generations
  migration_rate: 0.1         # Fraction of top programs to migrate (0.1 = 10%)

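As a sketch of what these two settings control (illustrative only – assumes a ring topology and programs with a score attribute, neither of which this commit confirms):

    def migrate(islands, migration_rate=0.1):
        # Called every `migration_interval` generations
        for i, island in enumerate(islands):
            k = max(1, int(len(island) * migration_rate))  # top fraction to copy
            migrants = sorted(island, key=lambda p: p.score, reverse=True)[:k]
            islands[(i + 1) % len(islands)].extend(migrants)  # adjacent island in the ring
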
  # Selection parameters
  elite_selection_ratio: 0.1  # Ratio of elite programs to select
  exploration_ratio: 0.2      # Ratio of exploration vs exploitation
  exploitation_ratio: 0.7     # Ratio of exploitation vs random selection
  # Note: diversity_metric is fixed to "edit_distance" (feature_based not implemented)

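One plausible reading of the three ratios, as a hedged sketch (the actual selection logic may differ): roughly 70% of parents are exploited from the elite archive, 20% are explored from the wider population, and the remainder is drawn uniformly at random.

    import random

    def sample_parent(population, archive,
                      exploitation_ratio=0.7, exploration_ratio=0.2):
        r = random.random()
        if r < exploitation_ratio:                      # exploit proven elites
            return random.choice(archive)
        if r < exploitation_ratio + exploration_ratio:  # explore the population
            return random.choice(population)
        return random.choice(population + archive)      # remaining ~10%: pure random
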
  # Feature map dimensions for MAP-Elites
  feature_dimensions:         # Dimensions for MAP-Elites feature map
    - "score"                 # Performance score
    - "complexity"            # Code complexity (length)
  feature_bins: 10            # Number of bins per dimension

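A compact sketch of the MAP-Elites grid these settings describe (illustrative; assumes scores normalized to [0, 1] and uses max_code_length to normalize the complexity axis):

    def feature_cell(program, feature_bins=10, max_len=10000):
        # Map a program to a (score_bin, complexity_bin) cell
        score_bin = min(int(program.score * feature_bins), feature_bins - 1)
        complexity_bin = min(int(len(program.code) / max_len * feature_bins),
                             feature_bins - 1)
        return (score_bin, complexity_bin)

    # MAP-Elites keeps only the best program per cell, e.g.:
    # if cell not in grid or program.score > grid[cell].score:
    #     grid[cell] = program
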
# Evaluator configuration
evaluator:
  # General settings
  timeout: 300                # Maximum evaluation time in seconds
  max_retries: 3              # Maximum number of retries for evaluation

  # Note: resource limits (memory_limit_mb, cpu_limit) are not yet implemented

  # Evaluation strategies
  cascade_evaluation: true    # Use cascade evaluation to filter bad solutions early
  cascade_thresholds:         # Thresholds for advancing to the next evaluation stage
    - 0.5                     # First stage threshold
    - 0.75                    # Second stage threshold
    - 0.9                     # Third stage threshold

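The cascade idea in sketch form (the stage functions are hypothetical, e.g. a smoke test, then unit tests, then a full benchmark): cheap stages run first, and a candidate must clear each threshold to earn the next, more expensive evaluation.

    def cascade_evaluate(program, stages, thresholds=(0.5, 0.75, 0.9)):
        score = 0.0
        for stage, threshold in zip(stages, thresholds):
            score = stage(program)      # cheapest stage first
            if score < threshold:
                return score            # filtered out early, saving budget
        return score
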
  # Parallel evaluation
  parallel_evaluations: 4     # Number of parallel evaluations
  # Note: distributed evaluation is not yet implemented

  # LLM-based feedback (experimental)
  use_llm_feedback: false     # Use LLM to evaluate code quality
  llm_feedback_weight: 0.1    # Weight for LLM feedback in final score
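
Assuming the weight is applied as a simple convex blend (an assumption on my part, not confirmed by this commit):

    def combined_score(eval_score, llm_score, llm_feedback_weight=0.1):
        # Hypothetical blending; the actual formula lives in the evaluator
        return (1 - llm_feedback_weight) * eval_score + llm_feedback_weight * llm_score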

openevolve/llm/openai.py

Lines changed: 18 additions & 2 deletions
@@ -12,6 +12,8 @@
 from openevolve.config import LLMConfig
 from openevolve.llm.base import LLMInterface
 
+from openai import AzureOpenAI  # note: not used below; Azure is reached via openai.OpenAI instead
+
 logger = logging.getLogger(__name__)
 
@@ -34,9 +36,13 @@ def __init__(
         self.api_key = model_cfg.api_key
 
         # Set up API client
         self.client = openai.OpenAI(
-            api_key=self.api_key,
-            base_url=self.api_base,
+            api_key=self.api_key,
+            base_url=self.api_base,
+            default_headers={"api-key": self.api_key},
+            default_query={"api-version": "2025-01-01-preview"},
         )
 
         logger.info(f"Initialized OpenAI LLM with model: {self.model}")
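
Note that AzureOpenAI is imported above, yet the commit keeps using openai.OpenAI with an explicit header and query parameter. For comparison, a roughly equivalent construction with the dedicated Azure client would be (endpoint and version are placeholders):

    from openai import AzureOpenAI

    client = AzureOpenAI(
        azure_endpoint="https://<YOUR_BASE>.openai.azure.com",  # resource root, no deployment path
        api_key=api_key,
        api_version="2025-01-01-preview",
    )
    # The deployment name is then passed as `model=` at call time.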
@@ -102,6 +108,16 @@ async def generate_with_context(
 
     async def _call_api(self, params: Dict[str, Any]) -> str:
         """Make the actual API call"""
+        params = params.copy()  # don't mutate the caller's dict
+        # Azure o-series models expect max_completion_tokens instead of max_tokens
+        if "max_tokens" in params:
+            params["extra_body"] = {"max_completion_tokens": params.pop("max_tokens")}
+        # Drop sampling parameters that o-series models reject
+        for unsupported in ("temperature", "top_p",
+                            "frequency_penalty", "presence_penalty"):
+            params.pop(unsupported, None)
         # Use asyncio to run the blocking API call in a thread pool
         loop = asyncio.get_event_loop()
         response = await loop.run_in_executor(
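
To make the effect concrete, roughly what the new block does to an incoming params dict (values illustrative):

    params = {"model": "o1",
              "messages": [{"role": "user", "content": "..."}],
              "max_tokens": 4096, "temperature": 0.7, "top_p": 0.95}
    # after the block above:
    # {"model": "o1",
    #  "messages": [{"role": "user", "content": "..."}],
    #  "extra_body": {"max_completion_tokens": 4096}}
    # temperature, top_p, frequency_penalty and presence_penalty are removed.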
