33"""
44
55import os
6- from dataclasses import dataclass , field
6+ from dataclasses import asdict , dataclass , field
77from pathlib import Path
88from typing import TYPE_CHECKING , Any , Callable , Dict , List , Optional , Union
99
@@ -41,7 +41,7 @@ class LLMModelConfig:

     # Reproducibility
     random_seed: Optional[int] = None
-
+
     # Reasoning parameters
     reasoning_effort: Optional[str] = None

@@ -75,7 +75,7 @@ class LLMConfig(LLMModelConfig):
     primary_model_weight: float = None
     secondary_model: str = None
     secondary_model_weight: float = None
-
+
     # Reasoning parameters (inherited from LLMModelConfig but can be overridden)
     reasoning_effort: Optional[str] = None

@@ -146,7 +146,7 @@ def rebuild_models(self) -> None:
         # Clear existing models lists
         self.models = []
         self.evaluator_models = []
-
+
         # Re-run model generation logic from __post_init__
         if self.primary_model:
             # Create primary model
@@ -205,6 +205,7 @@ class PromptConfig:
     template_variations: Dict[str, List[str]] = field(default_factory=dict)

     # Meta-prompting
+    # Note: meta-prompting features not implemented
     use_meta_prompting: bool = False
     meta_prompt_weight: float = 0.1

@@ -254,6 +255,7 @@ class DatabaseConfig:
     elite_selection_ratio: float = 0.1
     exploration_ratio: float = 0.2
     exploitation_ratio: float = 0.7
+    # Note: diversity_metric fixed to "edit_distance"
     diversity_metric: str = "edit_distance"  # Options: "edit_distance", "feature_based"

     # Feature map dimensions for MAP-Elites
@@ -291,6 +293,7 @@ class DatabaseConfig:
     embedding_model: Optional[str] = None
     similarity_threshold: float = 0.99

+
 @dataclass
 class EvaluatorConfig:
     """Configuration for program evaluation"""
@@ -300,6 +303,7 @@ class EvaluatorConfig:
     max_retries: int = 3

     # Resource limits for evaluation
+    # Note: resource limits not implemented
     memory_limit_mb: Optional[int] = None
     cpu_limit: Optional[float] = None

@@ -309,6 +313,7 @@ class EvaluatorConfig:

     # Parallel evaluation
     parallel_evaluations: int = 1
+    # Note: distributed evaluation not implemented
     distributed: bool = False

     # LLM-based feedback
@@ -323,7 +328,7 @@ class EvaluatorConfig:
 @dataclass
 class EvolutionTraceConfig:
     """Configuration for evolution trace logging"""
-
+
     enabled: bool = False
     format: str = "jsonl"  # Options: "jsonl", "json", "hdf5"
     include_code: bool = False
@@ -362,6 +367,9 @@ class Config:
     convergence_threshold: float = 0.001
     early_stopping_metric: str = "combined_score"

+    # Parallel controller settings
+    max_tasks_per_child: Optional[int] = None
+
     @classmethod
     def from_yaml(cls, path: Union[str, Path]) -> "Config":
         """Load configuration from a YAML file"""
@@ -377,7 +385,9 @@ def from_dict(cls, config_dict: Dict[str, Any]) -> "Config":

         # Update top-level fields
         for key, value in config_dict.items():
-            if key not in ["llm", "prompt", "database", "evaluator", "evolution_trace"] and hasattr(config, key):
+            if key not in ["llm", "prompt", "database", "evaluator", "evolution_trace"] and hasattr(
+                config, key
+            ):
                 setattr(config, key, value)

         # Update nested configs
@@ -406,87 +416,7 @@ def from_dict(cls, config_dict: Dict[str, Any]) -> "Config":
         return config

     def to_dict(self) -> Dict[str, Any]:
-        """Convert configuration to a dictionary"""
-        return {
-            # General settings
-            "max_iterations": self.max_iterations,
-            "checkpoint_interval": self.checkpoint_interval,
-            "log_level": self.log_level,
-            "log_dir": self.log_dir,
-            "random_seed": self.random_seed,
-            # Component configurations
-            "llm": {
-                "models": self.llm.models,
-                "evaluator_models": self.llm.evaluator_models,
-                "api_base": self.llm.api_base,
-                "temperature": self.llm.temperature,
-                "top_p": self.llm.top_p,
-                "max_tokens": self.llm.max_tokens,
-                "timeout": self.llm.timeout,
-                "retries": self.llm.retries,
-                "retry_delay": self.llm.retry_delay,
-            },
-            "prompt": {
-                "template_dir": self.prompt.template_dir,
-                "system_message": self.prompt.system_message,
-                "evaluator_system_message": self.prompt.evaluator_system_message,
-                "num_top_programs": self.prompt.num_top_programs,
-                "num_diverse_programs": self.prompt.num_diverse_programs,
-                "use_template_stochasticity": self.prompt.use_template_stochasticity,
-                "template_variations": self.prompt.template_variations,
-                # Note: meta-prompting features not implemented
-                # "use_meta_prompting": self.prompt.use_meta_prompting,
-                # "meta_prompt_weight": self.prompt.meta_prompt_weight,
-            },
-            "database": {
-                "db_path": self.database.db_path,
-                "in_memory": self.database.in_memory,
-                "population_size": self.database.population_size,
-                "archive_size": self.database.archive_size,
-                "num_islands": self.database.num_islands,
-                "elite_selection_ratio": self.database.elite_selection_ratio,
-                "exploration_ratio": self.database.exploration_ratio,
-                "exploitation_ratio": self.database.exploitation_ratio,
-                # Note: diversity_metric fixed to "edit_distance"
-                # "diversity_metric": self.database.diversity_metric,
-                "feature_dimensions": self.database.feature_dimensions,
-                "feature_bins": self.database.feature_bins,
-                "migration_interval": self.database.migration_interval,
-                "migration_rate": self.database.migration_rate,
-                "random_seed": self.database.random_seed,
-                "log_prompts": self.database.log_prompts,
-            },
-            "evaluator": {
-                "timeout": self.evaluator.timeout,
-                "max_retries": self.evaluator.max_retries,
-                # Note: resource limits not implemented
-                # "memory_limit_mb": self.evaluator.memory_limit_mb,
-                # "cpu_limit": self.evaluator.cpu_limit,
-                "cascade_evaluation": self.evaluator.cascade_evaluation,
-                "cascade_thresholds": self.evaluator.cascade_thresholds,
-                "parallel_evaluations": self.evaluator.parallel_evaluations,
-                # Note: distributed evaluation not implemented
-                # "distributed": self.evaluator.distributed,
-                "use_llm_feedback": self.evaluator.use_llm_feedback,
-                "llm_feedback_weight": self.evaluator.llm_feedback_weight,
-            },
-            "evolution_trace": {
-                "enabled": self.evolution_trace.enabled,
-                "format": self.evolution_trace.format,
-                "include_code": self.evolution_trace.include_code,
-                "include_prompts": self.evolution_trace.include_prompts,
-                "output_path": self.evolution_trace.output_path,
-                "buffer_size": self.evolution_trace.buffer_size,
-                "compress": self.evolution_trace.compress,
-            },
-            # Evolution settings
-            "diff_based_evolution": self.diff_based_evolution,
-            "max_code_length": self.max_code_length,
-            # Early stopping settings
-            "early_stopping_patience": self.early_stopping_patience,
-            "convergence_threshold": self.convergence_threshold,
-            "early_stopping_metric": self.early_stopping_metric,
-        }
-
+        return asdict(self)

     def to_yaml(self, path: Union[str, Path]) -> None:
         """Save configuration to a YAML file"""