# Configuration file for llm4hep supervisor-coder framework
#
# This file controls the LLM models and parameters used for testing.
# Copy this file to config.yml and customize for your experiments.

# Supervisor model - analyzes tasks and provides instructions to the coder
supervisor: lbl/cborg-deepthought:latest

# Coder model - generates Python code based on supervisor instructions
coder: lbl/cborg-deepthought:latest

# Temperature for LLM generation (0.0 = deterministic, 1.0 = creative)
temperature: 0.0
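
# Example (illustrative only): a small non-zero temperature yields more varied
# code generations; any value in the 0.0-1.0 range above applies.
# temperature: 0.2
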
# Optional: Maximum iterations per step (default: 10)
# Uncomment to limit supervisor-coder refinement loops
# max_iterations: 3

# Optional: Custom output directory
# Uncomment to specify where results should be saved
# out_dir: results/my_experiment

# Model Options:
# See CBORG_MODEL_MAPPINGS.md for available models including:
#
# Anthropic Claude:
# - anthropic/claude-sonnet:latest
# - anthropic/claude-opus:latest
# - anthropic/claude-haiku:latest
#
# OpenAI:
# - openai/gpt-5-mini
# - openai/gpt-5
# - openai/o3
# - openai/o3-mini
# - openai/o4-mini
#
# Google Gemini:
# - google/gemini:latest
# - google/gemini-flash
#
# xAI Grok:
# - xai/grok:latest
# - xai/grok-mini
#
# AWS/Meta Llama:
# - aws/llama-4-maverick
# - aws/llama-4-scout
#
# Other:
# - deepseek-r1
# - gcp/qwen-3
# - gpt-oss-120b
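
# Example override (illustrative pairing drawn from the list above):
# a larger supervisor model directing a lighter, faster coder model.
# supervisor: anthropic/claude-opus:latest
# coder: openai/gpt-5-mini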