---
# LLM Model Configurations
models:
  # The "Master Switch": key of the model (under a provider's `models` map)
  # that consumers should use by default.
  default: deepseek_v3

providers:
  # Provider 1: MegaLLM (OpenAI-Like)
  openai_like:
    # Quoted so an empty/odd env expansion can't change the YAML type.
    api_key: "${MEGALLM_API_KEY}"
    base_url: "${API_URL}"
    models:
      deepseek_v3:
        id: "deepseek-ai/deepseek-v3.1"
        temperature: 0.7
        max_tokens: 4096
        supports_streaming: true

  # Provider 2: OpenRouter
  openrouter:
    api_key: "${OPENROUTER_API_KEY}"
    base_url: "${OPENROUTER_BASE_URL}"
    models:
      deepseek_r1:
        id: "deepseek/deepseek-r1-0528:free"
        temperature: 0.6
        max_tokens: 4096
        # NOTE(review): no supports_streaming flag here, unlike deepseek_v3 —
        # confirm whether the consumer treats a missing flag as false.

# Rate limiting
rate_limits:
  requests_per_minute: 60
  tokens_per_minute: 100000