Coverage for core / autogen_config.py: 0.0%
13 statements
« prev ^ index » next coverage.py v7.14.0, created at 2026-05-12 04:49 +0000
1"""Autogen LLM config_list — single source of truth.
3Used by both create_recipe.py and reuse_recipe.py. Resolves the LLM
4endpoint based on node tier and environment configuration:
5 - regional/central with HEVOLVE_LLM_ENDPOINT_URL → cloud endpoint
6 - flat with HEVOLVE_LLM_API_KEY → wizard-configured cloud
7 - flat without cloud → local llama.cpp on get_local_llm_url()
8"""
9import os
def get_autogen_config_list() -> list:
    """Return the autogen config_list resolved from environment variables.

    Resolution order:
      1. regional/central tier with HEVOLVE_LLM_ENDPOINT_URL -> cloud endpoint
      2. active cloud provider with HEVOLVE_LLM_API_KEY -> wizard-configured cloud
      3. otherwise -> local llama.cpp server at get_local_llm_url()
    """
    from core.port_registry import get_local_llm_url

    env = os.environ
    tier = env.get('HEVOLVE_NODE_TIER', 'flat')
    cloud_provider = env.get('HEVOLVE_ACTIVE_CLOUD_PROVIDER', '')
    endpoint = env.get('HEVOLVE_LLM_ENDPOINT_URL')

    # Regional/central nodes with an explicit endpoint use the cloud config.
    if endpoint and tier in ('regional', 'central'):
        return [{
            "model": env.get('HEVOLVE_LLM_MODEL_NAME', 'gpt-4.1-mini'),
            "api_key": env.get('HEVOLVE_LLM_API_KEY', 'dummy'),
            "base_url": env['HEVOLVE_LLM_ENDPOINT_URL'],
            "price": [0.0025, 0.01]
        }]

    # Flat node with wizard-configured cloud credentials.
    api_key = env.get('HEVOLVE_LLM_API_KEY')
    if cloud_provider and api_key:
        cloud_cfg = {
            "model": env.get('HEVOLVE_LLM_MODEL_NAME', 'gpt-4o-mini'),
            "api_key": api_key,
            "price": [0.0025, 0.01],
        }
        # base_url is optional here: omit it unless the endpoint is set.
        if endpoint:
            cloud_cfg["base_url"] = endpoint
        return [cloud_cfg]

    # Fallback: local llama.cpp instance, no real API key or pricing.
    return [{
        "model": env.get('HEVOLVE_LOCAL_LLM_MODEL', 'local'),
        "api_key": 'dummy',
        "base_url": get_local_llm_url(),
        "price": [0, 0],
    }]