Add the ability to use LLM Providers from the Arch config (#112)

Signed-off-by: José Ulises Niño Rivera <junr03@users.noreply.github.com>
This commit is contained in:
José Ulises Niño Rivera 2024-10-03 10:57:01 -07:00 committed by GitHub
parent 1b57a49c9d
commit 8ea917aae5
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
16 changed files with 295 additions and 210 deletions

View file

@@ -7,17 +7,32 @@ ENVOY_CONFIG_TEMPLATE_FILE = os.getenv('ENVOY_CONFIG_TEMPLATE_FILE', 'envoy.temp
# Paths for the input Arch config, the rendered Envoy config output, and the
# schema used to validate the Arch config; each overridable via environment.
ARCH_CONFIG_FILE = os.getenv('ARCH_CONFIG_FILE', '/config/arch_config.yaml')
ENVOY_CONFIG_FILE_RENDERED = os.getenv('ENVOY_CONFIG_FILE_RENDERED', '/etc/envoy/envoy.yaml')
ARCH_CONFIG_SCHEMA_FILE = os.getenv('ARCH_CONFIG_SCHEMA_FILE', 'arch_config_schema.yaml')
# Provider API keys, read from the environment at startup. The default of
# False (rather than None) is preserved for backward compatibility; any falsy
# value is treated as "no key available".
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY', False)
MISTRAL_API_KEY = os.getenv('MISTRAL_API_KEY', False)


def add_secret_key_to_llm_providers(config_yaml):
    """Resolve ``$...`` access-key placeholders in the llm_providers section.

    Each provider whose ``access_key`` matches a known placeholder is rewritten
    with the real key from the environment. A provider with an unrecognized
    placeholder, a missing ``access_key`` field, or whose environment variable
    is unset has ``access_key`` removed — so neither the raw placeholder nor a
    literal ``False`` leaks into the rendered Envoy config.

    Mutates and returns ``config_yaml``; an ``llm_providers`` list (possibly
    empty) is always present on return.
    """
    # Placeholder -> resolved key; data-driven dispatch instead of if/elif.
    key_map = {
        "$MISTRAL_ACCESS_KEY": MISTRAL_API_KEY,
        "$OPENAI_ACCESS_KEY": OPENAI_API_KEY,
    }
    llm_providers = []
    for llm_provider in config_yaml.get("llm_providers", []):
        # .get() rather than [] — the original crashed with KeyError on
        # providers that had no access_key field at all.
        resolved = key_map.get(llm_provider.get('access_key'))
        if resolved:
            llm_provider['access_key'] = resolved
        else:
            # Unknown placeholder or unset env var (resolved is False/None):
            # drop the field instead of emitting False into the config.
            llm_provider.pop('access_key', None)
        llm_providers.append(llm_provider)
    config_yaml["llm_providers"] = llm_providers
    return config_yaml
# NOTE(review): this region is a unified-diff view with the +/- markers
# stripped — some statements below appear in both their old (removed) and
# new (added) forms; indentation was also lost in extraction.
env = Environment(loader=FileSystemLoader('./'))
template = env.get_template('envoy.template.yaml')
# Read the raw Arch config text that will be rendered into the Envoy template.
with open(ARCH_CONFIG_FILE, 'r') as file:
# old line (removed by this commit):
katanemo_config = file.read()
# new line (added) — renames katanemo_config to arch_config_string:
arch_config_string = file.read()
# Read the schema document used to validate the Arch config below.
with open(ARCH_CONFIG_SCHEMA_FILE, 'r') as file:
arch_config_schema = file.read()
# Parse both YAML documents (old/new variable-name pair again).
config_yaml = yaml.safe_load(katanemo_config)
config_yaml = yaml.safe_load(arch_config_string)
config_schema_yaml = yaml.safe_load(arch_config_schema)
# Validation block — its body continues past this hunk boundary (not shown).
try:
@@ -54,9 +69,16 @@ for name, endpoint_details in endpoints.items():
print("updated clusters", inferred_clusters)
# Resolve $-placeholders into real API keys before dumping the config.
config_yaml = add_secret_key_to_llm_providers(config_yaml)
arch_llm_providers = config_yaml["llm_providers"]
# Re-serialize the key-resolved config to YAML for template injection.
arch_config_string = yaml.dump(config_yaml)
# NOTE(review): this prints resolved provider entries, i.e. real API keys,
# to stdout — confirm secrets in logs are acceptable here.
print("llm_providers:", arch_llm_providers)
# Jinja2 template context. Diff view: the first two entries are the removed
# (old) keys; the last three are the added replacements.
data = {
'katanemo_config': katanemo_config,
'arch_clusters': inferred_clusters
'arch_config': arch_config_string,
'arch_clusters': inferred_clusters,
'arch_llm_providers': arch_llm_providers
}
rendered = template.render(data)