fixing thinking mode in rechunk and model renaming in /v1 endpoints

This commit is contained in:
Alpha Nerd 2025-09-17 11:39:51 +02:00
parent f4678018bf
commit d85d120cc8

View file

@@ -1,22 +0,0 @@
# config.yaml
---
# Backend endpoints that requests are load-balanced across (local Ollama instances).
endpoints:
- http://192.168.0.50:11434
- http://192.168.0.51:11434
- http://192.168.0.52:11434
# Remote OpenAI-compatible endpoints — uncomment to enable:
#- https://openrouter.ai/api/v1
#- https://api.openai.com/v1
#- https://generativelanguage.googleapis.com/v1beta/openai

# Maximum concurrent connections *per endpoint/model pair*
# (should match the backend's OLLAMA_NUM_PARALLEL setting).
max_concurrent_connections: 2

# API keys for remote endpoints.
# Set an environment variable like OPENAI_KEY and reference it as "${OPENAI_KEY}".
# Each key here must match an endpoint URL exactly as written in the
# endpoints block above.
# NOTE: entries must be indented under api_keys — at the same level they would
# parse as unrelated top-level keys and api_keys would be null.
api_keys:
  "http://192.168.0.50:11434": "ollama"
  "http://192.168.0.51:11434": "ollama"
  "http://192.168.0.52:11434": "ollama"
  #"https://openrouter.ai/api/v1": "${OPENROUTER_KEY}"
  #"https://api.openai.com/v1": "${OPENAI_KEY}"
  #"https://generativelanguage.googleapis.com/v1beta/openai": "${GEMINI_KEY}"