2024-01-05 20:23:14 +08:00
|
|
|
llm:
|
2024-04-22 19:24:13 +08:00
|
|
|
api_type: "openai" # or azure / ollama / groq etc.
|
2024-01-11 15:44:12 +08:00
|
|
|
base_url: "YOUR_BASE_URL"
|
|
|
|
|
api_key: "YOUR_API_KEY"
|
2024-04-22 19:18:03 +08:00
|
|
|
model: "gpt-4-turbo" # or gpt-3.5-turbo
|
2024-02-18 11:31:53 +08:00
|
|
|
proxy: "YOUR_PROXY" # for LLM API requests
|
2024-03-21 13:21:24 +08:00
|
|
|
# timeout: 600  # Optional. Defaults to 300 when unset or set to 0.
|
2024-04-22 19:18:03 +08:00
|
|
|
# Details: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
|
|
|
|
|
pricing_plan: "" # Optional. Use for Azure LLM when its model name is not the same as OpenAI's
|
2024-01-11 15:44:12 +08:00
|
|
|
|
2024-04-22 19:24:13 +08:00
|
|
|
|
2024-04-10 14:12:45 +08:00
|
|
|
# RAG Embedding.
|
|
|
|
|
# For backward compatibility, if the embedding is not set and the llm's api_type is either openai or azure, the llm's config will be used.
|
|
|
|
|
embedding:
|
2024-04-23 17:05:25 +08:00
|
|
|
api_type: "" # openai / azure / gemini / ollama etc. Check EmbeddingType for more options.
|
|
|
|
|
base_url: ""
|
|
|
|
|
api_key: ""
|
|
|
|
|
model: ""
|
|
|
|
|
api_version: ""
|
|
|
|
|
embed_batch_size: 100
|
2024-05-03 16:46:41 +08:00
|
|
|
dimensions:  # Optional. Output dimension of the embedding model; leave empty to use the model's default.
|
2024-01-11 15:44:12 +08:00
|
|
|
|
2024-04-23 15:24:28 +08:00
|
|
|
# Role's custom configuration
|
|
|
|
|
roles:
|
|
|
|
|
- role: "ProductManager" # role's class name or role's role_id
|
|
|
|
|
llm:
|
|
|
|
|
api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options
|
|
|
|
|
base_url: "YOUR_BASE_URL"
|
|
|
|
|
api_key: "YOUR_API_KEY"
|
|
|
|
|
proxy: "YOUR_PROXY" # for LLM API requests
|
|
|
|
|
model: "gpt-4-turbo-1106"
|
|
|
|
|
- role: "Architect"
|
|
|
|
|
llm:
|
|
|
|
|
api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options
|
|
|
|
|
base_url: "YOUR_BASE_URL"
|
|
|
|
|
api_key: "YOUR_API_KEY"
|
|
|
|
|
proxy: "YOUR_PROXY" # for LLM API requests
|
|
|
|
|
model: "gpt-35-turbo"
|
|
|
|
|
- role: "ProjectManager"
|
|
|
|
|
llm:
|
|
|
|
|
api_type: "azure"
|
|
|
|
|
base_url: "YOUR_BASE_URL"
|
|
|
|
|
api_key: "YOUR_API_KEY"
|
|
|
|
|
api_version: "YOUR_API_VERSION"
|
|
|
|
|
model: "gpt-4-1106"
|
|
|
|
|
- role: "Engineer"
|
|
|
|
|
llm:
|
|
|
|
|
api_type: "azure"
|
|
|
|
|
base_url: "YOUR_BASE_URL"
|
|
|
|
|
api_key: "YOUR_API_KEY"
|
|
|
|
|
api_version: "YOUR_API_VERSION"
|
|
|
|
|
model: "gpt-35-turbo-1106"
|
|
|
|
|
|
2024-03-06 00:44:19 +08:00
|
|
|
repair_llm_output: true # when the output is not valid JSON, try to repair it
|
|
|
|
|
|
2024-02-18 11:31:53 +08:00
|
|
|
proxy: "YOUR_PROXY" # for tools like requests, playwright, selenium, etc.
|
2024-01-05 20:23:14 +08:00
|
|
|
|
|
|
|
|
search:
|
2024-01-11 15:44:12 +08:00
|
|
|
api_type: "google"
|
|
|
|
|
api_key: "YOUR_API_KEY"
|
|
|
|
|
cse_id: "YOUR_CSE_ID"
|
2024-01-05 20:23:14 +08:00
|
|
|
|
2024-02-02 13:55:15 +08:00
|
|
|
browser:
|
|
|
|
|
engine: "playwright" # playwright/selenium
|
|
|
|
|
browser_type: "chromium" # playwright: chromium/firefox/webkit; selenium: chrome/firefox/edge/ie
|
|
|
|
|
|
2024-01-05 20:23:14 +08:00
|
|
|
mermaid:
|
2024-01-11 15:44:12 +08:00
|
|
|
engine: "pyppeteer"
|
2024-05-24 16:03:57 +08:00
|
|
|
pyppeteer_path: "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"  # path to the browser executable itself, not the .app bundle
|
2024-01-05 20:23:14 +08:00
|
|
|
|
|
|
|
|
redis:
|
|
|
|
|
host: "YOUR_HOST"
|
|
|
|
|
port: 32582
|
|
|
|
|
password: "YOUR_PASSWORD"
|
|
|
|
|
db: "0"
|
|
|
|
|
|
|
|
|
|
s3:
|
|
|
|
|
access_key: "YOUR_ACCESS_KEY"
|
2024-01-11 15:44:12 +08:00
|
|
|
secret_key: "YOUR_SECRET_KEY"
|
2024-01-05 20:23:14 +08:00
|
|
|
endpoint: "YOUR_ENDPOINT"
|
|
|
|
|
secure: false
|
|
|
|
|
bucket: "test"
|
|
|
|
|
|
2024-06-04 10:28:39 +08:00
|
|
|
exp_pool:
|
2024-08-09 16:05:48 +08:00
|
|
|
enabled: false
|
2024-07-08 10:09:36 +08:00
|
|
|
enable_read: false
|
|
|
|
|
enable_write: false
|
|
|
|
|
persist_path: .chroma_exp_data # The directory.
|
2024-08-19 14:07:13 +08:00
|
|
|
retrieval_type: bm25 # Default is `bm25`, can be set to `chroma` for vector storage, which requires setting up embedding.
|
2024-08-20 20:19:42 +08:00
|
|
|
use_llm_ranker: true # Default is `true`, it will use LLM Reranker to get better result.
|
2024-08-23 10:14:12 +08:00
|
|
|
collection_name: experience_pool # When `retrieval_type` is `chroma`, `collection_name` is the collection name in chromadb.
|
|
|
|
|
|
2024-09-12 11:20:40 +08:00
|
|
|
role_zero:
|
2024-09-12 11:31:12 +08:00
|
|
|
enable_longterm_memory: false # Whether to use long-term memory. Default is `false`.
|
2024-09-29 11:27:39 +08:00
|
|
|
longterm_memory_persist_path: .role_memory_data # The directory to save data.
|
2024-10-11 15:12:24 +08:00
|
|
|
memory_k: 200 # The capacity of short-term memory.
|
2024-10-11 15:37:33 +08:00
|
|
|
similarity_top_k: 5 # The number of long-term memories to retrieve.
|
2024-10-12 17:52:50 +08:00
|
|
|
use_llm_ranker: false # Whether to use LLM Reranker to get better result. Default is `false`.
|
2024-01-05 20:23:14 +08:00
|
|
|
|
2024-02-01 13:45:00 +08:00
|
|
|
azure_tts_subscription_key: "YOUR_SUBSCRIPTION_KEY"
|
|
|
|
|
azure_tts_region: "eastus"
|
2024-01-05 20:23:14 +08:00
|
|
|
|
2024-02-01 13:45:00 +08:00
|
|
|
iflytek_app_id: "YOUR_APP_ID"
|
|
|
|
|
iflytek_api_key: "YOUR_API_KEY"
|
|
|
|
|
iflytek_api_secret: "YOUR_API_SECRET"
|
2024-01-05 20:23:14 +08:00
|
|
|
|
2024-02-01 13:45:00 +08:00
|
|
|
metagpt_tti_url: "YOUR_MODEL_URL"
|
2024-07-22 17:10:29 +08:00
|
|
|
|
|
|
|
|
omniparse:
|
|
|
|
|
api_key: "YOUR_API_KEY"
|
|
|
|
|
base_url: "YOUR_BASE_URL"
|
2024-07-23 14:31:30 +08:00
|
|
|
|
2024-07-01 20:23:44 +08:00
|
|
|
models:
|
2024-07-16 14:43:23 +08:00
|
|
|
# "YOUR_MODEL_NAME_1 or YOUR_API_TYPE_1": # model: "gpt-4-turbo" # or gpt-3.5-turbo
|
2024-07-01 20:23:44 +08:00
|
|
|
# api_type: "openai" # or azure / ollama / groq etc.
|
|
|
|
|
# base_url: "YOUR_BASE_URL"
|
|
|
|
|
# api_key: "YOUR_API_KEY"
|
|
|
|
|
# proxy: "YOUR_PROXY" # for LLM API requests
|
|
|
|
|
# # timeout: 600 # Optional. If set to 0, default value is 300.
|
|
|
|
|
# # Details: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
|
|
|
|
|
# pricing_plan: "" # Optional. Use for Azure LLM when its model name is not the same as OpenAI's
|
2024-07-16 14:43:23 +08:00
|
|
|
# "YOUR_MODEL_NAME_2 or YOUR_API_TYPE_2": # api_type: "openai" # or azure / ollama / groq etc.
|
2024-07-01 20:23:44 +08:00
|
|
|
# api_type: "openai" # or azure / ollama / groq etc.
|
|
|
|
|
# base_url: "YOUR_BASE_URL"
|
|
|
|
|
# api_key: "YOUR_API_KEY"
|
|
|
|
|
# proxy: "YOUR_PROXY" # for LLM API requests
|
|
|
|
|
# # timeout: 600 # Optional. If set to 0, default value is 300.
|
|
|
|
|
# # Details: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
|
2024-07-23 14:31:30 +08:00
|
|
|
# pricing_plan: "" # Optional. Use for Azure LLM when its model name is not the same as OpenAI's
|