diff --git a/config/config2.example.yaml b/config/config2.example.yaml index 7f4758acb..7cfd70347 100644 --- a/config/config2.example.yaml +++ b/config/config2.example.yaml @@ -17,10 +17,10 @@ llm: # For backward compatibility, if the embedding is not set and the llm's api_type is either openai or azure, the llm's config will be used. embedding: api_type: "" # openai / azure / gemini / ollama etc. Check EmbeddingType for more options. - base_url: "YOU_BASE_URL" - api_key: "YOU_API_KEY" - model: "YOU_MODEL" - api_version: "YOU_API_VERSION" + base_url: "" + api_key: "" + model: "" + api_version: "" embed_batch_size: 100 repair_llm_output: true # when the output is not a valid json, try to repair it diff --git a/metagpt/configs/embedding_config.py b/metagpt/configs/embedding_config.py index 545c2a9cc..20de47999 100644 --- a/metagpt/configs/embedding_config.py +++ b/metagpt/configs/embedding_config.py @@ -14,7 +14,25 @@ class EmbeddingType(Enum): class EmbeddingConfig(YamlModel): - """Config for Embedding.""" + """Config for Embedding. + + Examples: + --------- + api_type: "openai" + api_key: "YOUR_API_KEY" + + api_type: "azure" + api_key: "YOUR_API_KEY" + base_url: "YOUR_BASE_URL" + api_version: "YOUR_API_VERSION" + + api_type: "gemini" + api_key: "YOUR_API_KEY" + + api_type: "ollama" + base_url: "YOUR_BASE_URL" + model: "YOUR_MODEL" + """ api_type: Optional[EmbeddingType] = None api_key: Optional[str] = None