Mirror of https://github.com/FoundationAgents/MetaGPT.git
remove global config

parent 6e0990f251
commit 2968c181c1

39 changed files with 193 additions and 123 deletions
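Every hunk below applies the same refactor: the module-level config singleton imported from metagpt.config2 is replaced by the Config class, and each call site either resolves Config.default() lazily at call time or accepts an optional config argument so callers can inject their own instance.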
@@ -7,10 +7,11 @@
 """
 from llama_index.embeddings.openai import OpenAIEmbedding

-from metagpt.config2 import config
+from metagpt.config2 import Config


 def get_embedding() -> OpenAIEmbedding:
+    config = Config.default()
     llm = config.get_openai_llm()
     if llm is None:
         raise ValueError("To use OpenAIEmbedding, please ensure that config.llm.api_type is correctly set to 'openai'.")
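The hunk ends at the raise, so the refactored function is cut off here. A minimal sketch of how it plausibly continues, resolving the Config per call; the OpenAIEmbedding constructor arguments in the last line are an assumption, not part of this diff:

    from llama_index.embeddings.openai import OpenAIEmbedding

    from metagpt.config2 import Config


    def get_embedding() -> OpenAIEmbedding:
        # Resolved per call instead of imported as a module-level singleton.
        config = Config.default()
        llm = config.get_openai_llm()
        if llm is None:
            raise ValueError("To use OpenAIEmbedding, please ensure that config.llm.api_type is correctly set to 'openai'.")
        # Assumed: the resolved LLM config exposes api_key and base_url.
        return OpenAIEmbedding(api_key=llm.api_key, api_base=llm.base_url)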
@@ -13,10 +13,11 @@ from semantic_kernel.connectors.ai.open_ai.services.open_ai_chat_completion import (
     OpenAIChatCompletion,
 )

-from metagpt.config2 import config
+from metagpt.config2 import Config


 def make_sk_kernel():
+    config = Config.default()
     kernel = sk.Kernel()
     if llm := config.get_azure_llm():
         kernel.add_chat_service(
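As above, the hunk stops mid-call. A sketch of the whole helper after the refactor; the service wiring follows the pre-1.0 semantic-kernel API, and everything past add_chat_service( is an assumption rather than part of this diff:

    import semantic_kernel as sk
    from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import (
        AzureChatCompletion,
    )
    from semantic_kernel.connectors.ai.open_ai.services.open_ai_chat_completion import (
        OpenAIChatCompletion,
    )

    from metagpt.config2 import Config


    def make_sk_kernel():
        config = Config.default()  # was a module-level singleton before this commit
        kernel = sk.Kernel()
        if llm := config.get_azure_llm():
            kernel.add_chat_service(
                "chat_completion",
                AzureChatCompletion(llm.model, llm.base_url, llm.api_key),
            )
        elif llm := config.get_openai_llm():
            kernel.add_chat_service(
                "chat_completion",
                OpenAIChatCompletion(llm.model, llm.api_key),
            )
        return kernel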
@@ -9,12 +9,14 @@ import asyncio
 import os
 from pathlib import Path

-from metagpt.config2 import config
+from metagpt.config2 import Config
 from metagpt.logs import logger
 from metagpt.utils.common import awrite, check_cmd_exists


-async def mermaid_to_file(engine, mermaid_code, output_file_without_suffix, width=2048, height=2048) -> int:
+async def mermaid_to_file(
+    engine, mermaid_code, output_file_without_suffix, width=2048, height=2048, config=None
+) -> int:
     """suffix: png/svg/pdf

     :param mermaid_code: mermaid code
@@ -24,6 +26,7 @@ async def mermaid_to_file(engine, mermaid_code, output_file_without_suffix, width=2048, height=2048) -> int:
     :return: 0 if succeed, -1 if failed
     """
     # Write the Mermaid code to a temporary file
+    config = config if config else Config.default()
     dir_name = os.path.dirname(output_file_without_suffix)
     if dir_name and not os.path.exists(dir_name):
         os.makedirs(dir_name)
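Note that the fallback lives in the body, not the signature. A default of config=Config.default() would be evaluated once at import and frozen, recreating exactly the global state this commit removes; None as the sentinel keeps resolution lazy. A self-contained illustration of the idiom (names here are generic, not from the diff):

    from typing import Optional


    class Config:
        @classmethod
        def default(cls) -> "Config":
            return cls()  # stands in for metagpt's process-wide default


    async def render(mermaid_code: str, config: Optional[Config] = None) -> int:
        # Inject-or-default: prefer the caller's Config, resolve the
        # default lazily only when nothing was passed.
        config = config if config else Config.default()
        return 0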
@@ -10,11 +10,11 @@ from urllib.parse import urljoin

 from pyppeteer import launch

-from metagpt.config2 import config
+from metagpt.config2 import Config
 from metagpt.logs import logger


-async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048, height=2048) -> int:
+async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048, height=2048, config=None) -> int:
     """
     Converts the given Mermaid code to various output formats and saves them to files.
@@ -27,6 +27,7 @@ async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048, height=2048) -> int:
     Returns:
         int: Returns 1 if the conversion and saving were successful, -1 otherwise.
     """
+    config = config if config else Config.default()
     suffixes = ["png", "svg", "pdf"]
     __dirname = os.path.dirname(os.path.abspath(__file__))
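With the parameter in place, callers and tests can hand in their own Config instead of depending on process-wide state. A hypothetical caller; the module path of this pyppeteer-based mermaid_to_file is not shown in the diff and is assumed:

    import asyncio

    from metagpt.config2 import Config
    from metagpt.utils.mmdc_pyppeteer import mermaid_to_file  # assumed path


    async def main() -> None:
        cfg = Config.default()  # or a Config built from a test fixture
        # Writes ./diagram.png, ./diagram.svg and ./diagram.pdf.
        await mermaid_to_file("flowchart TD; A-->B", "./diagram", config=cfg)


    asyncio.run(main())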
@@ -4,12 +4,12 @@

 import copy
 from enum import Enum
-from typing import Callable, Union
+from typing import Callable, Optional, Union

 import regex as re
 from tenacity import RetryCallState, retry, stop_after_attempt, wait_fixed

-from metagpt.config2 import config
+from metagpt.config2 import Config
 from metagpt.logs import logger
 from metagpt.utils.custom_decoder import CustomDecoder
@@ -154,7 +154,9 @@ def _repair_llm_raw_output(output: str, req_key: str, repair_type: RepairType = None) -> str:
     return output


-def repair_llm_raw_output(output: str, req_keys: list[str], repair_type: RepairType = None) -> str:
+def repair_llm_raw_output(
+    output: str, req_keys: list[str], repair_type: RepairType = None, config: Optional[Config] = None
+) -> str:
     """
     in open-source llm model, it usually can't follow the instruction well, the output may be incomplete,
     so here we try to repair it and use all repair methods by default.
@@ -169,6 +171,7 @@ def repair_llm_raw_output(output: str, req_keys: list[str], repair_type: RepairType = None) -> str:
             target: { xxx }
             output: { xxx }]
     """
+    config = config if config else Config.default()
     if not config.repair_llm_output:
         return output
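Threading the Config through means the repair_llm_output gate can now be toggled per call. A hypothetical sketch; it assumes repair_llm_output is an ordinary writable field and guesses the function's module path, which the diff does not show:

    from metagpt.config2 import Config
    from metagpt.utils.repair_llm_raw_output import repair_llm_raw_output  # assumed path

    cfg = Config.default()
    cfg.repair_llm_output = False  # assumed writable; disables every repair method
    raw = '{ "next_action": "None" }]'
    # With the gate off, the raw output must come back unchanged.
    assert repair_llm_raw_output(raw, req_keys=["next_action"], config=cfg) == raw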
@@ -256,6 +259,7 @@ def run_after_exp_and_passon_next_retry(logger: "loguru.Logger") -> Callable[["RetryCallState"], None]:
             "next_action":"None"
         }
         """
+        config = Config.default()
         if retry_state.outcome.failed:
             if retry_state.args:
                 # # can't be used as args=retry_state.args
@@ -276,8 +280,12 @@ def run_after_exp_and_passon_next_retry(logger: "loguru.Logger") -> Callable[["RetryCallState"], None]:
     return run_and_passon


+def repair_stop_after_attempt(retry_state):
+    return stop_after_attempt(3 if Config.default().repair_llm_output else 0)(retry_state)
+
+
 @retry(
-    stop=stop_after_attempt(3 if config.repair_llm_output else 0),
+    stop=repair_stop_after_attempt,
     wait=wait_fixed(1),
     after=run_after_exp_and_passon_next_retry(logger),
 )
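The @retry change is the subtle one. Decorator arguments are evaluated once, at import time, so the old stop=stop_after_attempt(3 if config.repair_llm_output else 0) baked the global config's value into the module for good. tenacity also accepts any callable taking a RetryCallState as stop, which is what repair_stop_after_attempt exploits: it re-reads Config.default() on every retry decision. A self-contained sketch of the same trick, with a plain dict standing in for Config:

    from tenacity import RetryError, retry, stop_after_attempt, wait_fixed

    SETTINGS = {"repair_llm_output": True}  # stands in for Config.default()


    def lazy_stop(retry_state):
        # Re-evaluated on every attempt, not once at import time.
        return stop_after_attempt(3 if SETTINGS["repair_llm_output"] else 0)(retry_state)


    @retry(stop=lazy_stop, wait=wait_fixed(0.1))
    def flaky():
        raise RuntimeError("always fails")


    try:
        flaky()  # three attempts while the flag is on, then RetryError
    except RetryError:
        pass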