Mirror of https://github.com/FoundationAgents/MetaGPT.git

commit aa5c6f7a1a
feat: merge geekan:main
49 changed files with 1974 additions and 893 deletions
Changes to the BaseGPTAPI provider:

```diff
@@ -9,7 +9,6 @@ import json
 from abc import abstractmethod
 from typing import Optional

 from metagpt.logs import logger
 from metagpt.provider.base_chatbot import BaseChatbot
```
```diff
@@ -55,7 +54,6 @@ class BaseGPTAPI(BaseChatbot):
             message.extend(format_msgs)
         message.append(self._user_msg(msg))
         rsp = await self.acompletion_text(message, stream=stream, generator=generator, timeout=timeout)
         logger.debug(message)
         # logger.debug(rsp)
         return rsp
```
metagpt/provider/fireworks_api.py (new file, 23 lines)
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc   : fireworks.ai's api

import openai

from metagpt.config import CONFIG
from metagpt.provider.openai_api import CostManager, OpenAIGPTAPI, RateLimiter


class FireWorksGPTAPI(OpenAIGPTAPI):
    def __init__(self):
        self.__init_fireworks(CONFIG)
        self.llm = openai
        self.model = CONFIG.fireworks_api_model
        self.auto_max_tokens = False
        self._cost_manager = CostManager()
        RateLimiter.__init__(self, rpm=self.rpm)

    def __init_fireworks(self, config: "Config"):
        openai.api_key = config.fireworks_api_key
        openai.api_base = config.fireworks_api_base
        self.rpm = int(config.get("RPM", 10))
```
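The class reuses the OpenAI SDK wholesale, swapping only the key, base URL, and model. A minimal usage sketch, assuming the fireworks keys are present in the config file that backs CONFIG; the prompt and flow below are illustrative, not part of the commit:

```python
# Illustrative sketch; assumes CONFIG carries fireworks_api_key,
# fireworks_api_base and fireworks_api_model loaded from the config file.
import asyncio

from metagpt.provider.fireworks_api import FireWorksGPTAPI

async def main():
    llm = FireWorksGPTAPI()  # wires openai.api_key/api_base at construction
    # aask is inherited from the OpenAIGPTAPI -> BaseGPTAPI chain
    print(await llm.aask("Say hello in one short sentence."))

asyncio.run(main())
```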
metagpt/provider/open_llm_api.py (new file, 46 lines)
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc   : self-host open llm model with openai-compatible interface

import openai

from metagpt.config import CONFIG
from metagpt.logs import logger
from metagpt.provider.openai_api import CostManager, OpenAIGPTAPI, RateLimiter


class OpenLLMCostManager(CostManager):
    """open llm models are self-hosted, so usage is free of charge"""

    def update_cost(self, prompt_tokens, completion_tokens, model):
        """
        Update the total cost, prompt tokens, and completion tokens.

        Args:
            prompt_tokens (int): The number of tokens used in the prompt.
            completion_tokens (int): The number of tokens used in the completion.
            model (str): The model used for the API call.
        """
        self.total_prompt_tokens += prompt_tokens
        self.total_completion_tokens += completion_tokens

        logger.info(
            f"Max budget: ${CONFIG.max_budget:.3f} | "
            f"prompt_tokens: {prompt_tokens}, completion_tokens: {completion_tokens}"
        )
        CONFIG.total_cost = self.total_cost


class OpenLLMGPTAPI(OpenAIGPTAPI):
    def __init__(self):
        self.__init_openllm(CONFIG)
        self.llm = openai
        self.model = CONFIG.open_llm_api_model
        self.auto_max_tokens = False
        self._cost_manager = OpenLLMCostManager()
        RateLimiter.__init__(self, rpm=self.rpm)

    def __init_openllm(self, config: "Config"):
        openai.api_key = "sk-xx"  # a self-hosted API doesn't need a real key; use a placeholder
        openai.api_base = config.open_llm_api_base
        self.rpm = int(config.get("RPM", 10))
```
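Since any OpenAI-compatible endpoint works, here is a hedged sketch of running against a local server; the base URL and model name are invented examples and would normally live in the config file:

```python
# Illustrative sketch; assumes OPEN_LLM_API_BASE (e.g. "http://127.0.0.1:8000/v1",
# any OpenAI-compatible server) and OPEN_LLM_API_MODEL are set in the config
# file that backs CONFIG. Both values are invented examples.
import asyncio

from metagpt.provider.open_llm_api import OpenLLMGPTAPI

async def main():
    llm = OpenLLMGPTAPI()  # api_key is stubbed to "sk-xx"; only the base URL matters
    print(await llm.aask("ping"))  # OpenLLMCostManager tracks tokens but adds no cost

asyncio.run(main())
```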
metagpt/provider/postprecess/__init__.py (new file, 3 lines)
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc   :
```
metagpt/provider/postprecess/base_postprecess_plugin.py (new file, 70 lines)
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc   : base llm postprocess plugin to do operations like repairing the raw llm output

from typing import Union

from metagpt.logs import logger
from metagpt.utils.repair_llm_raw_output import (
    RepairType,
    extract_content_from_output,
    repair_llm_raw_output,
    retry_parse_json_text,
)


class BasePostPrecessPlugin(object):
    model = None  # the model this plugin targets, used for dispatch in `llm_postprecess`

    def run_repair_llm_output(self, output: str, schema: dict, req_key: str = "[/CONTENT]") -> Union[dict, list]:
        """
        Repair steps:
            1. repair case-sensitivity problems using the schema's fields
            2. extract the content from the req_key pair ( xx[REQ_KEY]xxx[/REQ_KEY]xx )
            3. repair invalid json text in the content
            4. parse the json text and repair it according to the exception, with a retry loop
        """
        output_class_fields = list(schema["properties"].keys())  # Custom ActionOutput's fields

        content = self.run_repair_llm_raw_output(output, req_keys=output_class_fields + [req_key])
        content = self.run_extract_content_from_output(content, right_key=req_key)
        # # req_keys mocked
        content = self.run_repair_llm_raw_output(content, req_keys=[None], repair_type=RepairType.JSON)
        parsed_data = self.run_retry_parse_json_text(content)

        return parsed_data

    def run_repair_llm_raw_output(self, content: str, req_keys: list[str], repair_type: str = None) -> str:
        """inheriting classes can re-implement this function"""
        return repair_llm_raw_output(content, req_keys=req_keys, repair_type=repair_type)

    def run_extract_content_from_output(self, content: str, right_key: str) -> str:
        """inheriting classes can re-implement this function"""
        return extract_content_from_output(content, right_key=right_key)

    def run_retry_parse_json_text(self, content: str) -> Union[dict, list]:
        """inheriting classes can re-implement this function"""
        logger.info(f"extracted json CONTENT from output:\n{content}")
        parsed_data = retry_parse_json_text(output=content)  # should use output=content
        return parsed_data

    def run(self, output: str, schema: dict, req_key: str = "[/CONTENT]") -> Union[dict, list]:
        """
        Used for prompts that require a json-format output wrapped in an outer pair key, like
            [REQ_KEY]
                {
                    "Key": "value"
                }
            [/REQ_KEY]

        Args:
            output (str): llm raw output
            schema: output json schema
            req_key: outer pair right key, usually in `[/REQ_KEY]` format
        """
        assert len(schema.get("properties")) > 0
        assert "/" in req_key

        # currently, postprocess only runs the repair_llm_raw_output step
        new_output = self.run_repair_llm_output(output=output, schema=schema, req_key=req_key)
        return new_output
```
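To make the repair pipeline concrete, a hedged usage sketch follows; the schema and the raw output are invented for illustration, matching the [CONTENT]...[/CONTENT] convention described in run()'s docstring:

```python
# Illustrative only: the schema and raw LLM output below are made up;
# BasePostPrecessPlugin is the class added in this commit.
from metagpt.provider.postprecess.base_postprecess_plugin import BasePostPrecessPlugin

schema = {"properties": {"Language": {"type": "string"}}}  # minimal ActionOutput-style schema
raw_output = """[CONTENT]
{
    "Language": "en_us"
}
[/CONTENT]"""

plugin = BasePostPrecessPlugin()
parsed = plugin.run(output=raw_output, schema=schema, req_key="[/CONTENT]")
print(parsed)  # expected: {'Language': 'en_us'}
```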
metagpt/provider/postprecess/llm_output_postprecess.py (new file, 20 lines)
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc   : the entry point for choosing which PostProcessPlugin handles a particular LLM model's output

from typing import Union

from metagpt.provider.postprecess.base_postprecess_plugin import BasePostPrecessPlugin


def llm_output_postprecess(
    output: str, schema: dict, req_key: str = "[/CONTENT]", model_name: str = None
) -> Union[dict, str]:
    """
    Falls back to BasePostPrecessPlugin if no plugin matches.
    """
    # TODO: choose a different model's plugin according to model_name
    postprecess_plugin = BasePostPrecessPlugin()

    result = postprecess_plugin.run(output=output, schema=schema, req_key=req_key)
    return result
```
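And a sketch of the dispatch entry point in use, with the same invented schema and output as above; model_name is accepted but unused until per-model plugins exist:

```python
# Illustrative sketch; schema and raw_output are invented, as in the previous example.
from metagpt.provider.postprecess.llm_output_postprecess import llm_output_postprecess

schema = {"properties": {"Language": {"type": "string"}}}
raw_output = '[CONTENT]\n{"Language": "en_us"}\n[/CONTENT]'

# model_name is ignored for now (see the TODO); BasePostPrecessPlugin handles everything
parsed = llm_output_postprecess(output=raw_output, schema=schema, model_name="gpt-4")
print(parsed)  # expected: {'Language': 'en_us'}
```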