feat: merge mgx_ops

This commit is contained in:
莘权 马 2024-08-15 21:52:09 +08:00
commit 547bddd79a
8 changed files with 42 additions and 17 deletions

View file

@@ -46,6 +46,7 @@ async def add_exp(req: str, resp: str, tag: str, metric: Metric = None):
metric=metric or Metric(score=Score(val=10, reason="Manual")),
)
exp_manager = get_exp_manager()
exp_manager.config.exp_pool.enabled = True
exp_manager.config.exp_pool.enable_write = True
exp_manager.create_exp(exp)
logger.info(f"New experience created for the request `{req[:10]}`.")
@@ -59,8 +60,10 @@ async def add_exps(exps: list, tag: str):
tag: A tag for categorizing the experiences.
"""
tasks = [add_exp(req=json.dumps(exp["req"]), resp=exp["resp"], tag=tag) for exp in exps]
tasks = [
add_exp(req=exp["req"] if isinstance(exp["req"], str) else json.dumps(exp["req"]), resp=exp["resp"], tag=tag)
for exp in exps
]
await asyncio.gather(*tasks)

View file

@@ -15,6 +15,7 @@ async def main():
# Define the simple request and response
req = "Simple req"
resp = "Simple resp"
exp_manager = get_exp_manager()
# Add the new experience
exp = Experience(req=req, resp=resp, entry_type=EntryType.MANUAL)

View file

@@ -35,8 +35,10 @@ def exp_cache(
Note:
1. This can be applied to both synchronous and asynchronous functions.
2. The function must have a `req` parameter, and it must be provided as a keyword argument.
3. If `config.exp_pool.enable_read` is False, the decorator will just directly execute the function.
3. If `config.exp_pool.enabled` is False, the decorator will just directly execute the function.
4. If `config.exp_pool.enable_write` is False, the decorator will skip evaluating and saving the experience.
5. If `config.exp_pool.enable_read` is False, the decorator will skip reading from the experience pool.
Args:
_func: Just to make the decorator more flexible, for example, it can be used directly with @exp_cache by default, without the need for @exp_cache().

View file

@@ -8,7 +8,7 @@ EXTRA_INSTRUCTION = """
- For information searching requirement, you should use the Browser tool instead of web scraping.
- When no link is provided, you should use the Browser tool to search for the information.
7. When you are making plan. It is highly recommend to plan and append all the tasks in first response once time, except for 7.1.
7.1. When the requirement is given with a file, read the file first through either Editor.read (write code instead for csv or excel) WITHOUT a plan. After reading the file content, use RoleZero.reply_to_human if the requirement can be answered straightaway, otherwise, make a plan if further calculation is needed.
7.1. When the requirement is inquiring about a pdf, docx, md, or txt document, read the document first through either Editor.read WITHOUT a plan. After reading the document, use RoleZero.reply_to_human if the requirement can be answered straightaway, otherwise, make a plan if further calculation is needed.
8. Don't finish_current_task multiple times for the same task.
9. Finish current task timely, such as when the code is written and executed successfully.
10. When using the command 'end', add the command 'finish_current_task' before it.

View file

@@ -29,7 +29,7 @@ class RAGEmbeddingFactory(GenericFactory):
LLMType.AZURE: self._create_azure,
}
super().__init__(creators)
self.config = config if self.config else Config.default()
self.config = config if config else Config.default()
def get_rag_embedding(self, key: EmbeddingType = None) -> BaseEmbedding:
"""Key is EmbeddingType."""

View file

@@ -10,7 +10,7 @@ from llama_index.core.llms import (
LLMMetadata,
)
from llama_index.core.llms.callbacks import llm_completion_callback
from pydantic import Field, model_validator
from pydantic import Field
from metagpt.config2 import Config
from metagpt.llm import LLM
@@ -30,19 +30,30 @@ class RAGLLM(CustomLLM):
num_output: int = -1
model_name: str = ""
@model_validator(mode="after")
def update_from_config(self):
def __init__(
self,
model_infer: BaseLLM,
context_window: int = -1,
num_output: int = -1,
model_name: str = "",
*args,
**kwargs
):
super().__init__(*args, **kwargs)
config = Config.default()
if self.context_window < 0:
self.context_window = TOKEN_MAX.get(config.llm.model, DEFAULT_CONTEXT_WINDOW)
if context_window < 0:
context_window = TOKEN_MAX.get(config.llm.model, DEFAULT_CONTEXT_WINDOW)
if self.num_output < 0:
self.num_output = config.llm.max_token
if num_output < 0:
num_output = config.llm.max_token
if not self.model_name:
self.model_name = config.llm.model
if not model_name:
model_name = config.llm.model
return self
self.model_infer = model_infer
self.context_window = context_window
self.num_output = num_output
self.model_name = model_name
@property
def metadata(self) -> LLMMetadata:

View file

@@ -288,7 +288,9 @@ class RoleZero(Role):
# routing
memory = self.get_memories(k=4) # FIXME: A magic number for two rounds of Q&A
context = self.llm.format_msg(memory + [UserMessage(content=QUICK_THINK_PROMPT)])
intent_result = await self.llm.aask(context, system_msgs=[self.format_quick_system_prompt()])
async with ThoughtReporter() as reporter:
await reporter.async_report({"type": "classify"})
intent_result = await self.llm.aask(context, system_msgs=[self.format_quick_system_prompt()])
if "QUICK" in intent_result or "AMBIGUOUS" in intent_result: # llm call with the original context
async with ThoughtReporter(enable_llm_stream=True) as reporter:

View file

@@ -155,7 +155,10 @@ class TestExpCache:
@pytest.fixture
def mock_config(self, mocker):
return mocker.patch("metagpt.exp_pool.decorator.config")
config = Config.default().model_copy(deep=True)
default = mocker.patch("metagpt.config2.Config.default")
default.return_value = config
return config
@pytest.mark.asyncio
async def test_exp_cache_disabled(self, mock_config, mock_exp_manager):
@@ -171,7 +174,9 @@ class TestExpCache:
@pytest.mark.asyncio
async def test_exp_cache_enabled_no_perfect_exp(self, mock_config, mock_exp_manager, mock_scorer):
mock_config.exp_pool.enabled = True
mock_config.exp_pool.enable_read = True
mock_config.exp_pool.enable_write = True
mock_exp_manager.query_exps.return_value = []
@exp_cache(manager=mock_exp_manager, scorer=mock_scorer)
@@ -185,6 +190,7 @@ class TestExpCache:
@pytest.mark.asyncio
async def test_exp_cache_enabled_with_perfect_exp(self, mock_config, mock_exp_manager, mock_perfect_judge):
mock_config.exp_pool.enabled = True
mock_config.exp_pool.enable_read = True
perfect_exp = Experience(req="test", resp="perfect_result")
mock_exp_manager.query_exps.return_value = [perfect_exp]