Remove an unnecessary function call

This commit is contained in:
yzlin 2024-02-27 17:59:50 +08:00
parent 219d361ca6
commit a2b85641c3
9 changed files with 54 additions and 80 deletions

View file

@ -1,9 +1,10 @@
from __future__ import annotations
import json
from metagpt.actions import Action
from metagpt.logs import logger
from metagpt.schema import Message
from metagpt.utils.common import create_func_call_config
from metagpt.utils.common import CodeParser
DEBUG_REFLECTION_EXAMPLE = '''
Example 1:
@ -48,28 +49,15 @@ Here is an example for you.
{runtime_result}
Analysis the error step by step, provide me improve method and code. Remember to follow [context] requirement. Don't forget write code for steps behind the error step.
[reflection on previous impl]:
xxx
"""
# OpenAI-style function-calling schema (JSON Schema "parameters" format):
# constrains the model to return a structured reflection on its previous
# implementation ("reflection") plus the refined code ("improved_impl").
CODE_REFLECTION = {
"name": "execute_reflection_code",
"description": "Execute reflection code.",
"parameters": {
"type": "object",
"properties": {
"reflection": {
"type": "string",
"description": "Reflection on previous impl.",
},
"improved_impl": {
"type": "string",
"description": "Refined code after reflection.",
},
},
# Both fields are mandatory in the model's response.
"required": ["reflection", "improved_impl"],
},
}
Output a json following the format:
```json
{{
"reflection": str = "Reflection on previous implementation",
"improved_impl": str = "Refined code after reflection.",
}}
```
"""
class DebugCode(Action):
@ -91,7 +79,6 @@ class DebugCode(Action):
str: The improved implementation based on the debugging process.
"""
info = []
reflection_prompt = REFLECTION_PROMPT.format(
debug_example=DEBUG_REFLECTION_EXAMPLE,
context=context,
@ -99,11 +86,8 @@ class DebugCode(Action):
runtime_result=runtime_result,
)
system_prompt = "You are an AI Python assistant. You will be given your previous implementation code of a task, runtime error results, and a hint to change the implementation appropriately. Write your full implementation "
info.append(Message(role="system", content=system_prompt))
info.append(Message(role="user", content=reflection_prompt))
tool_config = create_func_call_config(CODE_REFLECTION)
reflection = await self.llm.aask_code(messages=info, **tool_config)
logger.info(f"reflection is {reflection}")
rsp = await self._aask(reflection_prompt, system_msgs=[system_prompt])
reflection = json.loads(CodeParser.parse_code(block=None, text=rsp))
return {"code": reflection["improved_impl"]}

View file

@ -41,7 +41,7 @@ class WriteCodeWithToolsML(WriteCodeWithTools):
examples=USE_TOOLS_EXAMPLE if tool_schemas else USE_NO_TOOLS_EXAMPLE,
)
rsp = await self.llm.aask_code(prompt, language="python")
rsp = await self.llm.aask_code(prompt)
# Extra output to be used for potential debugging
context = [Message(content=prompt, role="user")]
@ -55,5 +55,5 @@ class UpdateDataColumns(Action):
code_context = [remove_comments(task.code) for task in finished_tasks]
code_context = "\n\n".join(code_context)
prompt = UPDATE_DATA_COLUMNS.format(history_code=code_context)
rsp = await self.llm.aask_code(prompt, language="python")
rsp = await self.llm.aask_code(prompt)
return rsp

View file

@ -6,19 +6,19 @@
"""
from __future__ import annotations
import json
from typing import Tuple
from metagpt.actions import Action
from metagpt.logs import logger
from metagpt.prompts.mi.write_analysis_code import (
SELECT_FUNCTION_TOOLS,
TOOL_RECOMMENDATION_PROMPT,
TOOL_USAGE_PROMPT,
)
from metagpt.schema import Message, Plan, SystemMessage
from metagpt.tools import TOOL_REGISTRY
from metagpt.tools.tool_registry import validate_tool_names
from metagpt.utils.common import create_func_call_config
from metagpt.utils.common import CodeParser
class WriteCodeWithTools(Action):
@ -69,9 +69,9 @@ class WriteCodeWithTools(Action):
current_task=task,
available_tools=available_tools,
)
tool_config = create_func_call_config(SELECT_FUNCTION_TOOLS)
rsp = await self.llm.aask_code(prompt, **tool_config)
recommend_tools = rsp["recommend_tools"]
rsp = await self._aask(prompt)
rsp = CodeParser.parse_code(block=None, text=rsp)
recommend_tools = json.loads(rsp)
logger.info(f"Recommended tools: \n{recommend_tools}")
# Parses and validates the recommended tools, for LLM might hallucinate and recommend non-existing tools
@ -126,6 +126,6 @@ class WriteCodeWithTools(Action):
# prepare prompt & LLM call
prompt = self._insert_system_message(context)
rsp = await self.llm.aask_code(prompt, language="python")
rsp = await self.llm.aask_code(prompt)
return rsp

View file

@ -20,22 +20,22 @@ from metagpt.utils.common import CodeParser
class WritePlan(Action):
PROMPT_TEMPLATE: str = """
# Context:
__context__
{context}
# Available Task Types:
__task_type_desc__
{task_type_desc}
# Task:
Based on the context, write a plan or modify an existing plan of what you should do to achieve the goal. A plan consists of one to __max_tasks__ tasks.
Based on the context, write a plan or modify an existing plan of what you should do to achieve the goal. A plan consists of one to {max_tasks} tasks.
If you are modifying an existing plan, carefully follow the instruction, don't make unnecessary changes. Give the whole plan unless instructed to modify only one task of the plan.
If you encounter errors on the current task, revise and output the current single task only.
Output a list of jsons following the format:
```json
[
{
{{
"task_id": str = "unique identifier for a task in plan, can be an ordinal",
"dependent_task_ids": list[str] = "ids of tasks prerequisite to this task",
"instruction": "what you should do in this task, one short phrase or sentence",
"task_type": "type of this task, should be one of Available Task Types",
},
}},
...
]
```
@ -45,10 +45,8 @@ class WritePlan(Action):
task_type_desc = "\n".join(
[f"- **{tool_type.name}**: {tool_type.desc}" for tool_type in TOOL_REGISTRY.get_tool_types().values()]
) # task type are binded with tool type now, should be improved in the future
prompt = (
self.PROMPT_TEMPLATE.replace("__context__", "\n".join([str(ct) for ct in context]))
# .replace("__current_plan__", current_plan)
.replace("__max_tasks__", str(max_tasks)).replace("__task_type_desc__", task_type_desc)
prompt = self.PROMPT_TEMPLATE.format(
context="\n".join([str(ct) for ct in context]), max_tasks=max_tasks, task_type_desc=task_type_desc
)
rsp = await self._aask(prompt)
rsp = CodeParser.parse_code(block=None, text=rsp)

View file

@ -13,25 +13,12 @@ Recommend up to five tools from 'Available Tools' that can help solve the 'User
- If you believe that no tools are suitable, indicate with an empty list.
- Only list the names of the tools, not the full schema of each tool.
- Ensure selected tools are listed in 'Available Tools'.
- Output a json list of tool names:
```json
["tool_name1", "tool_name2", ...]
```
"""
# OpenAI-style function-calling schema: the model must return
# "recommend_tools", a (possibly empty) array of tool-name strings judged
# suitable for the current task.
SELECT_FUNCTION_TOOLS = {
"name": "select_function_tools",
"description": "For current task, select suitable tools for it.",
"parameters": {
"type": "object",
"properties": {
"recommend_tools": {
"type": "array",
"description": "List of tool names. Empty list if no tool is suitable.",
"items": {
"type": "string",
},
},
},
"required": ["recommend_tools"],
},
}
TOOL_USAGE_PROMPT = """
# Instruction

View file

@ -25,6 +25,23 @@ GENERAL_FUNCTION_SCHEMA = {
},
}
# Function-calling schema that restricts the model's response to a single
# required "code" string — the cell content to append to a Jupyter notebook.
CODE_ONLY_FUNCTION_SCHEMA = {
"name": "add_new_code",
"description": "Add new code cell of current task to the end of an active Jupyter notebook.",
"parameters": {
"type": "object",
"properties": {
"code": {
"type": "string",
"description": "The code to be added to a new cell in jupyter.",
},
},
"required": ["code"],
},
}
# tool_choice value for general_function_schema
# https://platform.openai.com/docs/api-reference/chat/create#chat-create-tool_choice
# Forces the chat completion to invoke the function named "execute" instead of
# producing a free-form text reply.
GENERAL_TOOL_CHOICE = {"type": "function", "function": {"name": "execute"}}

View file

@ -9,7 +9,6 @@
import json
import re
from copy import deepcopy
from typing import AsyncIterator, Optional, Union
from openai import APIConnectionError, AsyncOpenAI, AsyncStream
@ -27,7 +26,7 @@ from tenacity import (
from metagpt.configs.llm_config import LLMConfig, LLMType
from metagpt.logs import log_llm_stream, logger
from metagpt.provider.base_llm import BaseLLM
from metagpt.provider.constant import GENERAL_FUNCTION_SCHEMA
from metagpt.provider.constant import CODE_ONLY_FUNCTION_SCHEMA, GENERAL_FUNCTION_SCHEMA
from metagpt.provider.llm_provider_registry import register_provider
from metagpt.schema import Message
from metagpt.utils.common import CodeParser, decode_image
@ -177,7 +176,7 @@ class OpenAILLM(BaseLLM):
self._update_costs(rsp.usage)
return rsp
async def aask_code(self, messages: list[dict], timeout: int = 3, language: str = "", **kwargs) -> dict:
async def aask_code(self, messages: list[dict], timeout: int = 3, include_language: bool = False, **kwargs) -> dict:
"""Use function of tools to ask a code.
Note: Keep kwargs consistent with https://platform.openai.com/docs/api-reference/chat/create
@ -185,12 +184,12 @@ class OpenAILLM(BaseLLM):
>>> llm = OpenAILLM()
>>> msg = [{'role': 'user', 'content': "Write a python hello world code."}]
>>> rsp = await llm.aask_code(msg)
# -> {'code': "print('Hello, World!')"}
>>> rsp = await llm.aask_code(msg, include_language=True)
# -> {'language': 'python', 'code': "print('Hello, World!')"}
"""
if "tools" not in kwargs:
function_schema = deepcopy(GENERAL_FUNCTION_SCHEMA)
if language:
function_schema["parameters"]["properties"]["language"]["enum"] = [language]
function_schema = GENERAL_FUNCTION_SCHEMA if include_language else CODE_ONLY_FUNCTION_SCHEMA
configs = {"tools": [{"type": "function", "function": function_schema}]}
kwargs.update(configs)
rsp = await self._achat_completion_function(messages, **kwargs)

View file

@ -41,7 +41,6 @@ class MLEngineer(Interpreter):
runtime_result=self.working_memory.get(),
context=self.debug_context,
)
logger.info(f"new code \n{code}")
cause_by = DebugCode
self.latest_code = code["code"]

View file

@ -361,16 +361,6 @@ def parse_recipient(text):
return ""
def create_func_call_config(func_schema: dict) -> dict:
    """Build an OpenAI chat-completion config that forces one function call.

    Args:
        func_schema: Function-calling schema dict; must contain a "name" key.

    Returns:
        dict with two keys: "tools", a single-element list wrapping the
        schema as a function tool, and "tool_choice", pinned to that
        function's name so the model must call it.
    """
    function_name = func_schema["name"]
    return {
        "tools": [{"type": "function", "function": func_schema}],
        "tool_choice": {"type": "function", "function": {"name": function_name}},
    }
def remove_comments(code_str: str) -> str:
"""Remove comments from code."""
pattern = r"(\".*?\"|\'.*?\')|(\#.*?$)"