diff --git a/config/config.yaml b/config/config.yaml index bed67083c..694251f17 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -5,7 +5,7 @@ ## The official OPENAI_API_BASE is https://api.openai.com/v1 ## If the official OPENAI_API_BASE is not available, we recommend using the [openai-forward](https://github.com/beidongjiedeguang/openai-forward). ## Or, you can configure OPENAI_PROXY to access official OPENAI_API_BASE. -OPENAI_API_BASE: "https://api.openai.com/v1" +#OPENAI_API_BASE: "https://api.openai.com/v1" #OPENAI_PROXY: "http://127.0.0.1:8118" #OPENAI_API_KEY: "YOUR_API_KEY" # set the value to sk-xxx if you host the openai interface for open llm model OPENAI_API_MODEL: "gpt-4" @@ -24,12 +24,13 @@ RPM: 10 #### if AZURE, check https://github.com/openai/openai-cookbook/blob/main/examples/azure/chat.ipynb #### You can use ENGINE or DEPLOYMENT mode -#OPENAI_API_TYPE: "azure" -#OPENAI_API_BASE: "YOUR_AZURE_ENDPOINT" -#OPENAI_API_KEY: "YOUR_AZURE_API_KEY" -#OPENAI_API_VERSION: "YOUR_AZURE_API_VERSION" -#DEPLOYMENT_NAME: "YOUR_DEPLOYMENT_NAME" -#DEPLOYMENT_ID: "YOUR_DEPLOYMENT_ID" +OPENAI_API_TYPE: "azure" +OPENAI_API_BASE: "YOUR_AZURE_ENDPOINT" +OPENAI_API_KEY: "YOUR_AZURE_API_KEY" +#OPENAI_API_VERSION: "2023-05-15" +OPENAI_API_VERSION: "2023-07-01-preview" +DEPLOYMENT_ID: "GPT-4" +OPENAI_API_ENGINE: "gpt-4" #### if zhipuai from `https://open.bigmodel.cn`. 
You can set here or export API_KEY="YOUR_API_KEY" # ZHIPUAI_API_KEY: "YOUR_API_KEY" diff --git a/metagpt/roles/ml_engineer.py b/metagpt/roles/ml_engineer.py index 15edb2b06..c088ff104 100644 --- a/metagpt/roles/ml_engineer.py +++ b/metagpt/roles/ml_engineer.py @@ -10,7 +10,7 @@ from metagpt.actions import Action from metagpt.actions.execute_code import ExecutePyCode from metagpt.actions.write_analysis_code import WriteCodeByGenerate, WriteCodeWithTools from metagpt.actions.write_plan import WritePlan -from metagpt.actions.write_task_guide import WriteTaskGuide +# from metagpt.actions.write_task_guide import WriteTaskGuide from metagpt.logs import logger from metagpt.prompts.ml_engineer import GEN_DATA_DESC_PROMPT from metagpt.roles import Role @@ -39,7 +39,7 @@ catboost def truncate(result: str, keep_len: int = 1000) -> str: desc = "Truncated to show only the last 1000 characters\n" if result.startswith(desc): - result = result[-len(desc) :] + result = result[-len(desc):] if len(result) > keep_len: result = result[-keep_len:] @@ -110,9 +110,9 @@ class AskReview(Action): logger.info("most recent context:") latest_action = context[-1].cause_by.__name__ if context[-1].cause_by else "" prompt = f"\nPlease review output from {latest_action}:\n" \ - "If you want to change a task in the plan, say 'change task task_id, ... (things to change)'\n" \ - "If you confirm the output and wish to continue with the current process, type CONFIRM\n" \ - "If you want to terminate the process, type exit:\n" + "If you want to change a task in the plan, say 'change task task_id, ... 
(things to change)'\n" \ + "If you confirm the output and wish to continue with the current process, type CONFIRM\n" \ + "If you want to terminate the process, type exit:\n" rsp = input(prompt) if rsp.lower() in ("exit"): @@ -148,7 +148,7 @@ class GenerateDataDesc(Action): class MLEngineer(Role): def __init__( - self, name="ABC", profile="MLEngineer", goal="", auto_run: bool = False, data_path: str = None + self, name="ABC", profile="MLEngineer", goal="", auto_run: bool = False, data_path: str = None ): super().__init__(name=name, profile=profile, goal=goal) self._set_react_mode(react_mode="plan_and_act") @@ -300,11 +300,15 @@ if __name__ == "__main__": # requirement = "Run data analysis on sklearn Wisconsin Breast Cancer dataset, include a plot, train a model to predict targets (20% as validation), and show validation accuracy" # requirement = "Run EDA and visualization on this dataset, train a model to predict survival, report metrics on validation set (20%), dataset: workspace/titanic/train.csv" + from metagpt.const import DATA_PATH + requirement = "Perform data analysis on the provided data. Train a model to predict the target variable Survived. Include data preprocessing, feature engineering, and modeling in your pipeline. The metric is accuracy." 
- data_path = "/data/lidanyang/tabular_data/titanic" + data_path = f"{DATA_PATH}/titanic" + async def main(requirement: str = requirement, auto_run: bool = True, data_path: str = data_path): role = MLEngineer(goal=requirement, auto_run=auto_run, data_path=data_path) await role.run(requirement) + fire.Fire(main) diff --git a/metagpt/tools/functions/__init__.py b/metagpt/tools/functions/__init__.py index 30ee10827..d4a1ff73b 100644 --- a/metagpt/tools/functions/__init__.py +++ b/metagpt/tools/functions/__init__.py @@ -6,4 +6,4 @@ # @Desc : from metagpt.tools.functions.register.register import registry import metagpt.tools.functions.libs.feature_engineering -import metagpt.tools.functions.libs.data_preprocess +# import metagpt.tools.functions.libs.data_preprocess diff --git a/metagpt/tools/web_browser_engine.py b/metagpt/tools/web_browser_engine.py index 453d87f31..7228ae9cf 100644 --- a/metagpt/tools/web_browser_engine.py +++ b/metagpt/tools/web_browser_engine.py @@ -7,7 +7,7 @@ from typing import Any, Callable, Coroutine, Literal, overload from metagpt.config import CONFIG from metagpt.tools import WebBrowserEngineType -from metagpt.utils.parse_html import WebPage +# from metagpt.utils.parse_html import WebPage class WebBrowserEngine: diff --git a/metagpt/utils/__init__.py b/metagpt/utils/__init__.py index f13175cf8..86cac50db 100644 --- a/metagpt/utils/__init__.py +++ b/metagpt/utils/__init__.py @@ -6,7 +6,7 @@ @File : __init__.py """ -from metagpt.utils.read_document import read_docx +# from metagpt.utils.read_document import read_docx from metagpt.utils.singleton import Singleton from metagpt.utils.token_counter import ( TOKEN_COSTS, @@ -16,7 +16,7 @@ from metagpt.utils.token_counter import ( __all__ = [ - "read_docx", + # "read_docx", "Singleton", "TOKEN_COSTS", "count_message_tokens", diff --git a/requirements.txt b/requirements.txt index 1d1bc95a1..9b75fd200 100644 --- a/requirements.txt +++ b/requirements.txt @@ -35,7 +35,6 @@ tqdm==4.64.0 # 
webdriver_manager<3.9 anthropic==0.3.6 typing-inspect==0.8.0 -typing_extensions==4.5.0 libcst==1.0.1 qdrant-client==1.4.0 pytest-mock==3.11.1 @@ -46,7 +45,6 @@ wrapt==1.15.0 websocket-client==0.58.0 zhipuai==1.0.7 rich==13.6.0 -nbclient==0.9.0 nbformat==5.9.2 ipython==8.17.2 ipykernel==6.27.0