Merge branch 'geekan/main' into feature/multi-llm

莘权 马 2024-07-17 10:06:27 +08:00
commit 42c06421cc
25 changed files with 319 additions and 93 deletions

View file

@@ -32,7 +32,7 @@ jobs:
run: |
export ALLOW_OPENAI_API_CALL=0
mkdir -p ~/.metagpt && cp tests/config2.yaml ~/.metagpt/config2.yaml
pytest tests/ --doctest-modules --cov=./metagpt/ --cov-report=xml:cov.xml --cov-report=html:htmlcov --durations=20 | tee unittest.txt
pytest tests/ --ignore=tests/metagpt/environment/android_env --ignore=tests/metagpt/ext/android_assistant --doctest-modules --cov=./metagpt/ --cov-report=xml:cov.xml --cov-report=html:htmlcov --durations=20 | tee unittest.txt
- name: Show coverage report
run: |
coverage report -m

View file

@@ -31,7 +31,7 @@ ## News
🚀 Feb. 08, 2024: [v0.7.0](https://github.com/geekan/MetaGPT/releases/tag/v0.7.0) released, supporting assigning different LLMs to different Roles. We also introduced [Data Interpreter](https://github.com/geekan/MetaGPT/blob/main/examples/di/README.md), a powerful agent capable of solving a wide range of real-world problems.
🚀 Jan. 16, 2024: Our paper [MetaGPT: Meta Programming for A Multi-Agent Collaborative Framework
](https://arxiv.org/abs/2308.00352) accepted for **oral presentation (top 1.2%)** at ICLR 2024, **ranking #1** in the LLM-based Agent category.
](https://openreview.net/forum?id=VtmBAGCN7o) accepted for **oral presentation (top 1.2%)** at ICLR 2024, **ranking #1** in the LLM-based Agent category.
🚀 Jan. 03, 2024: [v0.6.0](https://github.com/geekan/MetaGPT/releases/tag/v0.6.0) released, new features include serialization, upgraded OpenAI package and supported multiple LLM, provided [minimal example for debate](https://github.com/geekan/MetaGPT/blob/main/examples/debate_simple.py) etc.
@@ -166,16 +166,15 @@ ## Citation
To stay updated with the latest research and development, follow [@MetaGPT_](https://twitter.com/MetaGPT_) on Twitter.
To cite [MetaGPT](https://arxiv.org/abs/2308.00352) or [Data Interpreter](https://arxiv.org/abs/2402.18679) in publications, please use the following BibTeX entries.
To cite [MetaGPT](https://openreview.net/forum?id=VtmBAGCN7o) or [Data Interpreter](https://arxiv.org/abs/2402.18679) in publications, please use the following BibTeX entries.
```bibtex
@misc{hong2023metagpt,
title={MetaGPT: Meta Programming for A Multi-Agent Collaborative Framework},
author={Sirui Hong and Mingchen Zhuge and Jonathan Chen and Xiawu Zheng and Yuheng Cheng and Ceyao Zhang and Jinlin Wang and Zili Wang and Steven Ka Shing Yau and Zijuan Lin and Liyang Zhou and Chenyu Ran and Lingfeng Xiao and Chenglin Wu and Jürgen Schmidhuber},
year={2023},
eprint={2308.00352},
archivePrefix={arXiv},
primaryClass={cs.AI}
@inproceedings{hong2024metagpt,
title={Meta{GPT}: Meta Programming for A Multi-Agent Collaborative Framework},
author={Sirui Hong and Mingchen Zhuge and Jonathan Chen and Xiawu Zheng and Yuheng Cheng and Jinlin Wang and Ceyao Zhang and Zili Wang and Steven Ka Shing Yau and Zijuan Lin and Liyang Zhou and Chenyu Ran and Lingfeng Xiao and Chenglin Wu and J{\"u}rgen Schmidhuber},
booktitle={The Twelfth International Conference on Learning Representations},
year={2024},
url={https://openreview.net/forum?id=VtmBAGCN7o}
}
@misc{hong2024data,
title={Data Interpreter: An LLM Agent For Data Science},
@@ -185,6 +184,5 @@ ## Citation
archivePrefix={arXiv},
primaryClass={cs.AI}
}
```

View file

@@ -2,4 +2,4 @@ llm:
api_type: 'claude' # or anthropic
base_url: 'https://api.anthropic.com'
api_key: 'YOUR_API_KEY'
model: 'claude-3-opus-20240229'
model: 'claude-3-5-sonnet-20240620' # or 'claude-3-opus-20240229'

View file

@@ -119,13 +119,12 @@ ## Citation
If you use MetaGPT or Data Interpreter in your research paper, please cite our work:
```bibtex
@misc{hong2023metagpt,
title={MetaGPT: Meta Programming for Multi-Agent Collaborative Framework},
author={Sirui Hong and Xiawu Zheng and Jonathan Chen and Yuheng Cheng and Jinlin Wang and Ceyao Zhang and Zili Wang and Steven Ka Shing Yau and Zijuan Lin and Liyang Zhou and Chenyu Ran and Lingfeng Xiao and Chenglin Wu},
year={2023},
eprint={2308.00352},
archivePrefix={arXiv},
primaryClass={cs.AI}
@inproceedings{hong2024metagpt,
title={Meta{GPT}: Meta Programming for A Multi-Agent Collaborative Framework},
author={Sirui Hong and Mingchen Zhuge and Jonathan Chen and Xiawu Zheng and Yuheng Cheng and Jinlin Wang and Ceyao Zhang and Zili Wang and Steven Ka Shing Yau and Zijuan Lin and Liyang Zhou and Chenyu Ran and Lingfeng Xiao and Chenglin Wu and J{\"u}rgen Schmidhuber},
booktitle={The Twelfth International Conference on Learning Representations},
year={2024},
url={https://openreview.net/forum?id=VtmBAGCN7o}
}
@misc{hong2024data,
title={Data Interpreter: An LLM Agent For Data Science},

View file

@@ -298,13 +298,12 @@ ## Citation
If you use MetaGPT or Data Interpreter in a research paper, please cite our work as follows:
```bibtex
@misc{hong2023metagpt,
title={MetaGPT: Meta Programming for A Multi-Agent Collaborative Framework},
author={Sirui Hong and Mingchen Zhuge and Jonathan Chen and Xiawu Zheng and Yuheng Cheng and Ceyao Zhang and Jinlin Wang and Zili Wang and Steven Ka Shing Yau and Zijuan Lin and Liyang Zhou and Chenyu Ran and Lingfeng Xiao and Chenglin Wu and Jürgen Schmidhuber},
year={2023},
eprint={2308.00352},
archivePrefix={arXiv},
primaryClass={cs.AI}
@inproceedings{hong2024metagpt,
title={Meta{GPT}: Meta Programming for A Multi-Agent Collaborative Framework},
author={Sirui Hong and Mingchen Zhuge and Jonathan Chen and Xiawu Zheng and Yuheng Cheng and Jinlin Wang and Ceyao Zhang and Zili Wang and Steven Ka Shing Yau and Zijuan Lin and Liyang Zhou and Chenyu Ran and Lingfeng Xiao and Chenglin Wu and J{\"u}rgen Schmidhuber},
booktitle={The Twelfth International Conference on Learning Representations},
year={2024},
url={https://openreview.net/forum?id=VtmBAGCN7o}
}
@misc{hong2024data,
title={Data Interpreter: An LLM Agent For Data Science},

examples/ui_with_chainlit/.gitignore (vendored, new file, 3 lines)
View file

@@ -0,0 +1,3 @@
*.chainlit
chainlit.md
.files

View file

@@ -0,0 +1,34 @@
# MetaGPT in UI with Chainlit! 🤖
- MetaGPT functionality in a UI, using Chainlit.
- Like the CLI version, it takes a **one line requirement** as input and outputs **user stories / competitive analysis / requirements / data structures / APIs / documents, etc.**, but with `everything in the UI`.
## Install Chainlit
- First set up the initial MetaGPT config as described in the [main README](../../README.md), then install Chainlit:
```bash
pip install chainlit
```
## Usage
```bash
chainlit run app.py
```
- Now go to: http://localhost:8000
- Select one of the starters:
  - `Create a 2048 game`
  - `Write a cli Blackjack Game`
  - `Type your own message...`
- This runs a MetaGPT software company on your requirement.
## To set up your own application
- Any of `Environment.run`, `Team.run`, `Role.run`, `Role._act`, or `Action.run` can be overridden.
- This example overrides `Environment.run`, as it is the easiest place to hook in.
- `metagpt.logs.set_llm_stream_logfunc` must point at a function that streams tokens into the UI via a Chainlit `Message`, as sketched below.
- To surface output anywhere else, call `chainlit.Message(content=...).send()` with the content.
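A minimal sketch of that wiring, using only calls that appear in `init_setup.py` below (the buffer variable name is illustrative):

```python
import chainlit as cl
from metagpt.logs import set_llm_stream_logfunc

ui_message = cl.Message(content="")  # buffer that streamed tokens land in

def stream_to_ui(token: str) -> None:
    # push each LLM token into the Chainlit message as it arrives
    cl.run_sync(ui_message.stream_token(token))

# route MetaGPT's LLM stream into the UI instead of the console
set_llm_stream_logfunc(func=stream_to_ui)
```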

View file

@@ -0,0 +1,83 @@
import chainlit as cl
from init_setup import ChainlitEnv
from metagpt.roles import (
Architect,
Engineer,
ProductManager,
ProjectManager,
QaEngineer,
)
from metagpt.team import Team
# https://docs.chainlit.io/concepts/starters
@cl.set_chat_profiles
async def chat_profile() -> list[cl.ChatProfile]:
"""Generates a chat profile containing starter messages which can be triggered to run MetaGPT
Returns:
list[chainlit.ChatProfile]: list of chat profiles
"""
return [
cl.ChatProfile(
name="MetaGPT",
icon="/public/MetaGPT-new-log.jpg",
markdown_description="It takes a **one line requirement** as input and outputs **user stories / competitive analysis / requirements / data structures / APIs / documents, etc.**, But `everything in UI`.",
starters=[
cl.Starter(
label="Create a 2048 Game",
message="Create a 2048 game",
icon="/public/2048.jpg",
),
cl.Starter(
label="Write a cli Blackjack Game",
message="Write a cli Blackjack Game",
icon="/public/blackjack.jpg",
),
],
)
]
# https://docs.chainlit.io/concepts/message
@cl.on_message
async def startup(message: cl.Message) -> None:
"""On Message in UI, Create a MetaGPT software company
Args:
message (chainlit.Message): message by chainlist
"""
idea = message.content
company = Team(env=ChainlitEnv())
# Similar to software_company.py
company.hire(
[
ProductManager(),
Architect(),
ProjectManager(),
Engineer(n_borg=5, use_code_review=True),
QaEngineer(),
]
)
company.invest(investment=3.0)
company.run_project(idea=idea)
await company.run(n_round=5)
workdir = company.env.context.git_repo.workdir
files = company.env.context.git_repo.get_files(workdir)
files = "\n".join([f"{workdir}/{file}" for file in files if not file.startswith(".git")])
await cl.Message(
content=f"""
Codes can be found here:
{files}
---
Total cost: `{company.cost_manager.total_cost}`
"""
).send()

View file

@@ -0,0 +1,69 @@
import asyncio
import chainlit as cl
from metagpt.environment import Environment
from metagpt.logs import logger, set_llm_stream_logfunc
from metagpt.roles import Role
from metagpt.utils.common import any_to_name
def log_llm_stream_chainlit(msg):
# Stream the message token into Chainlit UI.
cl.run_sync(chainlit_message.stream_token(msg))
set_llm_stream_logfunc(func=log_llm_stream_chainlit)
class ChainlitEnv(Environment):
"""Chainlit Environment for UI Integration"""
async def run(self, k=1):
"""处理一次所有信息的运行
Process all Role runs at once
"""
for _ in range(k):
futures = []
for role in self.roles.values():
# Call role.run with chainlit configuration
future = self._chainlit_role_run(role=role)
futures.append(future)
await asyncio.gather(*futures)
logger.debug(f"is idle: {self.is_idle}")
async def _chainlit_role_run(self, role: Role) -> None:
"""To run the role with chainlit config
Args:
role (Role): metagpt.role.Role
"""
global chainlit_message
chainlit_message = cl.Message(content="")
message = await role.run()
# If message is from role._act() publish to UI.
if message is not None and message.content != "No actions taken yet":
# Convert a message from action node in json format
chainlit_message.content = await self._convert_message_to_markdownjson(message=chainlit_message.content)
# message content from which role and its action...
chainlit_message.content += f"---\n\nAction: `{any_to_name(message.cause_by)}` done by `{role._setting}`."
await chainlit_message.send()
# for clean view in UI
async def _convert_message_to_markdownjson(self, message: str) -> str:
"""If the message is from MetaGPT Action Node output, then
convert it into markdown json for clear view in UI.
Args:
message (str): message by role._act
Returns:
str: message in markdown form
"""
if message.startswith("[CONTENT]"):
return f"```json\n{message}\n```\n"
return message

Three binary image files added (27 KiB, 89 KiB, and 58 KiB), presumably the `public/` starter icons referenced in app.py above.

View file

@@ -161,6 +161,8 @@ class CollectLinks(Action):
"""
max_results = max(num_results * 2, 6)
results = await self.search_engine.run(query, max_results=max_results, as_string=False)
if len(results) == 0:
return []
_results = "\n".join(f"{i}: {j}" for i, j in zip(range(max_results), results))
prompt = COLLECT_AND_RANKURLS_PROMPT.format(topic=topic, query=query, results=_results)
logger.debug(prompt)

View file

@@ -1,14 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/1 11:59
@Author : alexanderwu
@File : const.py
@Modified By: mashenquan, 2023-11-1. According to Section 2.2.1 and 2.2.2 of RFC 116, added key definitions for
common properties in the Message.
@Modified By: mashenquan, 2023-11-27. Defines file repository paths according to Section 2.2.3.4 of RFC 135.
@Modified By: mashenquan, 2023/12/5. Add directories for code summarization..
"""
import os
from pathlib import Path

View file

@@ -27,6 +27,7 @@ SUPPORT_STREAM_MODELS = {
"anthropic.claude-3-sonnet-20240229-v1:0:28k": 28000,
"anthropic.claude-3-sonnet-20240229-v1:0:200k": 200000,
"anthropic.claude-3-haiku-20240307-v1:0": 200000,
"anthropic.claude-3-5-sonnet-20240620-v1:0": 200000,
"anthropic.claude-3-haiku-20240307-v1:0:48k": 48000,
"anthropic.claude-3-haiku-20240307-v1:0:200k": 200000,
# currently (2024-4-29) only available in the US West (Oregon) AWS Region.

View file

@@ -1,5 +1,7 @@
import asyncio
import json
from typing import Literal
from functools import partial
from typing import List, Literal
import boto3
from botocore.eventstream import EventStream
@@ -22,7 +24,6 @@ class BedrockLLM(BaseLLM):
self.__client = self.__init_client("bedrock-runtime")
self.__provider = get_provider(self.config.model)
self.cost_manager = CostManager(token_costs=BEDROCK_TOKEN_COSTS)
logger.warning("Amazon bedrock doesn't support asynchronous now")
if self.config.model in NOT_SUUPORT_STREAM_MODELS:
logger.warning(f"model {self.config.model} doesn't support streaming output!")
@@ -64,15 +65,21 @@ class BedrockLLM(BaseLLM):
]
logger.info("\n" + "\n".join(summaries))
def invoke_model(self, request_body: str) -> dict:
response = self.__client.invoke_model(modelId=self.config.model, body=request_body)
async def invoke_model(self, request_body: str) -> dict:
loop = asyncio.get_running_loop()
response = await loop.run_in_executor(
None, partial(self.client.invoke_model, modelId=self.config.model, body=request_body)
)
usage = self._get_usage(response)
self._update_costs(usage, self.config.model)
response_body = self._get_response_body(response)
return response_body
def invoke_model_with_response_stream(self, request_body: str) -> EventStream:
response = self.__client.invoke_model_with_response_stream(modelId=self.config.model, body=request_body)
async def invoke_model_with_response_stream(self, request_body: str) -> EventStream:
loop = asyncio.get_running_loop()
response = await loop.run_in_executor(
None, partial(self.client.invoke_model_with_response_stream, modelId=self.config.model, body=request_body)
)
usage = self._get_usage(response)
self._update_costs(usage, self.config.model)
return response
@@ -97,7 +104,7 @@ class BedrockLLM(BaseLLM):
async def acompletion(self, messages: list[dict]) -> dict:
request_body = self.__provider.get_request_body(messages, self._const_kwargs)
response_body = self.invoke_model(request_body)
response_body = await self.invoke_model(request_body)
return response_body
async def _achat_completion(self, messages: list[dict], timeout=USE_CONFIG_TIMEOUT) -> dict:
@@ -111,14 +118,8 @@
return full_text
request_body = self.__provider.get_request_body(messages, self._const_kwargs, stream=True)
response = self.invoke_model_with_response_stream(request_body)
collected_content = []
for event in response["body"]:
chunk_text = self.__provider.get_choice_text_from_stream(event)
collected_content.append(chunk_text)
log_llm_stream(chunk_text)
stream_response = await self.invoke_model_with_response_stream(request_body)
collected_content = await self._get_stream_response_body(stream_response)
log_llm_stream("\n")
full_text = ("".join(collected_content)).lstrip()
return full_text
@@ -127,6 +128,18 @@ class BedrockLLM(BaseLLM):
response_body = json.loads(response["body"].read())
return response_body
async def _get_stream_response_body(self, stream_response) -> List[str]:
def collect_content() -> str:
collected_content = []
for event in stream_response["body"]:
chunk_text = self.__provider.get_choice_text_from_stream(event)
collected_content.append(chunk_text)
log_llm_stream(chunk_text)
return collected_content
loop = asyncio.get_running_loop()
return await loop.run_in_executor(None, collect_content)
def _get_usage(self, response) -> dict[str, int]:
headers = response.get("ResponseMetadata", {}).get("HTTPHeaders", {})
prompt_tokens = int(headers.get("x-amzn-bedrock-input-token-count", 0))
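The rewrite above follows the standard pattern for calling a blocking SDK from async code: offload the call to a thread via `run_in_executor` and await the result. A self-contained sketch of that pattern, with a stand-in for the synchronous boto3 call:

```python
import asyncio
from functools import partial

def blocking_invoke(model_id: str, body: str) -> dict:
    # stand-in for boto3's synchronous client.invoke_model(...)
    return {"modelId": model_id, "body": body}

async def ainvoke(model_id: str, body: str) -> dict:
    loop = asyncio.get_running_loop()
    # None selects the default thread-pool executor; partial binds the arguments
    return await loop.run_in_executor(None, partial(blocking_invoke, model_id, body))

print(asyncio.run(ainvoke("anthropic.claude-3-5-sonnet-20240620-v1:0", "{}")))
```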

View file

@@ -50,6 +50,9 @@ class QianFanLLM(BaseLLM):
else:
raise ValueError("Set the `access_key`&`secret_key` or `api_key`&`secret_key` first")
if self.config.base_url:
os.environ.setdefault("QIANFAN_BASE_URL", self.config.base_url)
support_system_pairs = [
("ERNIE-Bot-4", "completions_pro"), # (model, corresponding-endpoint)
("ERNIE-Bot-8k", "ernie_bot_8k"),

View file

@@ -80,19 +80,17 @@ class InvoiceOCRAssistant(Role):
raise Exception("Invoice file not uploaded")
resp = await todo.run(file_path)
actions = list(self.actions)
if len(resp) == 1:
# Single file support for questioning based on OCR recognition results
self.set_actions([GenerateTable, ReplyQuestion])
actions.extend([GenerateTable, ReplyQuestion])
self.orc_data = resp[0]
else:
self.set_actions([GenerateTable])
self.set_todo(None)
actions.append(GenerateTable)
self.set_actions(actions)
self.rc.max_react_loop = len(self.actions)
content = INVOICE_OCR_SUCCESS
resp = OCRResults(ocr_result=json.dumps(resp))
msg = Message(content=content, instruct_content=resp)
self.rc.memory.add(msg)
return await super().react()
elif isinstance(todo, GenerateTable):
ocr_results: OCRResults = msg.instruct_content
resp = await todo.run(json.loads(ocr_results.ocr_result), self.filename)

View file

@@ -58,7 +58,9 @@ class Researcher(Role):
)
elif isinstance(todo, WebBrowseAndSummarize):
links = instruct_content.links
todos = (todo.run(*url, query=query, system_text=research_system_text) for (query, url) in links.items())
todos = (
todo.run(*url, query=query, system_text=research_system_text) for (query, url) in links.items() if url
)
if self.enable_concurrency:
summaries = await asyncio.gather(*todos)
else:

View file

@@ -87,8 +87,11 @@ class SerpAPIWrapper(BaseModel):
get_focused = lambda x: {i: j for i, j in x.items() if i in focus}
if "error" in res.keys():
raise ValueError(f"Got error from SerpAPI: {res['error']}")
if "answer_box" in res.keys() and "answer" in res["answer_box"].keys():
if res["error"] == "Google hasn't returned any results for this query.":
toret = "No good search result found"
else:
raise ValueError(f"Got error from SerpAPI: {res['error']}")
elif "answer_box" in res.keys() and "answer" in res["answer_box"].keys():
toret = res["answer_box"]["answer"]
elif "answer_box" in res.keys() and "snippet" in res["answer_box"].keys():
toret = res["answer_box"]["snippet"]

View file

@@ -23,8 +23,8 @@ from metagpt.utils.graph_repository import SPO, GraphRepository
class DiGraphRepository(GraphRepository):
"""Graph repository based on DiGraph."""
def __init__(self, name: str, **kwargs):
super().__init__(name=name, **kwargs)
def __init__(self, name: str | Path, **kwargs):
super().__init__(name=str(name), **kwargs)
self._repo = networkx.DiGraph()
async def insert(self, subject: str, predicate: str, object_: str):
@@ -112,8 +112,28 @@ class DiGraphRepository(GraphRepository):
async def load(self, pathname: str | Path):
"""Load a directed graph repository from a JSON file."""
data = await aread(filename=pathname, encoding="utf-8")
m = json.loads(data)
self.load_json(data)
def load_json(self, val: str):
"""
Loads a JSON-encoded string representing a graph structure and updates
the internal repository (_repo) with the parsed graph.
Args:
val (str): A JSON-encoded string representing a graph structure.
Returns:
self: Returns the instance of the class with the updated _repo attribute.
Raises:
TypeError: If val is not a valid JSON string or cannot be parsed into
a valid graph structure.
"""
if not val:
return self
m = json.loads(val)
self._repo = networkx.node_link_graph(m)
return self
@staticmethod
async def load_from(pathname: str | Path) -> GraphRepository:
@@ -126,9 +146,7 @@ class DiGraphRepository(GraphRepository):
GraphRepository: A new instance of the graph repository loaded from the specified JSON file.
"""
pathname = Path(pathname)
name = pathname.with_suffix("").name
root = pathname.parent
graph = DiGraphRepository(name=name, root=root)
graph = DiGraphRepository(name=pathname.stem, root=pathname.parent)
if pathname.exists():
await graph.load(pathname=pathname)
return graph
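For reference, `load_json` consumes the node-link JSON format that networkx emits; a minimal round-trip sketch of that underlying format (the edge contents are illustrative):

```python
import json

import networkx

g = networkx.DiGraph()
g.add_edge("ClassA", "method:run")  # an SPO-style edge
payload = json.dumps(networkx.node_link_data(g))  # what gets persisted to disk

restored = networkx.node_link_graph(json.loads(payload))
assert restored.has_edge("ClassA", "method:run")
```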

View file

@@ -11,8 +11,10 @@ from multiprocessing import Pipe
class StreamPipe:
parent_conn, child_conn = Pipe()
finish: bool = False
def __init__(self, name=None):
self.name = name
self.parent_conn, self.child_conn = Pipe()
self.finish: bool = False
format_data = {
"id": "chatcmpl-96bVnBOOyPFZZxEoTIGbdpFcVEnur",

View file

@@ -57,7 +57,9 @@ TOKEN_COSTS = {
"claude-2.0": {"prompt": 0.008, "completion": 0.024},
"claude-2.1": {"prompt": 0.008, "completion": 0.024},
"claude-3-sonnet-20240229": {"prompt": 0.003, "completion": 0.015},
"claude-3-5-sonnet-20240620": {"prompt": 0.003, "completion": 0.015},
"claude-3-opus-20240229": {"prompt": 0.015, "completion": 0.075},
"claude-3-haiku-20240307": {"prompt": 0.00025, "completion": 0.00125},
"yi-34b-chat-0205": {"prompt": 0.0003, "completion": 0.0003},
"yi-34b-chat-200k": {"prompt": 0.0017, "completion": 0.0017},
"yi-large": {"prompt": 0.0028, "completion": 0.0028},
@@ -224,6 +226,8 @@ TOKEN_MAX = {
"claude-2.1": 200000,
"claude-3-sonnet-20240229": 200000,
"claude-3-opus-20240229": 200000,
"claude-3-5-sonnet-20240620": 200000,
"claude-3-haiku-20240307": 200000,
"yi-34b-chat-0205": 4000,
"yi-34b-chat-200k": 200000,
"yi-large": 16385,
@@ -280,6 +284,7 @@ BEDROCK_TOKEN_COSTS = {
"anthropic.claude-3-sonnet-20240229-v1:0": {"prompt": 0.003, "completion": 0.015},
"anthropic.claude-3-sonnet-20240229-v1:0:28k": {"prompt": 0.003, "completion": 0.015},
"anthropic.claude-3-sonnet-20240229-v1:0:200k": {"prompt": 0.003, "completion": 0.015},
"anthropic.claude-3-5-sonnet-20240620-v1:0": {"prompt": 0.003, "completion": 0.015},
"anthropic.claude-3-haiku-20240307-v1:0": {"prompt": 0.00025, "completion": 0.00125},
"anthropic.claude-3-haiku-20240307-v1:0:48k": {"prompt": 0.00025, "completion": 0.00125},
"anthropic.claude-3-haiku-20240307-v1:0:200k": {"prompt": 0.00025, "completion": 0.00125},

View file

@@ -45,30 +45,6 @@ extras_require = {
"llama-index-postprocessor-flag-embedding-reranker==0.1.2",
"docx2txt==0.8",
],
"android_assistant": [
"pyshine==0.0.9",
"opencv-python==4.6.0.66",
"protobuf<3.20,>=3.9.2",
"modelscope",
"tensorflow==2.9.1; os_name == 'linux'",
"tensorflow==2.9.1; os_name == 'win32'",
"tensorflow-macos==2.9; os_name == 'darwin'",
"keras==2.9.0",
"torch",
"torchvision",
"transformers",
"opencv-python",
"matplotlib",
"pycocotools",
"SentencePiece",
"tf_slim",
"tf_keras",
"pyclipper",
"shapely",
"groundingdino-py",
"datasets==2.18.0",
"clip-openai",
],
}
extras_require["test"] = [
@@ -85,6 +61,9 @@ extras_require["test"] = [
"aioboto3~=12.4.0",
"gradio==3.0.0",
"grpcio-status==1.48.2",
"grpcio-tools==1.48.2",
"google-api-core==2.17.1",
"protobuf==3.19.6",
"pylint==3.0.3",
"pybrowsers",
]
@@ -93,7 +72,30 @@ extras_require["pyppeteer"] = [
"pyppeteer>=1.0.2"
] # pyppeteer is unmaintained and there are conflicts with dependencies
extras_require["dev"] = (["pylint~=3.0.3", "black~=23.3.0", "isort~=5.12.0", "pre-commit~=3.6.0"],)
extras_require["android_assistant"] = [
"pyshine==0.0.9",
"opencv-python==4.6.0.66",
"protobuf<3.20,>=3.9.2",
"modelscope",
"tensorflow==2.9.1; os_name == 'linux'",
"tensorflow==2.9.1; os_name == 'win32'",
"tensorflow-macos==2.9; os_name == 'darwin'",
"keras==2.9.0",
"torch",
"torchvision",
"transformers",
"opencv-python",
"matplotlib",
"pycocotools",
"SentencePiece",
"tf_slim",
"tf_keras",
"pyclipper",
"shapely",
"groundingdino-py",
"datasets==2.18.0",
"clip-openai",
]
setup(
name="metagpt",