Merge branch 'main' into incremental_development

# Conflicts:
#	metagpt/schema.py
This commit is contained in:
mannaandpoem 2024-01-19 19:53:17 +08:00
commit e1b783ca14
127 changed files with 2346 additions and 648 deletions

34
.github/workflows/build-package.yaml vendored Normal file
View file

@ -0,0 +1,34 @@
# Build a wheel + sdist and publish to PyPI whenever a GitHub release is created.
name: Build and upload python package

on:
  release:
    types: [created]

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'
          cache: 'pip'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install -e .
          pip install setuptools wheel twine
      # Derive the package version from the release tag (strip the "refs/tags/v" prefix)
      # and stamp it into setup.py before building.
      - name: Set package version
        run: |
          export VERSION="${GITHUB_REF#refs/tags/v}"
          sed -i "s/version=.*/version=\"${VERSION}\",/" setup.py
      - name: Build and publish
        env:
          # Token-based upload: username is the literal "__token__", password is the API token.
          TWINE_USERNAME: __token__
          TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
        run: |
          python setup.py bdist_wheel sdist
          twine upload dist/*

View file

@ -1,11 +1,16 @@
name: Python application test
name: Unit Tests
on:
workflow_dispatch:
pull_request_target:
push:
branches:
- '*-debugger'
jobs:
build:
runs-on: ubuntu-latest
environment: unittest
strategy:
matrix:
# python-version: ['3.9', '3.10', '3.11']
@ -13,15 +18,37 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'
- name: Install dependencies
run: |
sh tests/scripts/run_install_deps.sh
- name: Run reverse proxy script for ssh service
if: contains(github.ref, '-debugger')
continue-on-error: true
env:
FPR_SERVER_ADDR: ${{ secrets.FPR_SERVER_ADDR }}
FPR_TOKEN: ${{ secrets.FPR_TOKEN }}
FPR_SSH_REMOTE_PORT: ${{ secrets.FPR_SSH_REMOTE_PORT }}
RSA_PUB: ${{ secrets.RSA_PUB }}
SSH_PORT: ${{ vars.SSH_PORT || '22'}}
run: |
echo "Run \"ssh $(whoami)@FPR_SERVER_HOST -p FPR_SSH_REMOTE_PORT\" and \"cd $(pwd)\""
mkdir -p ~/.ssh/
echo $RSA_PUB >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
wget https://github.com/fatedier/frp/releases/download/v0.32.1/frp_0.32.1_linux_amd64.tar.gz -O frp.tar.gz
tar xvzf frp.tar.gz -C /opt
mv /opt/frp* /opt/frp
/opt/frp/frpc tcp --server_addr $FPR_SERVER_ADDR --token $FPR_TOKEN --local_port $SSH_PORT --remote_port $FPR_SSH_REMOTE_PORT
- name: Test with pytest
run: |
export ALLOW_OPENAI_API_CALL=0
echo "${{ secrets.METAGPT_KEY_YAML }}" | base64 -d > config/key.yaml
pytest tests/ --doctest-modules --cov=./metagpt/ --cov-report=xml:cov.xml --cov-report=html:htmlcov --durations=20 | tee unittest.txt
- name: Show coverage report
@ -29,7 +56,12 @@ jobs:
coverage report -m
- name: Show failed tests and overall summary
run: |
grep -E "FAILED tests|[0-9]+ passed," unittest.txt
grep -E "FAILED tests|ERROR tests|[0-9]+ passed," unittest.txt
failed_count=$(grep -E "FAILED|ERROR" unittest.txt | wc -l)
if [[ "$failed_count" -gt 0 ]]; then
echo "$failed_count failed lines found! Task failed."
exit 1
fi
- name: Upload pytest test results
uses: actions/upload-artifact@v3
with:
@ -40,4 +72,8 @@ jobs:
./tests/data/rsp_cache_new.json
retention-days: 3
if: ${{ always() }}
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
if: ${{ always() }}

2
.gitignore vendored
View file

@ -172,6 +172,8 @@ tests/metagpt/utils/file_repo_git
*.png
htmlcov
htmlcov.*
*.dot
*.pkl
*-structure.csv
*-structure.json

View file

@ -1,6 +1,6 @@
The MIT License
Copyright (c) 2023 Chenglin Wu
Copyright (c) 2024 Chenglin Wu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View file

@ -6,16 +6,16 @@ # MetaGPT: The Multi-Agent Framework
</p>
<p align="center">
<b>Assign different roles to GPTs to form a collaborative software entity for complex tasks.</b>
<b>Assign different roles to GPTs to form a collaborative entity for complex tasks.</b>
</p>
<p align="center">
<a href="docs/README_CN.md"><img src="https://img.shields.io/badge/文档-中文版-blue.svg" alt="CN doc"></a>
<a href="README.md"><img src="https://img.shields.io/badge/document-English-blue.svg" alt="EN doc"></a>
<a href="docs/README_JA.md"><img src="https://img.shields.io/badge/ドキュメント-日本語-blue.svg" alt="JA doc"></a>
<a href="https://discord.gg/DYn29wFk9z"><img src="https://dcbadge.vercel.app/api/server/DYn29wFk9z?style=flat" alt="Discord Follow"></a>
<a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-blue.svg" alt="License: MIT"></a>
<a href="docs/ROADMAP.md"><img src="https://img.shields.io/badge/ROADMAP-路线图-blue" alt="roadmap"></a>
<a href="https://discord.gg/DYn29wFk9z"><img src="https://dcbadge.vercel.app/api/server/DYn29wFk9z?style=flat" alt="Discord Follow"></a>
<a href="https://twitter.com/MetaGPT_"><img src="https://img.shields.io/twitter/follow/MetaGPT?style=social" alt="Twitter Follow"></a>
</p>
@ -25,16 +25,31 @@ # MetaGPT: The Multi-Agent Framework
<a href="https://huggingface.co/spaces/deepwisdom/MetaGPT" target="_blank"><img alt="Hugging Face" src="https://img.shields.io/badge/%F0%9F%A4%97%20-Hugging%20Face-blue?color=blue&logoColor=white" /></a>
</p>
## News
🚀 Jan. 16, 2024: Our paper [MetaGPT: Meta Programming for A Multi-Agent Collaborative Framework
](https://arxiv.org/abs/2308.00352) accepted for oral presentation **(top 1.2%)** at ICLR 2024, **ranking #1** in the LLM-based Agent category.
🚀 Jan. 03, 2024: [v0.6.0](https://github.com/geekan/MetaGPT/releases/tag/v0.6.0) released, new features include serialization, upgraded OpenAI package and supported multiple LLM, provided [minimal example for debate](https://github.com/geekan/MetaGPT/blob/main/examples/debate_simple.py) etc.
🚀 Dec. 15, 2023: [v0.5.0](https://github.com/geekan/MetaGPT/releases/tag/v0.5.0) released, introducing some experimental features such as **incremental development**, **multilingual**, **multiple programming languages**, etc.
🔥 Nov. 08, 2023: MetaGPT is selected into [Open100: Top 100 Open Source achievements](https://www.benchcouncil.org/evaluation/opencs/annual.html).
🔥 Sep. 01, 2023: MetaGPT tops GitHub Trending Monthly for the **17th time** in August 2023.
🌟 Jun. 30, 2023: MetaGPT is now open source.
🌟 Apr. 24, 2023: First line of MetaGPT code committed.
## Software Company as Multi-Agent System
1. MetaGPT takes a **one line requirement** as input and outputs **user stories / competitive analysis / requirements / data structures / APIs / documents, etc.**
2. Internally, MetaGPT includes **product managers / architects / project managers / engineers.** It provides the entire process of a **software company along with carefully orchestrated SOPs.**
1. `Code = SOP(Team)` is the core philosophy. We materialize SOP and apply it to teams composed of LLMs.
![A software company consists of LLM-based roles](docs/resources/software_company_cd.jpeg)
<p align="center">Software Company Multi-Role Schematic (Gradually Implementing)</p>
## News
- Dec 15: [v0.5.0](https://github.com/geekan/MetaGPT/releases/tag/v0.5.0) is released! We introduce **incremental development**, facilitating agents to build up larger projects on top of their previous efforts or existing codebase. We also launch a whole collection of important features, including **multilingual support** (experimental), multiple **programming languages support** (experimental), **incremental development** (experimental), CLI support, pip support, enhanced code review, documentation mechanism, and optimized messaging mechanism!
<p align="center">Software Company Multi-Agent Schematic (Gradually Implementing)</p>
## Install

View file

@ -14,8 +14,8 @@ OPENAI_BASE_URL: "https://api.openai.com/v1"
OPENAI_API_MODEL: "gpt-4-1106-preview"
MAX_TOKENS: 4096
RPM: 10
LLM_TYPE: OpenAI # Except for these three major models OpenAI, MetaGPT LLM, and Azure other large models can be distinguished based on the validity of the key.
TIMEOUT: 60 # Timeout for llm invocation
#DEFAULT_PROVIDER: openai
#### if Spark
#SPARK_APPID : "YOUR_APPID"
@ -36,6 +36,7 @@ TIMEOUT: 60 # Timeout for llm invocation
#### if zhipuai from `https://open.bigmodel.cn`. You can set here or export API_KEY="YOUR_API_KEY"
# ZHIPUAI_API_KEY: "YOUR_API_KEY"
# ZHIPUAI_API_MODEL: "glm-4"
#### if Google Gemini from `https://ai.google.dev/` and API_KEY from `https://makersuite.google.com/app/apikey`.
#### You can set here or export GOOGLE_API_KEY="YOUR_API_KEY"

View file

@ -1,7 +1,7 @@
role:
name: Teacher # Referenced the `Teacher` in `metagpt/roles/teacher.py`.
module: metagpt.roles.teacher # Referenced `metagpt/roles/teacher.py`.
skills: # Refer to the skill `name` of the published skill in `.well-known/skills.yaml`.
skills: # Refer to the skill `name` of the published skill in `docs/.well-known/skills.yaml`.
- name: text_to_speech
description: Text-to-speech
- name: text_to_image

View file

@ -11,7 +11,7 @@ paths:
post:
summary: Generate greeting
description: Generates a greeting message.
operationId: hello.post_greeting
operationId: openapi_v3_hello.post_greeting
responses:
200:
description: greeting response

View file

@ -9,24 +9,22 @@ ### Short-term Objective
1. Become the multi-agent framework with the highest ROI.
2. Support fully automatic implementation of medium-sized projects (around 2000 lines of code).
3. Implement most identified tasks, reaching version 0.5.
3. Implement most identified tasks, reaching version 1.0.
### Tasks
To reach version v0.5, approximately 70% of the following tasks need to be completed.
1. Usability
1. ~~Release v0.01 pip package to try to solve issues like npm installation (though not necessarily successfully)~~ (v0.3.0)
2. Support for overall save and recovery of software companies
2. ~~Support for overall save and recovery of software companies~~ (v0.6.0)
3. ~~Support human confirmation and modification during the process~~ (v0.3.0) New: Support human confirmation and modification with fewer constraints and a more user-friendly interface
4. Support process caching: Consider carefully whether to add server caching mechanism
5. ~~Resolve occasional failure to follow instruction under current prompts, causing code parsing errors, through stricter system prompts~~ (v0.4.0, with function call)
6. Write documentation, describing the current features and usage at all levels (ongoing, continuously adding contents to [documentation site](https://docs.deepwisdom.ai/main/en/guide/get_started/introduction.html))
7. ~~Support Docker~~
2. Features
1. Support a more standard and stable parser (need to analyze the format that the current LLM is better at)
2. ~~Establish a separate output queue, differentiated from the message queue~~
3. Attempt to atomize all role work, but this may significantly increase token overhead
1. ~~Support a more standard and stable parser (need to analyze the format that the current LLM is better at)~~ (v0.5.0)
2. ~~Establish a separate output queue, differentiated from the message queue~~ (v0.5.0)
3. ~~Attempt to atomize all role work, but this may significantly increase token overhead~~ (v0.5.0)
4. Complete the design and implementation of module breakdown
5. Support various modes of memory: clearly distinguish between long-term and short-term memory
6. Perfect the test role, and carry out necessary interactions with humans
@ -43,10 +41,10 @@ ### Tasks
4. Actions
1. ~~Implementation: Search~~ (v0.2.1)
2. Implementation: Knowledge search, supporting 10+ data formats
3. Implementation: Data EDA (expected v0.6.0)
4. Implementation: Review
5. ~~Implementation~~: Add Document (v0.5.0)
6. ~~Implementation~~: Delete Document (v0.5.0)
3. Implementation: Data EDA (expected v0.7.0)
4. Implementation: Review & Revise (expected v0.7.0)
5. ~~Implementation: Add Document~~ (v0.5.0)
6. ~~Implementation: Delete Document~~ (v0.5.0)
7. Implementation: Self-training
8. ~~Implementation: DebugError~~ (v0.2.1)
9. Implementation: Generate reliable unit tests based on YAPI
@ -64,15 +62,14 @@ ### Tasks
3. ~~Support Playwright apis~~
7. Roles
1. Perfect the action pool/skill pool for each role
2. Red Book blogger
3. E-commerce seller
4. Data analyst (expected v0.6.0)
5. News observer
6. ~~Institutional researcher~~ (v0.2.1)
2. E-commerce seller
3. Data analyst (expected v0.7.0)
4. News observer
5. ~~Institutional researcher~~ (v0.2.1)
8. Evaluation
1. Support an evaluation on a game dataset (experimentation done with game agents)
2. Reproduce papers, implement full skill acquisition for a single game role, achieving SOTA results (experimentation done with game agents)
3. Support an evaluation on a math dataset (expected v0.6.0)
3. Support an evaluation on a math dataset (expected v0.7.0)
4. Reproduce papers, achieving SOTA results for current mathematical problem solving process
9. LLM
1. Support Claude underlying API
@ -80,7 +77,7 @@ ### Tasks
3. Support streaming version of all APIs
4. ~~Make gpt-3.5-turbo available (HARD)~~
10. Other
1. Clean up existing unused code
2. Unify all code styles and establish contribution standards
3. Multi-language support
4. Multi-programming-language support
1. ~~Clean up existing unused code~~
2. ~~Unify all code styles and establish contribution standards~~
3. ~~Multi-language support~~
4. ~~Multi-programming-language support~~

View file

@ -23,6 +23,10 @@ async def main():
# streaming mode, much slower
await llm.acompletion_text(hello_msg, stream=True)
# check completion if exist to test llm complete functions
if hasattr(llm, "completion"):
logger.info(llm.completion(hello_msg))
if __name__ == "__main__":
asyncio.run(main())

View file

@ -88,6 +88,8 @@ class InvoiceOCR(Action):
async def _ocr(invoice_file_path: Path):
ocr = PaddleOCR(use_angle_cls=True, lang="ch", page_num=1)
ocr_result = ocr.ocr(str(invoice_file_path), cls=True)
for result in ocr_result[0]:
result[1] = (result[1][0], round(result[1][1], 2)) # round long confidence scores to reduce token costs
return ocr_result
async def run(self, file_path: Path, *args, **kwargs) -> list:

View file

@ -35,7 +35,6 @@ class PrepareDocuments(Action):
if path.exists() and not CONFIG.inc:
shutil.rmtree(path)
CONFIG.project_path = path
CONFIG.project_name = path.name
CONFIG.git_repo = GitRepository(local_path=path, auto_init=True)
async def run(self, with_messages, **kwargs):

View file

@ -9,60 +9,209 @@
import re
from pathlib import Path
import aiofiles
from metagpt.actions import Action
from metagpt.config import CONFIG
from metagpt.const import CLASS_VIEW_FILE_REPO, GRAPH_REPO_FILE_REPO
from metagpt.const import (
AGGREGATION,
COMPOSITION,
DATA_API_DESIGN_FILE_REPO,
GENERALIZATION,
GRAPH_REPO_FILE_REPO,
)
from metagpt.logs import logger
from metagpt.repo_parser import RepoParser
from metagpt.schema import ClassAttribute, ClassMethod, ClassView
from metagpt.utils.common import split_namespace
from metagpt.utils.di_graph_repository import DiGraphRepository
from metagpt.utils.graph_repository import GraphKeyword, GraphRepository
class RebuildClassView(Action):
def __init__(self, name="", context=None, llm=None):
super().__init__(name=name, context=context, llm=llm)
async def run(self, with_messages=None, format=CONFIG.prompt_schema):
graph_repo_pathname = CONFIG.git_repo.workdir / GRAPH_REPO_FILE_REPO / CONFIG.git_repo.workdir.name
graph_db = await DiGraphRepository.load_from(str(graph_repo_pathname.with_suffix(".json")))
repo_parser = RepoParser(base_directory=self.context)
class_views = await repo_parser.rebuild_class_views(path=Path(self.context)) # use pylint
repo_parser = RepoParser(base_directory=Path(self.context))
# use pylint
class_views, relationship_views, package_root = await repo_parser.rebuild_class_views(path=Path(self.context))
await GraphRepository.update_graph_db_with_class_views(graph_db, class_views)
symbols = repo_parser.generate_symbols() # use ast
await GraphRepository.update_graph_db_with_class_relationship_views(graph_db, relationship_views)
# use ast
direction, diff_path = self._diff_path(path_root=Path(self.context).resolve(), package_root=package_root)
symbols = repo_parser.generate_symbols()
for file_info in symbols:
# Align to the same root directory in accordance with `class_views`.
file_info.file = self._align_root(file_info.file, direction, diff_path)
await GraphRepository.update_graph_db_with_file_info(graph_db, file_info)
await self._create_mermaid_class_view(graph_db=graph_db)
await self._save(graph_db=graph_db)
await self._create_mermaid_class_views(graph_db=graph_db)
await graph_db.save()
async def _create_mermaid_class_view(self, graph_db):
pass
# dataset = await graph_db.select(subject=concat_namespace(filename, class_name), predicate=GraphKeyword.HAS_PAGE_INFO)
# if not dataset:
# logger.warning(f"No page info for {concat_namespace(filename, class_name)}")
# return
# code_block_info = CodeBlockInfo.parse_raw(dataset[0].object_)
# src_code = await read_file_block(filename=Path(self.context) / filename, lineno=code_block_info.lineno, end_lineno=code_block_info.end_lineno)
# code_type = ""
# dataset = await graph_db.select(subject=filename, predicate=GraphKeyword.IS)
# for spo in dataset:
# if spo.object_ in ["javascript", "python"]:
# code_type = spo.object_
# break
async def _create_mermaid_class_views(self, graph_db):
path = Path(CONFIG.git_repo.workdir) / DATA_API_DESIGN_FILE_REPO
path.mkdir(parents=True, exist_ok=True)
pathname = path / CONFIG.git_repo.workdir.name
async with aiofiles.open(str(pathname.with_suffix(".mmd")), mode="w", encoding="utf-8") as writer:
content = "classDiagram\n"
logger.debug(content)
await writer.write(content)
# class names
rows = await graph_db.select(predicate=GraphKeyword.IS, object_=GraphKeyword.CLASS)
class_distinct = set()
relationship_distinct = set()
for r in rows:
await RebuildClassView._create_mermaid_class(r.subject, graph_db, writer, class_distinct)
for r in rows:
await RebuildClassView._create_mermaid_relationship(r.subject, graph_db, writer, relationship_distinct)
# try:
# node = await REBUILD_CLASS_VIEW_NODE.fill(context=f"```{code_type}\n{src_code}\n```", llm=self.llm, to=format)
# class_view = node.instruct_content.model_dump()["Class View"]
# except Exception as e:
# class_view = RepoParser.rebuild_class_view(src_code, code_type)
# await graph_db.insert(subject=concat_namespace(filename, class_name), predicate=GraphKeyword.HAS_CLASS_VIEW, object_=class_view)
# logger.info(f"{concat_namespace(filename, class_name)} {GraphKeyword.HAS_CLASS_VIEW} {class_view}")
@staticmethod
async def _create_mermaid_class(ns_class_name, graph_db, file_writer, distinct):
fields = split_namespace(ns_class_name)
if len(fields) > 2:
# Ignore sub-class
return
async def _save(self, graph_db):
class_view_file_repo = CONFIG.git_repo.new_file_repository(relative_path=CLASS_VIEW_FILE_REPO)
dataset = await graph_db.select(predicate=GraphKeyword.HAS_CLASS_VIEW)
all_class_view = []
for spo in dataset:
title = f"---\ntitle: {spo.subject}\n---\n"
filename = re.sub(r"[/:]", "_", spo.subject) + ".mmd"
await class_view_file_repo.save(filename=filename, content=title + spo.object_)
all_class_view.append(spo.object_)
await class_view_file_repo.save(filename="all.mmd", content="\n".join(all_class_view))
class_view = ClassView(name=fields[1])
rows = await graph_db.select(subject=ns_class_name)
for r in rows:
name = split_namespace(r.object_)[-1]
name, visibility, abstraction = RebuildClassView._parse_name(name=name, language="python")
if r.predicate == GraphKeyword.HAS_CLASS_PROPERTY:
var_type = await RebuildClassView._parse_variable_type(r.object_, graph_db)
attribute = ClassAttribute(
name=name, visibility=visibility, abstraction=bool(abstraction), value_type=var_type
)
class_view.attributes.append(attribute)
elif r.predicate == GraphKeyword.HAS_CLASS_FUNCTION:
method = ClassMethod(name=name, visibility=visibility, abstraction=bool(abstraction))
await RebuildClassView._parse_function_args(method, r.object_, graph_db)
class_view.methods.append(method)
# update graph db
await graph_db.insert(ns_class_name, GraphKeyword.HAS_CLASS_VIEW, class_view.model_dump_json())
content = class_view.get_mermaid(align=1)
logger.debug(content)
await file_writer.write(content)
distinct.add(ns_class_name)
@staticmethod
async def _create_mermaid_relationship(ns_class_name, graph_db, file_writer, distinct):
s_fields = split_namespace(ns_class_name)
if len(s_fields) > 2:
# Ignore sub-class
return
predicates = {GraphKeyword.IS + v + GraphKeyword.OF: v for v in [GENERALIZATION, COMPOSITION, AGGREGATION]}
mappings = {
GENERALIZATION: " <|-- ",
COMPOSITION: " *-- ",
AGGREGATION: " o-- ",
}
content = ""
for p, v in predicates.items():
rows = await graph_db.select(subject=ns_class_name, predicate=p)
for r in rows:
o_fields = split_namespace(r.object_)
if len(o_fields) > 2:
# Ignore sub-class
continue
relationship = mappings.get(v, " .. ")
link = f"{o_fields[1]}{relationship}{s_fields[1]}"
distinct.add(link)
content += f"\t{link}\n"
if content:
logger.debug(content)
await file_writer.write(content)
@staticmethod
def _parse_name(name: str, language="python"):
pattern = re.compile(r"<I>(.*?)<\/I>")
result = re.search(pattern, name)
abstraction = ""
if result:
name = result.group(1)
abstraction = "*"
if name.startswith("__"):
visibility = "-"
elif name.startswith("_"):
visibility = "#"
else:
visibility = "+"
return name, visibility, abstraction
@staticmethod
async def _parse_variable_type(ns_name, graph_db) -> str:
rows = await graph_db.select(subject=ns_name, predicate=GraphKeyword.HAS_TYPE_DESC)
if not rows:
return ""
vals = rows[0].object_.replace("'", "").split(":")
if len(vals) == 1:
return ""
val = vals[-1].strip()
return "" if val == "NoneType" else val + " "
@staticmethod
async def _parse_function_args(method: ClassMethod, ns_name: str, graph_db: GraphRepository):
rows = await graph_db.select(subject=ns_name, predicate=GraphKeyword.HAS_ARGS_DESC)
if not rows:
return
info = rows[0].object_.replace("'", "")
fs_tag = "("
ix = info.find(fs_tag)
fe_tag = "):"
eix = info.rfind(fe_tag)
if eix < 0:
fe_tag = ")"
eix = info.rfind(fe_tag)
args_info = info[ix + len(fs_tag) : eix].strip()
method.return_type = info[eix + len(fe_tag) :].strip()
if method.return_type == "None":
method.return_type = ""
if "(" in method.return_type:
method.return_type = method.return_type.replace("(", "Tuple[").replace(")", "]")
# parse args
if not args_info:
return
splitter_ixs = []
cost = 0
for i in range(len(args_info)):
if args_info[i] == "[":
cost += 1
elif args_info[i] == "]":
cost -= 1
if args_info[i] == "," and cost == 0:
splitter_ixs.append(i)
splitter_ixs.append(len(args_info))
args = []
ix = 0
for eix in splitter_ixs:
args.append(args_info[ix:eix])
ix = eix + 1
for arg in args:
parts = arg.strip().split(":")
if len(parts) == 1:
method.args.append(ClassAttribute(name=parts[0].strip()))
continue
method.args.append(ClassAttribute(name=parts[0].strip(), value_type=parts[-1].strip()))
@staticmethod
def _diff_path(path_root: Path, package_root: Path) -> (str, str):
if len(str(path_root)) > len(str(package_root)):
return "+", str(path_root.relative_to(package_root))
if len(str(path_root)) < len(str(package_root)):
return "-", str(package_root.relative_to(path_root))
return "=", "."
@staticmethod
def _align_root(path: str, direction: str, diff_path: str):
if direction == "=":
return path
if direction == "+":
return diff_path + "/" + path
else:
return path[len(diff_path) + 1 :]

View file

@ -1,33 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/19
@Author : mashenquan
@File : rebuild_class_view_an.py
@Desc : Defines `ActionNode` objects used by rebuild_class_view.py
"""
from metagpt.actions.action_node import ActionNode
# Node asking the LLM to emit a Mermaid class diagram for the source code supplied
# in the "context" field; `example` illustrates the expected output shape.
CLASS_SOURCE_CODE_BLOCK = ActionNode(
key="Class View",
expected_type=str,
instruction='Generate the mermaid class diagram corresponding to source code in "context."',
example="""
classDiagram
class A {
-int x
+int y
-int speed
-int direction
+__init__(x: int, y: int, speed: int, direction: int)
+change_direction(new_direction: int) None
+move() None
}
""",
)
# Child nodes for the aggregated "RebuildClassView" node (currently just the one above).
REBUILD_CLASS_VIEW_NODES = [
CLASS_SOURCE_CODE_BLOCK,
]
# Parent ActionNode aggregating the children; consumed by rebuild_class_view.py.
REBUILD_CLASS_VIEW_NODE = ActionNode.from_children("RebuildClassView", REBUILD_CLASS_VIEW_NODES)

View file

@ -0,0 +1,60 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/1/4
@Author : mashenquan
@File : rebuild_sequence_view.py
@Desc : Rebuild sequence view info
"""
from __future__ import annotations
from pathlib import Path
from typing import List
from metagpt.actions import Action
from metagpt.config import CONFIG
from metagpt.const import GRAPH_REPO_FILE_REPO
from metagpt.logs import logger
from metagpt.utils.common import aread, list_files
from metagpt.utils.di_graph_repository import DiGraphRepository
from metagpt.utils.graph_repository import GraphKeyword
class RebuildSequenceView(Action):
async def run(self, with_messages=None, format=CONFIG.prompt_schema):
graph_repo_pathname = CONFIG.git_repo.workdir / GRAPH_REPO_FILE_REPO / CONFIG.git_repo.workdir.name
graph_db = await DiGraphRepository.load_from(str(graph_repo_pathname.with_suffix(".json")))
entries = await RebuildSequenceView._search_main_entry(graph_db)
for entry in entries:
await self._rebuild_sequence_view(entry, graph_db)
await graph_db.save()
@staticmethod
async def _search_main_entry(graph_db) -> List:
rows = await graph_db.select(predicate=GraphKeyword.HAS_PAGE_INFO)
tag = "__name__:__main__"
entries = []
for r in rows:
if tag in r.subject or tag in r.object_:
entries.append(r)
return entries
async def _rebuild_sequence_view(self, entry, graph_db):
filename = entry.subject.split(":", 1)[0]
src_filename = RebuildSequenceView._get_full_filename(root=self.context, pathname=filename)
content = await aread(filename=src_filename, encoding="utf-8")
content = f"```python\n{content}\n```\n\n---\nTranslate the code above into Mermaid Sequence Diagram."
data = await self.llm.aask(
msg=content, system_msgs=["You are a python code to Mermaid Sequence Diagram translator in function detail"]
)
await graph_db.insert(subject=filename, predicate=GraphKeyword.HAS_SEQUENCE_VIEW, object_=data)
logger.info(data)
@staticmethod
def _get_full_filename(root: str | Path, pathname: str | Path) -> Path | None:
files = list_files(root=root)
postfix = "/" + str(pathname)
for i in files:
if str(i).endswith(postfix):
return i
return None

View file

@ -0,0 +1,16 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/1/4
@Author : mashenquan
@File : rebuild_sequence_view_an.py
"""
from metagpt.actions.action_node import ActionNode
from metagpt.utils.mermaid import MMC2
# ActionNode prompting the LLM to translate source code (the "context") into a
# Mermaid sequence diagram; MMC2 supplies the "format example" the output must follow.
CODE_2_MERMAID_SEQUENCE_DIAGRAM = ActionNode(
key="Program call flow",
expected_type=str,
instruction='Translate the "context" content into "format example" format.',
example=MMC2,
)

View file

@ -158,7 +158,7 @@ class WriteCode(Action):
if not coding_context.code_doc:
# avoid root_path pydantic ValidationError if use WriteCode alone
root_path = CONFIG.src_workspace if CONFIG.src_workspace else ""
coding_context.code_doc = Document(filename=coding_context.filename, root_path=root_path)
coding_context.code_doc = Document(filename=coding_context.filename, root_path=str(root_path))
coding_context.code_doc.content = code
return coding_context

View file

@ -188,9 +188,11 @@ class WriteCodeReview(Action):
cr_prompt = EXAMPLE_AND_INSTRUCTION.format(
format_example=format_example,
)
len1 = len(iterative_code) if iterative_code else 0
len2 = len(self.context.code_doc.content) if self.context.code_doc.content else 0
logger.info(
f"Code review and rewrite {self.context.code_doc.filename}: {i + 1}/{k} | {len(iterative_code)=}, "
f"{len(self.context.code_doc.content)=}"
f"Code review and rewrite {self.context.code_doc.filename}: {i + 1}/{k} | len(iterative_code)={len1}, "
f"len(self.context.code_doc.content)={len2}"
)
result, rewrited_code = await self.write_code_review_and_rewrite(
context_prompt, cr_prompt, self.context.code_doc.filename

View file

@ -14,6 +14,7 @@
from __future__ import annotations
import json
import uuid
from pathlib import Path
from typing import Optional
@ -120,7 +121,7 @@ class WritePRD(Action):
# if sas.result:
# logger.info(sas.result)
# logger.info(rsp)
project_name = CONFIG.project_name if CONFIG.project_name else ""
project_name = CONFIG.project_name or ""
context = CONTEXT_TEMPLATE.format(requirements=requirements, project_name=project_name)
exclude = [PROJECT_NAME.key] if project_name else []
node = await WRITE_PRD_NODE.fill(context=context, llm=self.llm, exclude=exclude) # schema=schema
@ -190,6 +191,8 @@ class WritePRD(Action):
ws_name = CodeParser.parse_str(block="Project Name", text=prd)
if ws_name:
CONFIG.project_name = ws_name
if not CONFIG.project_name: # The LLM failed to provide a project name, and the user didn't provide one either.
CONFIG.project_name = "app" + uuid.uuid4().hex[:16]
CONFIG.git_repo.rename_root(CONFIG.project_name)
async def _is_bugfix(self, context) -> bool:

View file

@ -11,7 +11,6 @@
from typing import Optional
from metagpt.actions.action import Action
from metagpt.config import CONFIG
from metagpt.const import TEST_CODES_FILE_REPO
from metagpt.logs import logger
from metagpt.schema import Document, TestingContext
@ -60,11 +59,12 @@ class WriteTest(Action):
self.context.test_doc = Document(
filename="test_" + self.context.code_doc.filename, root_path=TEST_CODES_FILE_REPO
)
fake_root = "/data"
prompt = PROMPT_TEMPLATE.format(
code_to_test=self.context.code_doc.content,
test_file_name=self.context.test_doc.filename,
source_file_path=self.context.code_doc.root_relative_path,
workspace=CONFIG.git_repo.workdir,
source_file_path=fake_root + "/" + self.context.code_doc.root_relative_path,
workspace=fake_root,
)
self.context.test_doc.content = await self.write_code(prompt)
return self.context

View file

@ -50,6 +50,9 @@ class LLMProviderEnum(Enum):
AZURE_OPENAI = "azure_openai"
OLLAMA = "ollama"
def __missing__(self, key):
return self.OPENAI
class Config(metaclass=Singleton):
"""
@ -108,6 +111,11 @@ class Config(metaclass=Singleton):
if v:
provider = k
break
if provider is None:
if self.DEFAULT_PROVIDER:
provider = LLMProviderEnum(self.DEFAULT_PROVIDER)
else:
raise NotConfiguredException("You should config a LLM configuration first")
if provider is LLMProviderEnum.GEMINI and not require_python_version(req_version=(3, 10)):
warnings.warn("Use Gemini requires Python >= 3.10")
@ -117,7 +125,6 @@ class Config(metaclass=Singleton):
if provider:
logger.info(f"API: {provider}")
return provider
raise NotConfiguredException("You should config a LLM configuration first")
def get_model_name(self, provider=None) -> str:
provider = provider or self.get_default_llm_provider_enum()
@ -137,6 +144,7 @@ class Config(metaclass=Singleton):
self.openai_api_key = self._get("OPENAI_API_KEY")
self.anthropic_api_key = self._get("ANTHROPIC_API_KEY")
self.zhipuai_api_key = self._get("ZHIPUAI_API_KEY")
self.zhipuai_api_model = self._get("ZHIPUAI_API_MODEL")
self.open_llm_api_base = self._get("OPEN_LLM_API_BASE")
self.open_llm_api_model = self._get("OPEN_LLM_API_MODEL")
self.fireworks_api_key = self._get("FIREWORKS_API_KEY")

View file

@ -129,3 +129,8 @@ LLM_API_TIMEOUT = 300
# Message id
IGNORED_MESSAGE_ID = "0"
# Class Relationship
GENERALIZATION = "Generalize"
COMPOSITION = "Composite"
AGGREGATION = "Aggregate"

View file

@ -67,7 +67,7 @@ class SkillsDeclaration(BaseModel):
@staticmethod
async def load(skill_yaml_file_name: Path = None) -> "SkillsDeclaration":
if not skill_yaml_file_name:
skill_yaml_file_name = Path(__file__).parent.parent.parent / ".well-known/skills.yaml"
skill_yaml_file_name = Path(__file__).parent.parent.parent / "docs/.well-known/skills.yaml"
async with aiofiles.open(str(skill_yaml_file_name), mode="r") as reader:
data = await reader.read(-1)
skill_data = yaml.safe_load(data)

View file

@ -43,7 +43,9 @@ class BaseLLM(ABC):
if system_msgs:
message = self._system_msgs(system_msgs)
else:
message = [self._default_system_msg()] if self.use_system_prompt else []
message = [self._default_system_msg()]
if not self.use_system_prompt:
message = []
if format_msgs:
message.extend(format_msgs)
message.append(self._user_msg(msg))
@ -87,6 +89,10 @@ class BaseLLM(ABC):
"""Required to provide the first text of choice"""
return rsp.get("choices")[0]["message"]["content"]
def get_choice_delta_text(self, rsp: dict) -> str:
"""Required to provide the first text of stream choice"""
return rsp.get("choices")[0]["delta"]["content"]
def get_choice_function(self, rsp: dict) -> dict:
"""Required to provide the first function of choice
:param dict rsp: OpenAI chat.comletion respond JSON, Note "message" must include "tool_calls",

View file

@ -79,10 +79,8 @@ class GeneralAPIRequestor(APIRequestor):
async def _interpret_async_response(
self, result: aiohttp.ClientResponse, stream: bool
) -> Tuple[Union[bytes, AsyncGenerator[bytes, None]], bool]:
if stream and (
"text/event-stream" in result.headers.get("Content-Type", "")
or "application/x-ndjson" in result.headers.get("Content-Type", "")
):
content_type = result.headers.get("Content-Type", "")
if stream and ("text/event-stream" in content_type or "application/x-ndjson" in content_type):
# the `Content-Type` of ollama stream resp is "application/x-ndjson"
return (
self._interpret_response_line(line, result.status, result.headers, stream=True)

View file

@ -120,6 +120,7 @@ class GeminiLLM(BaseLLM):
content = chunk.text
log_llm_stream(content)
collected_content.append(content)
log_llm_stream("\n")
full_content = "".join(collected_content)
usage = await self.aget_usage(messages, full_content)

View file

@ -119,6 +119,7 @@ class OllamaLLM(BaseLLM):
else:
# stream finished
usage = self.get_usage(chunk)
log_llm_stream("\n")
self._update_costs(usage)
full_content = "".join(collected_content)

View file

@ -134,6 +134,7 @@ class OpenAILLM(BaseLLM):
async for i in resp:
log_llm_stream(i)
collected_messages.append(i)
log_llm_stream("\n")
full_reply_content = "".join(collected_messages)
usage = self._calc_usage(messages, full_reply_content)

View file

@ -1,75 +1,31 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : async_sse_client to make keep the use of Event to access response
# refs to `https://github.com/zhipuai/zhipuai-sdk-python/blob/main/zhipuai/utils/sse_client.py`
# refs to `zhipuai/core/_sse_client.py`
from zhipuai.utils.sse_client import _FIELD_SEPARATOR, Event, SSEClient
import json
from typing import Any, Iterator
class AsyncSSEClient(SSEClient):
async def _aread(self):
data = b""
class AsyncSSEClient(object):
def __init__(self, event_source: Iterator[Any]):
self._event_source = event_source
async def stream(self) -> dict:
if isinstance(self._event_source, bytes):
raise RuntimeError(
f"Request failed, msg: {self._event_source.decode('utf-8')}, please ref to `https://open.bigmodel.cn/dev/api#error-code-v3`"
)
async for chunk in self._event_source:
for line in chunk.splitlines(True):
data += line
if data.endswith((b"\r\r", b"\n\n", b"\r\n\r\n")):
yield data
data = b""
if data:
yield data
line = chunk.decode("utf-8")
if line.startswith(":") or not line:
return
async def async_events(self):
async for chunk in self._aread():
event = Event()
# Split before decoding so splitlines() only uses \r and \n
for line in chunk.splitlines():
# Decode the line.
line = line.decode(self._char_enc)
# Lines starting with a separator are comments and are to be
# ignored.
if not line.strip() or line.startswith(_FIELD_SEPARATOR):
continue
data = line.split(_FIELD_SEPARATOR, 1)
field = data[0]
# Ignore unknown fields.
if field not in event.__dict__:
self._logger.debug("Saw invalid field %s while parsing " "Server Side Event", field)
continue
if len(data) > 1:
# From the spec:
# "If value starts with a single U+0020 SPACE character,
# remove it from value."
if data[1].startswith(" "):
value = data[1][1:]
else:
value = data[1]
else:
# If no value is present after the separator,
# assume an empty value.
value = ""
# The data field may come over multiple lines and their values
# are concatenated with each other.
if field == "data":
event.__dict__[field] += value + "\n"
else:
event.__dict__[field] = value
# Events with no data are not dispatched.
if not event.data:
continue
# If the data field ends with a newline, remove it.
if event.data.endswith("\n"):
event.data = event.data[0:-1]
# Empty event names default to 'message'
event.event = event.event or "message"
# Dispatch the event
self._logger.debug("Dispatching %s...", event)
yield event
field, _p, value = line.partition(":")
if value.startswith(" "):
value = value[1:]
if field == "data":
if value.startswith("[DONE]"):
break
data = json.loads(value)
yield data

View file

@ -4,46 +4,27 @@
import json
import zhipuai
from zhipuai.model_api.api import InvokeType, ModelAPI
from zhipuai.utils.http_client import headers as zhipuai_default_headers
from zhipuai import ZhipuAI
from zhipuai.core._http_client import ZHIPUAI_DEFAULT_TIMEOUT
from metagpt.provider.general_api_requestor import GeneralAPIRequestor
from metagpt.provider.zhipuai.async_sse_client import AsyncSSEClient
class ZhiPuModelAPI(ModelAPI):
@classmethod
def get_header(cls) -> dict:
token = cls._generate_token()
zhipuai_default_headers.update({"Authorization": token})
return zhipuai_default_headers
@classmethod
def get_sse_header(cls) -> dict:
token = cls._generate_token()
headers = {"Authorization": token}
return headers
@classmethod
def split_zhipu_api_url(cls, invoke_type: InvokeType, kwargs):
class ZhiPuModelAPI(ZhipuAI):
def split_zhipu_api_url(self):
# use this method to prevent zhipu api upgrading to different version.
# and follow the GeneralAPIRequestor implemented based on openai sdk
zhipu_api_url = cls._build_api_url(kwargs, invoke_type)
"""
example:
zhipu_api_url: https://open.bigmodel.cn/api/paas/v3/model-api/{model}/{invoke_method}
"""
zhipu_api_url = "https://open.bigmodel.cn/api/paas/v4/chat/completions"
arr = zhipu_api_url.split("/api/")
# ("https://open.bigmodel.cn/api" , "/paas/v3/model-api/chatglm_turbo/invoke")
# ("https://open.bigmodel.cn/api" , "/paas/v4/chat/completions")
return f"{arr[0]}/api", f"/{arr[1]}"
@classmethod
async def arequest(cls, invoke_type: InvokeType, stream: bool, method: str, headers: dict, kwargs):
async def arequest(self, stream: bool, method: str, headers: dict, kwargs):
# TODO to make the async request to be more generic for models in http mode.
assert method in ["post", "get"]
base_url, url = cls.split_zhipu_api_url(invoke_type, kwargs)
base_url, url = self.split_zhipu_api_url()
requester = GeneralAPIRequestor(base_url=base_url)
result, _, api_key = await requester.arequest(
method=method,
@ -51,25 +32,23 @@ class ZhiPuModelAPI(ModelAPI):
headers=headers,
stream=stream,
params=kwargs,
request_timeout=zhipuai.api_timeout_seconds,
request_timeout=ZHIPUAI_DEFAULT_TIMEOUT.read,
)
return result
@classmethod
async def ainvoke(cls, **kwargs) -> dict:
async def acreate(self, **kwargs) -> dict:
"""async invoke different from raw method `async_invoke` which get the final result by task_id"""
headers = cls.get_header()
resp = await cls.arequest(
invoke_type=InvokeType.SYNC, stream=False, method="post", headers=headers, kwargs=kwargs
)
headers = self._default_headers
resp = await self.arequest(stream=False, method="post", headers=headers, kwargs=kwargs)
resp = resp.decode("utf-8")
resp = json.loads(resp)
if "error" in resp:
raise RuntimeError(
f"Request failed, msg: {resp}, please ref to `https://open.bigmodel.cn/dev/api#error-code-v3`"
)
return resp
@classmethod
async def asse_invoke(cls, **kwargs) -> AsyncSSEClient:
async def acreate_stream(self, **kwargs) -> AsyncSSEClient:
"""async sse_invoke"""
headers = cls.get_sse_header()
return AsyncSSEClient(
await cls.arequest(invoke_type=InvokeType.SSE, stream=True, method="post", headers=headers, kwargs=kwargs)
)
headers = self._default_headers
return AsyncSSEClient(await self.arequest(stream=True, method="post", headers=headers, kwargs=kwargs))

View file

@ -2,11 +2,9 @@
# -*- coding: utf-8 -*-
# @Desc : zhipuai LLM from https://open.bigmodel.cn/dev/api#sdk
import json
from enum import Enum
import openai
import zhipuai
from requests import ConnectionError
from tenacity import (
after_log,
@ -15,6 +13,7 @@ from tenacity import (
stop_after_attempt,
wait_random_exponential,
)
from zhipuai.types.chat.chat_completion import Completion
from metagpt.config import CONFIG, LLMProviderEnum
from metagpt.logs import log_llm_stream, logger
@ -35,26 +34,25 @@ class ZhiPuEvent(Enum):
class ZhiPuAILLM(BaseLLM):
"""
Refs to `https://open.bigmodel.cn/dev/api#chatglm_turbo`
From now, there is only one model named `chatglm_turbo`
From now, support glm-3-turboglm-4, and also system_prompt.
"""
def __init__(self):
self.__init_zhipuai(CONFIG)
self.llm = ZhiPuModelAPI
self.model = "chatglm_turbo" # so far only one model, just use it
self.use_system_prompt: bool = False # zhipuai has no system prompt when use api
self.llm = ZhiPuModelAPI(api_key=self.api_key)
def __init_zhipuai(self, config: CONFIG):
assert config.zhipuai_api_key
zhipuai.api_key = config.zhipuai_api_key
self.api_key = config.zhipuai_api_key
self.model = config.zhipuai_api_model # so far, it support glm-3-turbo、glm-4
# due to use openai sdk, set the api_key but it will't be used.
# openai.api_key = zhipuai.api_key # due to use openai sdk, set the api_key but it will't be used.
if config.openai_proxy:
# FIXME: openai v1.x sdk has no proxy support
openai.proxy = config.openai_proxy
def _const_kwargs(self, messages: list[dict]) -> dict:
kwargs = {"model": self.model, "prompt": messages, "temperature": 0.3}
def _const_kwargs(self, messages: list[dict], stream: bool = False) -> dict:
kwargs = {"model": self.model, "messages": messages, "stream": stream, "temperature": 0.3}
return kwargs
def _update_costs(self, usage: dict):
@ -67,21 +65,15 @@ class ZhiPuAILLM(BaseLLM):
except Exception as e:
logger.error(f"zhipuai updats costs failed! exp: {e}")
def get_choice_text(self, resp: dict) -> str:
"""get the first text of choice from llm response"""
assist_msg = resp.get("data", {}).get("choices", [{"role": "error"}])[-1]
assert assist_msg["role"] == "assistant"
return assist_msg.get("content")
def completion(self, messages: list[dict], timeout=3) -> dict:
resp = self.llm.invoke(**self._const_kwargs(messages))
usage = resp.get("data").get("usage")
resp: Completion = self.llm.chat.completions.create(**self._const_kwargs(messages))
usage = resp.usage.model_dump()
self._update_costs(usage)
return resp
return resp.model_dump()
async def _achat_completion(self, messages: list[dict], timeout=3) -> dict:
resp = await self.llm.ainvoke(**self._const_kwargs(messages))
usage = resp.get("data").get("usage")
resp = await self.llm.acreate(**self._const_kwargs(messages))
usage = resp.get("usage", {})
self._update_costs(usage)
return resp
@ -89,35 +81,19 @@ class ZhiPuAILLM(BaseLLM):
return await self._achat_completion(messages, timeout=timeout)
async def _achat_completion_stream(self, messages: list[dict], timeout=3) -> str:
response = await self.llm.asse_invoke(**self._const_kwargs(messages))
response = await self.llm.acreate_stream(**self._const_kwargs(messages, stream=True))
collected_content = []
usage = {}
async for event in response.async_events():
if event.event == ZhiPuEvent.ADD.value:
content = event.data
async for chunk in response.stream():
finish_reason = chunk.get("choices")[0].get("finish_reason")
if finish_reason == "stop":
usage = chunk.get("usage", {})
else:
content = self.get_choice_delta_text(chunk)
collected_content.append(content)
log_llm_stream(content)
elif event.event == ZhiPuEvent.ERROR.value or event.event == ZhiPuEvent.INTERRUPTED.value:
content = event.data
logger.error(f"event error: {content}", end="")
elif event.event == ZhiPuEvent.FINISH.value:
"""
event.meta
{
"task_status":"SUCCESS",
"usage":{
"completion_tokens":351,
"prompt_tokens":595,
"total_tokens":946
},
"task_id":"xx",
"request_id":"xxx"
}
"""
meta = json.loads(event.meta)
usage = meta.get("usage")
else:
print(f"zhipuapi else event: {event.data}", end="")
log_llm_stream("\n")
self._update_costs(usage)
full_content = "".join(collected_content)

View file

@ -12,14 +12,14 @@ import json
import re
import subprocess
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from typing import Dict, List, Optional
import aiofiles
import pandas as pd
from pydantic import BaseModel, Field
from metagpt.const import AGGREGATION, COMPOSITION, GENERALIZATION
from metagpt.logs import logger
from metagpt.utils.common import any_to_str
from metagpt.utils.common import any_to_str, aread
from metagpt.utils.exceptions import handle_exception
@ -46,6 +46,13 @@ class ClassInfo(BaseModel):
methods: Dict[str, str] = Field(default_factory=dict)
class ClassRelationship(BaseModel):
src: str = ""
dest: str = ""
relationship: str = ""
label: Optional[str] = None
class RepoParser(BaseModel):
base_directory: Path = Field(default=None)
@ -60,7 +67,8 @@ class RepoParser(BaseModel):
file_info = RepoFileInfo(file=str(file_path.relative_to(self.base_directory)))
for node in tree:
info = RepoParser.node_to_str(node)
file_info.page_info.append(info)
if info:
file_info.page_info.append(info)
if isinstance(node, ast.ClassDef):
class_methods = [m.name for m in node.body if is_func(m)]
file_info.classes.append({"name": node.name, "methods": class_methods})
@ -110,7 +118,9 @@ class RepoParser(BaseModel):
return output_path
@staticmethod
def node_to_str(node) -> (int, int, str, str | Tuple):
def node_to_str(node) -> CodeBlockInfo | None:
if isinstance(node, ast.Try):
return None
if any_to_str(node) == any_to_str(ast.Expr):
return CodeBlockInfo(
lineno=node.lineno,
@ -129,6 +139,7 @@ class RepoParser(BaseModel):
},
any_to_str(ast.If): RepoParser._parse_if,
any_to_str(ast.AsyncFunctionDef): lambda x: x.name,
any_to_str(ast.AnnAssign): lambda x: RepoParser._parse_variable(x.target),
}
func = mappings.get(any_to_str(node))
if func:
@ -143,7 +154,8 @@ class RepoParser(BaseModel):
else:
raise NotImplementedError(f"Not implement:{val}")
return code_block
raise NotImplementedError(f"Not implement code block:{node.lineno}, {node.end_lineno}, {any_to_str(node)}")
logger.warning(f"Unsupported code block:{node.lineno}, {node.end_lineno}, {any_to_str(node)}")
return None
@staticmethod
def _parse_expr(node) -> List:
@ -164,22 +176,51 @@ class RepoParser(BaseModel):
@staticmethod
def _parse_if(n):
tokens = [RepoParser._parse_variable(n.test.left)]
for item in n.test.comparators:
tokens.append(RepoParser._parse_variable(item))
tokens = []
try:
if isinstance(n.test, ast.BoolOp):
tokens = []
for v in n.test.values:
tokens.extend(RepoParser._parse_if_compare(v))
return tokens
if isinstance(n.test, ast.Compare):
v = RepoParser._parse_variable(n.test.left)
if v:
tokens.append(v)
for item in n.test.comparators:
v = RepoParser._parse_variable(item)
if v:
tokens.append(v)
return tokens
except Exception as e:
logger.warning(f"Unsupported if: {n}, err:{e}")
return tokens
@staticmethod
def _parse_if_compare(n):
if hasattr(n, "left"):
return RepoParser._parse_variable(n.left)
else:
return []
@staticmethod
def _parse_variable(node):
funcs = {
any_to_str(ast.Constant): lambda x: x.value,
any_to_str(ast.Name): lambda x: x.id,
any_to_str(ast.Attribute): lambda x: f"{x.value.id}.{x.attr}",
}
func = funcs.get(any_to_str(node))
if not func:
raise NotImplementedError(f"Not implement:{node}")
return func(node)
try:
funcs = {
any_to_str(ast.Constant): lambda x: x.value,
any_to_str(ast.Name): lambda x: x.id,
any_to_str(ast.Attribute): lambda x: f"{x.value.id}.{x.attr}"
if hasattr(x.value, "id")
else f"{x.attr}",
any_to_str(ast.Call): lambda x: RepoParser._parse_variable(x.func),
any_to_str(ast.Tuple): lambda x: "",
}
func = funcs.get(any_to_str(node))
if not func:
raise NotImplementedError(f"Not implement:{node}")
return func(node)
except Exception as e:
logger.warning(f"Unsupported variable:{node}, err:{e}")
@staticmethod
def _parse_assign(node):
@ -197,18 +238,21 @@ class RepoParser(BaseModel):
raise ValueError(f"{result}")
class_view_pathname = path / "classes.dot"
class_views = await self._parse_classes(class_view_pathname)
relationship_views = await self._parse_class_relationships(class_view_pathname)
packages_pathname = path / "packages.dot"
class_views = RepoParser._repair_namespaces(class_views=class_views, path=path)
class_views, relationship_views, package_root = RepoParser._repair_namespaces(
class_views=class_views, relationship_views=relationship_views, path=path
)
class_view_pathname.unlink(missing_ok=True)
packages_pathname.unlink(missing_ok=True)
return class_views
return class_views, relationship_views, package_root
async def _parse_classes(self, class_view_pathname):
class_views = []
if not class_view_pathname.exists():
return class_views
async with aiofiles.open(str(class_view_pathname), mode="r") as reader:
lines = await reader.readlines()
data = await aread(filename=class_view_pathname, encoding="utf-8")
lines = data.split("\n")
for line in lines:
package_name, info = RepoParser._split_class_line(line)
if not package_name:
@ -229,6 +273,19 @@ class RepoParser(BaseModel):
class_views.append(class_info)
return class_views
async def _parse_class_relationships(self, class_view_pathname) -> List[ClassRelationship]:
relationship_views = []
if not class_view_pathname.exists():
return relationship_views
data = await aread(filename=class_view_pathname, encoding="utf-8")
lines = data.split("\n")
for line in lines:
relationship = RepoParser._split_relationship_line(line)
if not relationship:
continue
relationship_views.append(relationship)
return relationship_views
@staticmethod
def _split_class_line(line):
part_splitor = '" ['
@ -247,6 +304,40 @@ class RepoParser(BaseModel):
info = re.sub(r"<br[^>]*>", "\n", info)
return class_name, info
@staticmethod
def _split_relationship_line(line):
splitters = [" -> ", " [", "];"]
idxs = []
for tag in splitters:
if tag not in line:
return None
idxs.append(line.find(tag))
ret = ClassRelationship()
ret.src = line[0 : idxs[0]].strip('"')
ret.dest = line[idxs[0] + len(splitters[0]) : idxs[1]].strip('"')
properties = line[idxs[1] + len(splitters[1]) : idxs[2]].strip(" ")
mappings = {
'arrowhead="empty"': GENERALIZATION,
'arrowhead="diamond"': COMPOSITION,
'arrowhead="odiamond"': AGGREGATION,
}
for k, v in mappings.items():
if k in properties:
ret.relationship = v
if v != GENERALIZATION:
ret.label = RepoParser._get_label(properties)
break
return ret
@staticmethod
def _get_label(line):
tag = 'label="'
if tag not in line:
return ""
ix = line.find(tag)
eix = line.find('"', ix + len(tag))
return line[ix + len(tag) : eix]
@staticmethod
def _create_path_mapping(path: str | Path) -> Dict[str, str]:
mappings = {
@ -271,9 +362,11 @@ class RepoParser(BaseModel):
return mappings
@staticmethod
def _repair_namespaces(class_views: List[ClassInfo], path: str | Path) -> List[ClassInfo]:
def _repair_namespaces(
class_views: List[ClassInfo], relationship_views: List[ClassRelationship], path: str | Path
) -> (List[ClassInfo], List[ClassRelationship], str):
if not class_views:
return []
return [], [], ""
c = class_views[0]
full_key = str(path).lstrip("/").replace("/", ".")
root_namespace = RepoParser._find_root(full_key, c.package)
@ -290,7 +383,12 @@ class RepoParser(BaseModel):
for c in class_views:
c.package = RepoParser._repair_ns(c.package, new_mappings)
return class_views
for i in range(len(relationship_views)):
v = relationship_views[i]
v.src = RepoParser._repair_ns(v.src, new_mappings)
v.dest = RepoParser._repair_ns(v.dest, new_mappings)
relationship_views[i] = v
return class_views, relationship_views, root_path
@staticmethod
def _repair_ns(package, mappings):

View file

@ -36,7 +36,8 @@ class QaEngineer(Role):
profile: str = "QaEngineer"
goal: str = "Write comprehensive and robust tests to ensure codes will work as expected without bugs"
constraints: str = (
"The test code you write should conform to code standard like PEP8, be modular, " "easy to read and maintain"
"The test code you write should conform to code standard like PEP8, be modular, easy to read and maintain."
"Use same language as user requirement"
)
test_round_allowed: int = 5
test_round: int = 0
@ -62,6 +63,8 @@ class QaEngineer(Role):
if not filename or "test" in filename:
continue
code_doc = await src_file_repo.get(filename)
if not code_doc:
continue
test_doc = await tests_file_repo.get("test_" + code_doc.filename)
if not test_doc:
test_doc = Document(

View file

@ -166,6 +166,9 @@ class Role(SerializationMixin, is_polymorphic_base=True):
Role.model_rebuild()
super().__init__(**data)
if self.is_human:
self.llm = HumanProvider()
self.llm.system_prompt = self._get_prefix()
self._watch(data.get("watch") or [UserRequirement])
@ -418,7 +421,7 @@ class Role(SerializationMixin, is_polymorphic_base=True):
Use llm to select actions in _think dynamically
"""
actions_taken = 0
rsp = Message(content="No actions taken yet") # will be overwritten after Role _act
rsp = Message(content="No actions taken yet", cause_by=Action) # will be overwritten after Role _act
while actions_taken < self.rc.max_react_loop:
# think
await self._think()

View file

@ -459,3 +459,63 @@ class CodePlanAndChangeContext(BaseContext):
prd_docs: List[Document]
design_docs: List[Document]
tasks_docs: List[Document]
# mermaid class view
class ClassMeta(BaseModel):
name: str = ""
abstraction: bool = False
static: bool = False
visibility: str = ""
class ClassAttribute(ClassMeta):
value_type: str = ""
default_value: str = ""
def get_mermaid(self, align=1) -> str:
content = "".join(["\t" for i in range(align)]) + self.visibility
if self.value_type:
content += self.value_type + " "
content += self.name
if self.default_value:
content += "="
if self.value_type not in ["str", "string", "String"]:
content += self.default_value
else:
content += '"' + self.default_value.replace('"', "") + '"'
if self.abstraction:
content += "*"
if self.static:
content += "$"
return content
class ClassMethod(ClassMeta):
args: List[ClassAttribute] = Field(default_factory=list)
return_type: str = ""
def get_mermaid(self, align=1) -> str:
content = "".join(["\t" for i in range(align)]) + self.visibility
content += self.name + "(" + ",".join([v.get_mermaid(align=0) for v in self.args]) + ")"
if self.return_type:
content += ":" + self.return_type
if self.abstraction:
content += "*"
if self.static:
content += "$"
return content
class ClassView(ClassMeta):
attributes: List[ClassAttribute] = Field(default_factory=list)
methods: List[ClassMethod] = Field(default_factory=list)
def get_mermaid(self, align=1) -> str:
content = "".join(["\t" for i in range(align)]) + "class " + self.name + "{\n"
for v in self.attributes:
content += v.get_mermaid(align=align + 1) + "\n"
for v in self.methods:
content += v.get_mermaid(align=align + 1) + "\n"
content += "".join(["\t" for i in range(align)]) + "}\n"
return content

View file

@ -83,11 +83,12 @@ class Team(BaseModel):
"""Invest company. raise NoMoneyException when exceed max_budget."""
self.investment = investment
CONFIG.max_budget = investment
CONFIG.cost_manager.max_budget = investment
logger.info(f"Investment: ${investment}.")
@staticmethod
def _check_balance():
if CONFIG.cost_manager.total_cost > CONFIG.cost_manager.max_budget:
if CONFIG.cost_manager.total_cost >= CONFIG.cost_manager.max_budget:
raise NoMoneyException(
CONFIG.cost_manager.total_cost, f"Insufficient funds: {CONFIG.cost_manager.max_budget}"
)

View file

@ -5,6 +5,12 @@
@Author : mashenquan
@File : metagpt_oas3_api_svc.py
@Desc : MetaGPT OpenAPI Specification 3.0 REST API service
curl -X 'POST' \
'http://localhost:8080/openapi/greeting/dave' \
-H 'accept: text/plain' \
-H 'Content-Type: application/json' \
-d '{}'
"""
from pathlib import Path
@ -15,7 +21,7 @@ import connexion
def oas_http_svc():
"""Start the OAS 3.0 OpenAPI HTTP service"""
print("http://localhost:8080/oas3/ui/")
specification_dir = Path(__file__).parent.parent.parent / ".well-known"
specification_dir = Path(__file__).parent.parent.parent / "docs/.well-known"
app = connexion.AsyncApp(__name__, specification_dir=str(specification_dir))
app.add_api("metagpt_oas3_api.yaml")
app.add_api("openapi.yaml")

View file

@ -23,7 +23,7 @@ async def post_greeting(name: str) -> str:
if __name__ == "__main__":
specification_dir = Path(__file__).parent.parent.parent / ".well-known"
specification_dir = Path(__file__).parent.parent.parent / "docs/.well-known"
app = connexion.AsyncApp(__name__, specification_dir=str(specification_dir))
app.add_api("openapi.yaml", arguments={"title": "Hello World Example"})
app.run(port=8082)

View file

@ -407,6 +407,10 @@ def concat_namespace(*args) -> str:
return ":".join(str(value) for value in args)
def split_namespace(ns_class_name: str) -> List[str]:
return ns_class_name.split(":")
def general_after_log(i: "loguru.Logger", sec_format: str = "%0.3f") -> typing.Callable[["RetryCallState"], None]:
"""
Generates a logging function to be used after a call is retried.
@ -546,3 +550,20 @@ async def read_file_block(filename: str | Path, lineno: int, end_lineno: int):
break
lines.append(line)
return "".join(lines)
def list_files(root: str | Path) -> List[Path]:
files = []
try:
directory_path = Path(root)
if not directory_path.exists():
return []
for file_path in directory_path.iterdir():
if file_path.is_file():
files.append(file_path)
else:
subfolder_files = list_files(root=file_path)
files.extend(subfolder_files)
except Exception as e:
logger.error(f"Error: {e}")
return files

View file

@ -12,9 +12,9 @@ import json
from pathlib import Path
from typing import List
import aiofiles
import networkx
from metagpt.utils.common import aread, awrite
from metagpt.utils.graph_repository import SPO, GraphRepository
@ -55,12 +55,10 @@ class DiGraphRepository(GraphRepository):
if not path.exists():
path.mkdir(parents=True, exist_ok=True)
pathname = Path(path) / self.name
async with aiofiles.open(str(pathname.with_suffix(".json")), mode="w", encoding="utf-8") as writer:
await writer.write(data)
await awrite(filename=pathname.with_suffix(".json"), data=data, encoding="utf-8")
async def load(self, pathname: str | Path):
async with aiofiles.open(str(pathname), mode="r", encoding="utf-8") as reader:
data = await reader.read(-1)
data = await aread(filename=pathname, encoding="utf-8")
m = json.loads(data)
self._repo = networkx.node_link_graph(m)

View file

@ -55,6 +55,7 @@ class FileRepository:
"""
pathname = self.workdir / filename
pathname.parent.mkdir(parents=True, exist_ok=True)
content = content if content else "" # avoid `argument must be str, not None` to make it continue
async with aiofiles.open(str(pathname), mode="w") as writer:
await writer.write(content)
logger.info(f"save to: {str(pathname)}")
@ -138,6 +139,8 @@ class FileRepository:
files = self._git_repo.changed_files
relative_files = {}
for p, ct in files.items():
if ct.value == "D": # deleted
continue
try:
rf = Path(p).relative_to(self._relative_path)
except ValueError:

View file

@ -13,19 +13,25 @@ from typing import List
from pydantic import BaseModel
from metagpt.repo_parser import ClassInfo, RepoFileInfo
from metagpt.logs import logger
from metagpt.repo_parser import ClassInfo, ClassRelationship, RepoFileInfo
from metagpt.utils.common import concat_namespace
class GraphKeyword:
IS = "is"
OF = "Of"
ON = "On"
CLASS = "class"
FUNCTION = "function"
HAS_FUNCTION = "has_function"
SOURCE_CODE = "source_code"
NULL = "<null>"
GLOBAL_VARIABLE = "global_variable"
CLASS_FUNCTION = "class_function"
CLASS_PROPERTY = "class_property"
HAS_CLASS_FUNCTION = "has_class_function"
HAS_CLASS_PROPERTY = "has_class_property"
HAS_CLASS = "has_class"
HAS_PAGE_INFO = "has_page_info"
HAS_CLASS_VIEW = "has_class_view"
@ -73,11 +79,13 @@ class GraphRepository(ABC):
await graph_db.insert(subject=file_info.file, predicate=GraphKeyword.IS, object_=file_type)
for c in file_info.classes:
class_name = c.get("name", "")
# file -> class
await graph_db.insert(
subject=file_info.file,
predicate=GraphKeyword.HAS_CLASS,
object_=concat_namespace(file_info.file, class_name),
)
# class detail
await graph_db.insert(
subject=concat_namespace(file_info.file, class_name),
predicate=GraphKeyword.IS,
@ -85,12 +93,22 @@ class GraphRepository(ABC):
)
methods = c.get("methods", [])
for fn in methods:
await graph_db.insert(
subject=concat_namespace(file_info.file, class_name),
predicate=GraphKeyword.HAS_CLASS_FUNCTION,
object_=concat_namespace(file_info.file, class_name, fn),
)
await graph_db.insert(
subject=concat_namespace(file_info.file, class_name, fn),
predicate=GraphKeyword.IS,
object_=GraphKeyword.CLASS_FUNCTION,
)
for f in file_info.functions:
# file -> function
await graph_db.insert(
subject=file_info.file, predicate=GraphKeyword.HAS_FUNCTION, object_=concat_namespace(file_info.file, f)
)
# function detail
await graph_db.insert(
subject=concat_namespace(file_info.file, f), predicate=GraphKeyword.IS, object_=GraphKeyword.FUNCTION
)
@ -105,30 +123,37 @@ class GraphRepository(ABC):
await graph_db.insert(
subject=concat_namespace(file_info.file, *code_block.tokens),
predicate=GraphKeyword.HAS_PAGE_INFO,
object_=code_block.json(ensure_ascii=False),
object_=code_block.model_dump_json(),
)
for k, v in code_block.properties.items():
await graph_db.insert(
subject=concat_namespace(file_info.file, k, v),
predicate=GraphKeyword.HAS_PAGE_INFO,
object_=code_block.json(ensure_ascii=False),
object_=code_block.model_dump_json(),
)
@staticmethod
async def update_graph_db_with_class_views(graph_db: "GraphRepository", class_views: List[ClassInfo]):
for c in class_views:
filename, class_name = c.package.split(":", 1)
filename, _ = c.package.split(":", 1)
await graph_db.insert(subject=filename, predicate=GraphKeyword.IS, object_=GraphKeyword.SOURCE_CODE)
file_types = {".py": "python", ".js": "javascript"}
file_type = file_types.get(Path(filename).suffix, GraphKeyword.NULL)
await graph_db.insert(subject=filename, predicate=GraphKeyword.IS, object_=file_type)
await graph_db.insert(subject=filename, predicate=GraphKeyword.HAS_CLASS, object_=class_name)
await graph_db.insert(subject=filename, predicate=GraphKeyword.HAS_CLASS, object_=c.package)
await graph_db.insert(
subject=c.package,
predicate=GraphKeyword.IS,
object_=GraphKeyword.CLASS,
)
for vn, vt in c.attributes.items():
# class -> property
await graph_db.insert(
subject=c.package,
predicate=GraphKeyword.HAS_CLASS_PROPERTY,
object_=concat_namespace(c.package, vn),
)
# property detail
await graph_db.insert(
subject=concat_namespace(c.package, vn),
predicate=GraphKeyword.IS,
@ -138,6 +163,15 @@ class GraphRepository(ABC):
subject=concat_namespace(c.package, vn), predicate=GraphKeyword.HAS_TYPE_DESC, object_=vt
)
for fn, desc in c.methods.items():
if "</I>" in desc and "<I>" not in desc:
logger.error(desc)
# class -> function
await graph_db.insert(
subject=c.package,
predicate=GraphKeyword.HAS_CLASS_FUNCTION,
object_=concat_namespace(c.package, fn),
)
# function detail
await graph_db.insert(
subject=concat_namespace(c.package, fn),
predicate=GraphKeyword.IS,
@ -148,3 +182,19 @@ class GraphRepository(ABC):
predicate=GraphKeyword.HAS_ARGS_DESC,
object_=desc,
)
@staticmethod
async def update_graph_db_with_class_relationship_views(
graph_db: "GraphRepository", relationship_views: List[ClassRelationship]
):
for r in relationship_views:
await graph_db.insert(
subject=r.src, predicate=GraphKeyword.IS + r.relationship + GraphKeyword.OF, object_=r.dest
)
if not r.label:
continue
await graph_db.insert(
subject=r.src,
predicate=GraphKeyword.IS + r.relationship + GraphKeyword.ON,
object_=concat_namespace(r.dest, r.label),
)

View file

@ -120,6 +120,15 @@ def repair_json_format(output: str) -> str:
elif output.startswith("{") and output.endswith("]"):
output = output[:-1] + "}"
# remove `#` in output json str, usually appeared in `glm-4`
arr = output.split("\n")
new_arr = []
for line in arr:
idx = line.find("#")
if idx >= 0:
line = line[:idx]
new_arr.append(line)
output = "\n".join(new_arr)
return output
@ -168,15 +177,17 @@ def repair_invalid_json(output: str, error: str) -> str:
example 1. json.decoder.JSONDecodeError: Expecting ',' delimiter: line 154 column 1 (char 2765)
example 2. xxx.JSONDecodeError: Expecting property name enclosed in double quotes: line 14 column 1 (char 266)
"""
pattern = r"line ([0-9]+)"
pattern = r"line ([0-9]+) column ([0-9]+)"
matches = re.findall(pattern, error, re.DOTALL)
if len(matches) > 0:
line_no = int(matches[0]) - 1
line_no = int(matches[0][0]) - 1
col_no = int(matches[0][1]) - 1
# due to CustomDecoder can handle `"": ''` or `'': ""`, so convert `"""` -> `"`, `'''` -> `'`
output = output.replace('"""', '"').replace("'''", '"')
arr = output.split("\n")
rline = arr[line_no] # raw line
line = arr[line_no].strip()
# different general problems
if line.endswith("],"):
@ -187,9 +198,12 @@ def repair_invalid_json(output: str, error: str) -> str:
new_line = line.replace("}", "")
elif line.endswith("},") and output.endswith("},"):
new_line = line[:-1]
elif '",' not in line and "," not in line:
elif (rline[col_no] in ["'", '"']) and (line.startswith('"') or line.startswith("'")) and "," not in line:
# problem, `"""` or `'''` without `,`
new_line = f",{line}"
elif '",' not in line and "," not in line and '"' not in line:
new_line = f'{line}",'
elif "," not in line:
elif not line.endswith(","):
# problem, miss char `,` at the end.
new_line = f"{line},"
elif "," in line and len(line) == 1:

View file

@ -27,7 +27,8 @@ TOKEN_COSTS = {
"gpt-4-0613": {"prompt": 0.06, "completion": 0.12},
"gpt-4-1106-preview": {"prompt": 0.01, "completion": 0.03},
"text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0},
"chatglm_turbo": {"prompt": 0.0, "completion": 0.00069}, # 32k version, prompt + completion tokens=0.005¥/k-tokens
"glm-3-turbo": {"prompt": 0.0, "completion": 0.0007}, # 128k version, prompt + completion tokens=0.005¥/k-tokens
"glm-4": {"prompt": 0.0, "completion": 0.014}, # 128k version, prompt + completion tokens=0.1¥/k-tokens
"gemini-pro": {"prompt": 0.00025, "completion": 0.0005},
}

View file

@ -22,7 +22,7 @@ pandas==2.0.3
pydantic==2.5.3
#pygame==2.1.3
#pymilvus==2.2.8
pytest==7.2.2
# pytest==7.2.2 # required by the "test" extras
python_docx==0.8.11
PyYAML==6.0.1
# sentence_transformers==2.2.2
@ -38,7 +38,7 @@ typing-inspect==0.8.0
typing_extensions==4.9.0
libcst==1.0.1
qdrant-client==1.7.0
pytest-mock==3.11.1
# pytest-mock==3.11.1 # required by the "test" extras
# open-interpreter==0.1.7; python_version>"3.9" # Conflict with openai 1.x
ta==0.10.2
semantic-kernel==0.4.3.dev0
@ -50,12 +50,12 @@ aioredis~=2.0.1 # Used by metagpt/utils/redis.py
websocket-client==1.6.2
aiofiles==23.2.1
gitpython==3.1.40
zhipuai==1.0.7
zhipuai==2.0.1
socksio~=1.0.0
gitignore-parser==0.1.9
# connexion[uvicorn]~=3.0.5 # Used by metagpt/tools/openapi_v3_hello.py
websockets~=12.0
networkx~=3.2.1
google-generativeai==0.3.2
playwright==1.40.0
# playwright==1.40.0 # required by the "playwright" extras
anytree

View file

@ -46,6 +46,8 @@ extras_require["test"] = [
"chromadb==0.4.14",
"gradio==3.0.0",
"grpcio-status==1.48.2",
"mock==5.1.0",
"pylint==3.0.3",
]
extras_require["pyppeteer"] = [
@ -56,7 +58,7 @@ extras_require["dev"] = (["pylint~=3.0.3", "black~=23.3.0", "isort~=5.12.0", "pr
setup(
name="metagpt",
version="0.5.2",
version="0.6.0",
description="The Multi-Agent Framework",
long_description=long_description,
long_description_content_type="text/markdown",

View file

@ -11,7 +11,7 @@ import json
import logging
import os
import re
from typing import Optional
import uuid
import pytest
@ -19,49 +19,13 @@ from metagpt.config import CONFIG, Config
from metagpt.const import DEFAULT_WORKSPACE_ROOT, TEST_DATA_PATH
from metagpt.llm import LLM
from metagpt.logs import logger
from metagpt.provider.openai_api import OpenAILLM
from metagpt.utils.git_repository import GitRepository
from tests.mock.mock_llm import MockLLM
class MockLLM(OpenAILLM):
    """OpenAI LLM wrapper that memoizes `aask` responses in `rsp_cache`."""

    # prompt -> response cache shared by all instances (class-level mutable
    # default is intentional here so the session fixture can populate it).
    rsp_cache: dict = {}

    async def original_aask(
        self,
        msg: str,
        system_msgs: Optional[list[str]] = None,
        format_msgs: Optional[list[dict[str, str]]] = None,
        timeout=3,
        stream=True,
    ):
        """A copy of metagpt.provider.base_llm.BaseLLM.aask, we can't use super().aask because it will be mocked"""
        # Build the message list: explicit system messages win; otherwise use
        # the default system message when the provider enables system prompts.
        if system_msgs:
            message = self._system_msgs(system_msgs)
        else:
            message = [self._default_system_msg()] if self.use_system_prompt else []
        if format_msgs:
            message.extend(format_msgs)
        message.append(self._user_msg(msg))
        rsp = await self.acompletion_text(message, stream=stream, timeout=timeout)
        return rsp

    async def aask(
        self,
        msg: str,
        system_msgs: Optional[list[str]] = None,
        format_msgs: Optional[list[dict[str, str]]] = None,
        timeout=3,
        stream=True,
    ) -> str:
        """Cached variant of `aask`: real LLM call on cache miss, else replay.

        NOTE(review): the cache key is `msg` alone — system/format messages are
        ignored for lookup, so differing system prompts share one cached reply.
        """
        if msg not in self.rsp_cache:
            # Call the original unmocked method
            rsp = await self.original_aask(msg, system_msgs, format_msgs, timeout, stream)
            logger.info(f"Added '{rsp[:20]}' ... to response cache")
            self.rsp_cache[msg] = rsp
            return rsp
        else:
            logger.info("Use response cache")
            return self.rsp_cache[msg]
RSP_CACHE_NEW = {} # used globally for producing new and useful only response cache
ALLOW_OPENAI_API_CALL = int(
os.environ.get("ALLOW_OPENAI_API_CALL", 1)
) # NOTE: should change to default 0 (False) once mock is complete
@pytest.fixture(scope="session")
@ -75,16 +39,37 @@ def rsp_cache():
else:
rsp_cache_json = {}
yield rsp_cache_json
with open(new_rsp_cache_file_path, "w") as f2:
with open(rsp_cache_file_path, "w") as f2:
json.dump(rsp_cache_json, f2, indent=4, ensure_ascii=False)
with open(new_rsp_cache_file_path, "w") as f2:
json.dump(RSP_CACHE_NEW, f2, indent=4, ensure_ascii=False)
@pytest.fixture(scope="function")
def llm_mock(rsp_cache, mocker):
llm = MockLLM()
# Hook to capture the test result
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """Attach the 'call'-phase report to the test item for later inspection."""
    outcome = yield
    report = outcome.get_result()
    if report.when != "call":
        return
    # Only the test body's outcome matters (not setup/teardown phases).
    item.test_outcome = report
@pytest.fixture(scope="function", autouse=True)
def llm_mock(rsp_cache, mocker, request):
    """Autouse fixture: patch BaseLLM.aask/aask_batch with a caching MockLLM.

    After the test runs, newly observed prompt/response candidates are folded
    back into the shared cache — but only when the test passed, so failing
    tests never poison the recorded responses.
    """
    llm = MockLLM(allow_open_api_call=ALLOW_OPENAI_API_CALL)
    llm.rsp_cache = rsp_cache
    # Patch at the base class so every provider instance is intercepted.
    mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", llm.aask)
    mocker.patch("metagpt.provider.base_llm.BaseLLM.aask_batch", llm.aask_batch)
    yield mocker
    # `test_outcome` is set by the pytest_runtest_makereport hook; absent when
    # the test never reached its call phase.
    if hasattr(request.node, "test_outcome") and request.node.test_outcome.passed:
        if llm.rsp_candidates:
            for rsp_candidate in llm.rsp_candidates:
                # Each candidate is a single-entry {prompt: response} dict.
                cand_key = list(rsp_candidate.keys())[0]
                cand_value = list(rsp_candidate.values())[0]
                if cand_key not in llm.rsp_cache:
                    logger.info(f"Added '{cand_key[:100]} ... -> {cand_value[:20]} ...' to response cache")
                # NOTE(review): update() is idempotent for already-cached keys;
                # RSP_CACHE_NEW accumulates every candidate used by passing
                # tests — confirm intended nesting against upstream conftest.
                llm.rsp_cache.update(rsp_candidate)
                RSP_CACHE_NEW.update(rsp_candidate)
class Context:
@ -111,7 +96,7 @@ def llm_api():
logger.info("Tearing down the test")
@pytest.fixture(scope="session")
@pytest.fixture
def proxy():
pattern = re.compile(
rb"(?P<method>[a-zA-Z]+) (?P<uri>(\w+://)?(?P<host>[^\s\'\"<>\[\]{}|/:]+)(:(?P<port>\d+))?[^\s\'\"<>\[\]{}|]*) "
@ -135,8 +120,11 @@ def proxy():
remote_writer.write(data)
await asyncio.gather(pipe(reader, remote_writer), pipe(remote_reader, writer))
server = asyncio.get_event_loop().run_until_complete(asyncio.start_server(handle_client, "127.0.0.1", 0))
return "http://{}:{}".format(*server.sockets[0].getsockname())
async def proxy_func():
server = await asyncio.start_server(handle_client, "127.0.0.1", 0)
return server, "http://{}:{}".format(*server.sockets[0].getsockname())
return proxy_func()
# see https://github.com/Delgan/loguru/issues/59#issuecomment-466591978
@ -151,9 +139,9 @@ def loguru_caplog(caplog):
# init & dispose git repo
@pytest.fixture(scope="session", autouse=True)
@pytest.fixture(scope="function", autouse=True)
def setup_and_teardown_git_repo(request):
CONFIG.git_repo = GitRepository(local_path=DEFAULT_WORKSPACE_ROOT / "unittest")
CONFIG.git_repo = GitRepository(local_path=DEFAULT_WORKSPACE_ROOT / f"unittest/{uuid.uuid4().hex}")
CONFIG.git_reinit = True
# Destroy git repo at the end of the test session.
@ -167,3 +155,48 @@ def setup_and_teardown_git_repo(request):
@pytest.fixture(scope="session", autouse=True)
def init_config():
    # Construct Config once per test session; presumably its constructor loads
    # configuration into the global CONFIG — confirm against metagpt.config.
    Config()
@pytest.fixture(scope="function")
def new_filename(mocker):
    # NOTE: Mock new filename to make reproducible llm aask, should consider changing after implementing requirement segmentation
    def _fixed_filename():
        return "20240101"

    mocker.patch("metagpt.utils.file_repository.FileRepository.new_filename", _fixed_filename)
    yield mocker
@pytest.fixture
def aiohttp_mocker(mocker):
    """Patch aiohttp.ClientSession so all requests return a canned response.

    Yields the shared mock response object; tests call `set_json(...)` on it
    to choose what `await resp.json()` returns.
    """

    class MockAioResponse:
        # Mimics aiohttp's response: json() is async and returns whatever
        # the test stored via set_json().
        async def json(self, *args, **kwargs):
            return self._json

        def set_json(self, json):
            self._json = json

    response = MockAioResponse()

    class MockCTXMng:
        # Supports both usage styles: `async with session.get(...)` and
        # `await session.get(...)`.
        async def __aenter__(self):
            return response

        async def __aexit__(self, *args, **kwargs):
            pass

        def __await__(self):
            # Bare yield satisfies the awaitable protocol; the generator's
            # return value becomes the await result.
            yield
            return response

    def mock_request(self, method, url, **kwargs):
        return MockCTXMng()

    def wrap(method):
        # Bind `method` so the per-verb shortcuts (get/post/...) route
        # through mock_request with the right HTTP method.
        def run(self, url, **kwargs):
            return mock_request(self, method, url, **kwargs)

        return run

    mocker.patch("aiohttp.ClientSession.request", mock_request)
    # NOTE(review): only these four verbs are patched — put/head/options are
    # not; confirm no test relies on them.
    for i in ["get", "post", "delete", "patch"]:
        mocker.patch(f"aiohttp.ClientSession.{i}", wrap(i))
    yield response

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -0,0 +1,258 @@
{
"search_metadata": {
"id": "65952b400ead410fae1f548f",
"status": "Success",
"json_endpoint": "https://serpapi.com/searches/f3454e001dacdae1/65952b400ead410fae1f548f.json",
"created_at": "2024-01-03 09:39:12 UTC",
"processed_at": "2024-01-03 09:39:12 UTC",
"google_url": "https://www.google.com/search?q=metagpt&oq=metagpt&hl=en&gl=us&num=8&sourceid=chrome&ie=UTF-8",
"raw_html_file": "https://serpapi.com/searches/f3454e001dacdae1/65952b400ead410fae1f548f.html",
"total_time_taken": 1.78
},
"search_parameters": {
"engine": "google",
"q": "metagpt",
"google_domain": "google.com",
"hl": "en",
"gl": "us",
"num": "8",
"device": "desktop"
},
"search_information": {
"query_displayed": "metagpt",
"total_results": 110000,
"time_taken_displayed": 0.3,
"menu_items": [
{
"position": 1,
"title": "News",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=metagpt&tbm=nws&source=lnms&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ0pQJegQIDRAB",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=metagpt&tbm=nws"
},
{
"position": 2,
"title": "Images",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=metagpt&tbm=isch&source=lnms&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ0pQJegQIDBAB",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google_images&gl=us&google_domain=google.com&hl=en&q=metagpt"
},
{
"position": 3,
"title": "Perspectives",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=metagpt&uds=AMIYvT-LYN0C-KgfpAf4hDGmHUqYzPt2YD2Sjup6GzZxffnKpRHzrkDtH-YMw_l16Rw3319fYKZIWOgxIizOkCn4WaiWmK--Gd_KWgcdk2AGw9K3og-5w2Q&udm=4&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQs6gLegQICxAB",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=metagpt"
},
{
"position": 4,
"title": "Download",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=MetaGPT+download&uds=AMIYvT-5zq-IxPfUvCGLrNgPl7Seu8ODWYIoXhisgEvQZV3Y8pl5TzJLGfCHEIw7og1p8xJsV4GDoO9mlugZYdQpedp8elSjLy5ABJfq6NUCY0MAtXsFqu8&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQxKsJegQIChAB&ictx=0",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=MetaGPT+download"
}
],
"organic_results_state": "Results for exact spelling"
},
"inline_videos": [
{
"position": 1,
"title": "How To Install MetaGPT - Build A Startup With One Prompt!!",
"link": "https://www.youtube.com/watch?v=uT75J_KG_aY",
"thumbnail": "https://serpapi.com/searches/65952b400ead410fae1f548f/images/a0db2f9f70f02dd11e3d3d4154df9fd65b46b2fbf4804f7038c9ce99c8efea1c.jpeg",
"channel": "Matthew Berman",
"duration": "6:36",
"platform": "YouTube",
"date": "Aug 14, 2023"
},
{
"position": 2,
"title": "MetaGPT HUGE Update: Autonomous AI Agents with ...",
"link": "https://www.youtube.com/watch?v=Xyws6iI-eH8",
"thumbnail": "https://serpapi.com/searches/65952b400ead410fae1f548f/images/a0db2f9f70f02dd1d578e6031265d66299cf6aecd327454cdf67b92808f3dd86.jpeg",
"channel": "WorldofAI",
"duration": "11:38",
"platform": "YouTube",
"date": "1 week ago"
},
{
"position": 3,
"title": "\ud83d\ude80 MetaGPT Setup: Launch a Startup with One \u270d\ufe0f Prompt!",
"link": "https://www.youtube.com/watch?v=nqZlTV_L6Ao",
"thumbnail": "https://serpapi.com/searches/65952b400ead410fae1f548f/images/a0db2f9f70f02dd1c5666bd22292fdc357357dac89294aabb55ebea0a40ce322.jpeg",
"channel": "Prompt Engineering",
"duration": "14:15",
"platform": "YouTube",
"date": "Sep 4, 2023",
"key_moments": [
{
"time": "00:00",
"title": "Intro",
"link": "https://www.youtube.com/watch?v=nqZlTV_L6Ao&t=0",
"thumbnail": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQW-YKGXQDHplRpEDgL5Q-HlJ8HggTw_ghp_KWPh8xUcQ&s"
},
{
"time": "00:12",
"title": "What is MetaGPT",
"link": "https://www.youtube.com/watch?v=nqZlTV_L6Ao&t=12",
"thumbnail": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRJ4RRAXOG6yvGPYqkuj5cMoiyYdAN6g7E3VU04SA3P7w&s"
},
{
"time": "01:06",
"title": "Setup",
"link": "https://www.youtube.com/watch?v=nqZlTV_L6Ao&t=66",
"thumbnail": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTDlJBrAtfBkC8zI9wY4dOqVIaNFbjcYSZr4M1ZnD7RSw&s"
},
{
"time": "05:23",
"title": "Changing configuration",
"link": "https://www.youtube.com/watch?v=nqZlTV_L6Ao&t=323",
"thumbnail": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcT8MbsIRVXJy__UE4ba0FoCTMGfrykasHm3UGvSzMQAtQ&s"
}
]
}
],
"organic_results": [
{
"position": 1,
"title": "geekan/MetaGPT: \ud83c\udf1f The Multi-Agent Framework",
"link": "https://github.com/geekan/MetaGPT",
"redirect_link": "https://www.google.comhttps://github.com/geekan/MetaGPT",
"displayed_link": "https://github.com \u203a geekan \u203a MetaGPT",
"favicon": "https://serpapi.com/searches/65952b400ead410fae1f548f/images/f37f87ccfb08b6fc2fe7e2076c022e7690f9b18357b8e5feb75a30ffbaaabfb1.png",
"snippet": "MetaGPT takes a one line requirement as input and outputs user stories / competitive analysis / requirements / data structures / APIs / documents, etc.",
"snippet_highlighted_words": [
"MetaGPT"
],
"sitelinks": {
"inline": [
{
"title": "Roadmap",
"link": "https://github.com/geekan/MetaGPT/blob/main/docs/ROADMAP.md"
},
{
"title": "README.md",
"link": "https://github.com/geekan/MetaGPT/blob/main/README.md"
},
{
"title": "Issues",
"link": "https://github.com/geekan/MetaGPT/issues"
},
{
"title": "Actions",
"link": "https://github.com/geekan/MetaGPT/actions"
}
]
},
"source": "GitHub"
},
{
"position": 2,
"title": "MetaGPT: Meta Programming for A Multi-Agent ...",
"link": "https://arxiv.org/abs/2308.00352",
"redirect_link": "https://www.google.comhttps://arxiv.org/abs/2308.00352",
"displayed_link": "https://arxiv.org \u203a cs",
"favicon": "https://serpapi.com/searches/65952b400ead410fae1f548f/images/f37f87ccfb08b6fc2fe7e2076c022e76592372342f3f5dd76573e051b50f1bce.png",
"author": "by S Hong",
"cited_by": "Cited by 53",
"extracted_cited_by": 53,
"date": "2023",
"snippet": "Abstract:Remarkable progress has been made on automated problem solving through societies of agents based on large language models (LLMs).",
"source": "arXiv"
},
{
"position": 3,
"title": "MetaGPT: a Multi-Agent Framework to Automate Your ...",
"link": "https://medium.datadriveninvestor.com/metagpt-a-multi-agent-framework-to-automate-your-software-company-4b6ae747cc36",
"redirect_link": "https://www.google.comhttps://medium.datadriveninvestor.com/metagpt-a-multi-agent-framework-to-automate-your-software-company-4b6ae747cc36",
"displayed_link": "https://medium.datadriveninvestor.com \u203a metagpt-a-...",
"favicon": "https://serpapi.com/searches/65952b400ead410fae1f548f/images/f37f87ccfb08b6fc2fe7e2076c022e76e8319069677ee18a99026fb1e05709cf.png",
"snippet": "MetaGPT is about to reach 10000 stars on Github. It's a Multi-Agent Framework that can behave as an engineer, product manager, architect, project managers.",
"snippet_highlighted_words": [
"MetaGPT"
],
"source": "DataDrivenInvestor"
},
{
"position": 4,
"title": "MetaGPT: Complete Guide to the Best AI Agent Available ...",
"link": "https://www.unite.ai/metagpt-complete-guide-to-the-best-ai-agent-available-right-now/",
"redirect_link": "https://www.google.comhttps://www.unite.ai/metagpt-complete-guide-to-the-best-ai-agent-available-right-now/",
"displayed_link": "https://www.unite.ai \u203a metagpt-complete-guide-to-the-...",
"favicon": "https://serpapi.com/searches/65952b400ead410fae1f548f/images/f37f87ccfb08b6fc2fe7e2076c022e76334a7b2eeab09f16973a82a209ee6339.png",
"date": "Sep 11, 2023",
"snippet": "Discover why MetaGPT outperforms AutoGPT, BabyAgi, and other AI agents in complex coding tasks. Our in-depth article guides you through the ...",
"snippet_highlighted_words": [
"MetaGPT"
],
"source": "Unite.AI"
}
],
"related_searches": [
{
"block_position": 1,
"query": "metagpt online",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=MetaGPT+online&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ1QJ6BAglEAE",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=MetaGPT+online"
},
{
"block_position": 1,
"query": "metagpt paper",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=MetaGPT+paper&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ1QJ6BAgoEAE",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=MetaGPT+paper"
},
{
"block_position": 1,
"query": "Metagpt review",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=Metagpt+review&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ1QJ6BAgrEAE",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=Metagpt+review"
},
{
"block_position": 1,
"query": "Metagpt download",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=Metagpt+download&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ1QJ6BAgpEAE",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=Metagpt+download"
},
{
"block_position": 1,
"query": "metagpt ai",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=MetaGPT+AI&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ1QJ6BAgeEAE",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=MetaGPT+AI"
},
{
"block_position": 1,
"query": "metagpt github",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=Metagpt+github&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ1QJ6BAgfEAE",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=Metagpt+github"
},
{
"block_position": 1,
"query": "metagpt reddit",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=MetaGPT+Reddit&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ1QJ6BAgnEAE",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=MetaGPT+Reddit"
},
{
"block_position": 1,
"query": "how to use metagpt",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=How+to+use+MetaGPT&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ1QJ6BAgqEAE",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=How+to+use+MetaGPT"
}
],
"pagination": {
"current": 1,
"next": "https://www.google.com/search?q=metagpt&oq=metagpt&hl=en&gl=us&num=8&start=8&sourceid=chrome&ie=UTF-8",
"other_pages": {
"2": "https://www.google.com/search?q=metagpt&oq=metagpt&hl=en&gl=us&num=8&start=8&sourceid=chrome&ie=UTF-8",
"3": "https://www.google.com/search?q=metagpt&oq=metagpt&hl=en&gl=us&num=8&start=16&sourceid=chrome&ie=UTF-8",
"4": "https://www.google.com/search?q=metagpt&oq=metagpt&hl=en&gl=us&num=8&start=24&sourceid=chrome&ie=UTF-8",
"5": "https://www.google.com/search?q=metagpt&oq=metagpt&hl=en&gl=us&num=8&start=32&sourceid=chrome&ie=UTF-8"
}
},
"serpapi_pagination": {
"current": 1,
"next_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=metagpt&start=8",
"next": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=metagpt&start=8",
"other_pages": {
"2": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=metagpt&start=8",
"3": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=metagpt&start=16",
"4": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=metagpt&start=24",
"5": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=metagpt&start=32"
}
}
}

View file

@ -0,0 +1,350 @@
{
"search_metadata": {
"id": "65952b400ead410fae1f548f",
"status": "Success",
"json_endpoint": "https://serpapi.com/searches/f3454e001dacdae1/65952b400ead410fae1f548f.json",
"created_at": "2024-01-03 09:39:12 UTC",
"processed_at": "2024-01-03 09:39:12 UTC",
"google_url": "https://www.google.com/search?q=metagpt&oq=metagpt&hl=en&gl=us&num=8&sourceid=chrome&ie=UTF-8",
"raw_html_file": "https://serpapi.com/searches/f3454e001dacdae1/65952b400ead410fae1f548f.html",
"total_time_taken": 1.78
},
"search_parameters": {
"engine": "google",
"q": "metagpt",
"google_domain": "google.com",
"hl": "en",
"gl": "us",
"num": "8",
"device": "desktop"
},
"search_information": {
"query_displayed": "metagpt",
"total_results": 110000,
"time_taken_displayed": 0.3,
"menu_items": [
{
"position": 1,
"title": "News",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=metagpt&tbm=nws&source=lnms&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ0pQJegQIDRAB",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=metagpt&tbm=nws"
},
{
"position": 2,
"title": "Images",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=metagpt&tbm=isch&source=lnms&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ0pQJegQIDBAB",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google_images&gl=us&google_domain=google.com&hl=en&q=metagpt"
},
{
"position": 3,
"title": "Perspectives",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=metagpt&uds=AMIYvT-LYN0C-KgfpAf4hDGmHUqYzPt2YD2Sjup6GzZxffnKpRHzrkDtH-YMw_l16Rw3319fYKZIWOgxIizOkCn4WaiWmK--Gd_KWgcdk2AGw9K3og-5w2Q&udm=4&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQs6gLegQICxAB",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=metagpt"
},
{
"position": 4,
"title": "Download",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=MetaGPT+download&uds=AMIYvT-5zq-IxPfUvCGLrNgPl7Seu8ODWYIoXhisgEvQZV3Y8pl5TzJLGfCHEIw7og1p8xJsV4GDoO9mlugZYdQpedp8elSjLy5ABJfq6NUCY0MAtXsFqu8&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQxKsJegQIChAB&ictx=0",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=MetaGPT+download"
},
{
"position": 5,
"title": "Videos",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=metagpt&tbm=vid&source=lnms&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ0pQJegQINxAB",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google_videos&gl=us&google_domain=google.com&hl=en&num=8&q=metagpt"
},
{
"position": 6,
"title": "Shopping",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=metagpt&tbm=shop&source=lnms",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google_shopping&gl=us&google_domain=google.com&hl=en&num=8&q=metagpt"
},
{
"position": 7,
"title": "Review",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=MetaGPT+review&uds=AMIYvT9VP83904q4-J94lPXwCEnwL3j5QAtL1fmmW1S1R5RgwRLmxvuFVQ7OcN0dFbrjXQkUwlZlHOt9GNXyfomxI6gDvZxA6gokeHbKUq_anMgIkmFv3IY&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQxKsJegQIOhAB&ictx=0",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=MetaGPT+review"
},
{
"position": 8,
"title": "Online",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=MetaGPT+online&uds=AMIYvT8Ap1YYLsvgKVUJMi_v4l0FNZz9UYjvpQyVx07CgVk-hay-mNemgcUIz5ipc8mmv44wplpB3umGIvKSQMEgsHCY8aTWe6FLDtUjGT9hv-pihBT6dYw&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQxKsJegQIOxAB&ictx=0",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=MetaGPT+online"
},
{
"position": 9,
"title": "App",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=Metagpt+app&uds=AMIYvT_YL6Iqd-0G_f_v9e2v-JybHFZesGv-WkSjqZQUhGvjb7qTf3NoIkE_8qY5quBbzv_GSlurBfqWahyxbnyVMX5mlfpqn-U3E-KHZ3PAJcM8mO6MflU&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQxKsJegQIORAB&ictx=0",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=Metagpt+app"
}
],
"organic_results_state": "Results for exact spelling"
},
"inline_videos": [
{
"position": 1,
"title": "How To Install MetaGPT - Build A Startup With One Prompt!!",
"link": "https://www.youtube.com/watch?v=uT75J_KG_aY",
"thumbnail": "https://serpapi.com/searches/65952b400ead410fae1f548f/images/a0db2f9f70f02dd11e3d3d4154df9fd65b46b2fbf4804f7038c9ce99c8efea1c.jpeg",
"channel": "Matthew Berman",
"duration": "6:36",
"platform": "YouTube",
"date": "Aug 14, 2023"
},
{
"position": 2,
"title": "MetaGPT HUGE Update: Autonomous AI Agents with ...",
"link": "https://www.youtube.com/watch?v=Xyws6iI-eH8",
"thumbnail": "https://serpapi.com/searches/65952b400ead410fae1f548f/images/a0db2f9f70f02dd1d578e6031265d66299cf6aecd327454cdf67b92808f3dd86.jpeg",
"channel": "WorldofAI",
"duration": "11:38",
"platform": "YouTube",
"date": "1 week ago"
},
{
"position": 3,
"title": "\ud83d\ude80 MetaGPT Setup: Launch a Startup with One \u270d\ufe0f Prompt!",
"link": "https://www.youtube.com/watch?v=nqZlTV_L6Ao",
"thumbnail": "https://serpapi.com/searches/65952b400ead410fae1f548f/images/a0db2f9f70f02dd1c5666bd22292fdc357357dac89294aabb55ebea0a40ce322.jpeg",
"channel": "Prompt Engineering",
"duration": "14:15",
"platform": "YouTube",
"date": "Sep 4, 2023",
"key_moments": [
{
"time": "00:00",
"title": "Intro",
"link": "https://www.youtube.com/watch?v=nqZlTV_L6Ao&t=0",
"thumbnail": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQW-YKGXQDHplRpEDgL5Q-HlJ8HggTw_ghp_KWPh8xUcQ&s"
},
{
"time": "00:12",
"title": "What is MetaGPT",
"link": "https://www.youtube.com/watch?v=nqZlTV_L6Ao&t=12",
"thumbnail": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRJ4RRAXOG6yvGPYqkuj5cMoiyYdAN6g7E3VU04SA3P7w&s"
},
{
"time": "01:06",
"title": "Setup",
"link": "https://www.youtube.com/watch?v=nqZlTV_L6Ao&t=66",
"thumbnail": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTDlJBrAtfBkC8zI9wY4dOqVIaNFbjcYSZr4M1ZnD7RSw&s"
},
{
"time": "05:23",
"title": "Changing configuration",
"link": "https://www.youtube.com/watch?v=nqZlTV_L6Ao&t=323",
"thumbnail": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcT8MbsIRVXJy__UE4ba0FoCTMGfrykasHm3UGvSzMQAtQ&s"
},
{
"time": "06:35",
"title": "How to Run",
"link": "https://www.youtube.com/watch?v=nqZlTV_L6Ao&t=395",
"thumbnail": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRuX6mOUVQVRzvnkOPYNcDpcazRC1QGeHhZh-Az9btUNA&s"
},
{
"time": "09:02",
"title": "What outputs to expect",
"link": "https://www.youtube.com/watch?v=nqZlTV_L6Ao&t=542",
"thumbnail": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTFnNqvPfGrPnKJTJ1iOHGSNp6sVR5jn0Zy5N2JSGfeEQ&s"
},
{
"time": "10:45",
"title": "Generated Design Documents",
"link": "https://www.youtube.com/watch?v=nqZlTV_L6Ao&t=645",
"thumbnail": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSN3I0gxudI4Mew93w_tw34HmWREz5XX8ArebReM3Y2_g&s"
},
{
"time": "12:25",
"title": "Run the created code base",
"link": "https://www.youtube.com/watch?v=nqZlTV_L6Ao&t=745",
"thumbnail": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQLBx5bgKZ2Gqsu-PsIXuvtM0SBmHvBCndmKtresgqFCg&s"
}
]
}
],
"organic_results": [
{
"position": 1,
"title": "geekan/MetaGPT: \ud83c\udf1f The Multi-Agent Framework",
"link": "https://github.com/geekan/MetaGPT",
"redirect_link": "https://www.google.comhttps://github.com/geekan/MetaGPT",
"displayed_link": "https://github.com \u203a geekan \u203a MetaGPT",
"favicon": "https://serpapi.com/searches/65952b400ead410fae1f548f/images/f37f87ccfb08b6fc2fe7e2076c022e7690f9b18357b8e5feb75a30ffbaaabfb1.png",
"snippet": "MetaGPT takes a one line requirement as input and outputs user stories / competitive analysis / requirements / data structures / APIs / documents, etc.",
"snippet_highlighted_words": [
"MetaGPT"
],
"sitelinks": {
"inline": [
{
"title": "Roadmap",
"link": "https://github.com/geekan/MetaGPT/blob/main/docs/ROADMAP.md"
},
{
"title": "README.md",
"link": "https://github.com/geekan/MetaGPT/blob/main/README.md"
},
{
"title": "Issues",
"link": "https://github.com/geekan/MetaGPT/issues"
},
{
"title": "Actions",
"link": "https://github.com/geekan/MetaGPT/actions"
}
]
},
"source": "GitHub"
},
{
"position": 2,
"title": "MetaGPT: Meta Programming for A Multi-Agent ...",
"link": "https://arxiv.org/abs/2308.00352",
"redirect_link": "https://www.google.comhttps://arxiv.org/abs/2308.00352",
"displayed_link": "https://arxiv.org \u203a cs",
"favicon": "https://serpapi.com/searches/65952b400ead410fae1f548f/images/f37f87ccfb08b6fc2fe7e2076c022e76592372342f3f5dd76573e051b50f1bce.png",
"author": "by S Hong",
"cited_by": "Cited by 53",
"extracted_cited_by": 53,
"date": "2023",
"snippet": "Abstract:Remarkable progress has been made on automated problem solving through societies of agents based on large language models (LLMs).",
"source": "arXiv"
},
{
"position": 3,
"title": "MetaGPT: a Multi-Agent Framework to Automate Your ...",
"link": "https://medium.datadriveninvestor.com/metagpt-a-multi-agent-framework-to-automate-your-software-company-4b6ae747cc36",
"redirect_link": "https://www.google.comhttps://medium.datadriveninvestor.com/metagpt-a-multi-agent-framework-to-automate-your-software-company-4b6ae747cc36",
"displayed_link": "https://medium.datadriveninvestor.com \u203a metagpt-a-...",
"favicon": "https://serpapi.com/searches/65952b400ead410fae1f548f/images/f37f87ccfb08b6fc2fe7e2076c022e76e8319069677ee18a99026fb1e05709cf.png",
"snippet": "MetaGPT is about to reach 10000 stars on Github. It's a Multi-Agent Framework that can behave as an engineer, product manager, architect, project managers.",
"snippet_highlighted_words": [
"MetaGPT"
],
"source": "DataDrivenInvestor"
},
{
"position": 4,
"title": "MetaGPT: Complete Guide to the Best AI Agent Available ...",
"link": "https://www.unite.ai/metagpt-complete-guide-to-the-best-ai-agent-available-right-now/",
"redirect_link": "https://www.google.comhttps://www.unite.ai/metagpt-complete-guide-to-the-best-ai-agent-available-right-now/",
"displayed_link": "https://www.unite.ai \u203a metagpt-complete-guide-to-the-...",
"favicon": "https://serpapi.com/searches/65952b400ead410fae1f548f/images/f37f87ccfb08b6fc2fe7e2076c022e76334a7b2eeab09f16973a82a209ee6339.png",
"date": "Sep 11, 2023",
"snippet": "Discover why MetaGPT outperforms AutoGPT, BabyAgi, and other AI agents in complex coding tasks. Our in-depth article guides you through the ...",
"snippet_highlighted_words": [
"MetaGPT"
],
"source": "Unite.AI"
},
{
"position": 5,
"title": "MetaGPT AI technology page - Lablab.ai",
"link": "https://lablab.ai/tech/metagpt",
"redirect_link": "https://www.google.comhttps://lablab.ai/tech/metagpt",
"displayed_link": "https://lablab.ai \u203a tech \u203a metagpt",
"favicon": "https://serpapi.com/searches/65952b400ead410fae1f548f/images/f37f87ccfb08b6fc2fe7e2076c022e766a141f2bf05b1ab902f83ed00f4148a4.png",
"snippet": "MetaGPT: Collaborative AI for Complex Tasks. MetaGPT is a groundbreaking AI technology, designed to transform the landscape of software development.",
"snippet_highlighted_words": [
"MetaGPT",
"MetaGPT"
],
"source": "lablab.ai"
},
{
"position": 6,
"title": "MetaGPT | Discover AI use cases",
"link": "https://gpt3demo.com/apps/metagpt",
"redirect_link": "https://www.google.comhttps://gpt3demo.com/apps/metagpt",
"displayed_link": "https://gpt3demo.com \u203a apps \u203a metagpt",
"favicon": "https://serpapi.com/searches/65952b400ead410fae1f548f/images/f37f87ccfb08b6fc2fe7e2076c022e76142721493557b5d95328dafb62b6b43a.jpeg",
"snippet": "Assign different roles to GPTs to form a collaborative software entity for complex tasks. MetaGPT takes a one-line requirement as input and outputs user ...",
"snippet_highlighted_words": [
"MetaGPT"
],
"source": "GPT-3 Demo"
},
{
"position": 7,
"title": "Meet MetaGPT: The ChatGPT-Powered AI Assistant That ...",
"link": "https://www.kdnuggets.com/meet-metagpt-the-chatgptpowered-ai-assistant-that-turns-text-into-web-apps",
"redirect_link": "https://www.google.comhttps://www.kdnuggets.com/meet-metagpt-the-chatgptpowered-ai-assistant-that-turns-text-into-web-apps",
"displayed_link": "https://www.kdnuggets.com \u203a meet-metagpt-the-chatg...",
"favicon": "https://serpapi.com/searches/65952b400ead410fae1f548f/images/f37f87ccfb08b6fc2fe7e2076c022e767b0d4a705b7ad21b521b16648b390fe8.png",
"date": "Sep 8, 2023",
"snippet": "This revolutionary AI tool lets you create no-code web applications in just seconds!",
"source": "KDnuggets"
}
],
"related_searches": [
{
"block_position": 1,
"query": "metagpt online",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=MetaGPT+online&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ1QJ6BAglEAE",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=MetaGPT+online"
},
{
"block_position": 1,
"query": "metagpt paper",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=MetaGPT+paper&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ1QJ6BAgoEAE",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=MetaGPT+paper"
},
{
"block_position": 1,
"query": "Metagpt review",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=Metagpt+review&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ1QJ6BAgrEAE",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=Metagpt+review"
},
{
"block_position": 1,
"query": "Metagpt download",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=Metagpt+download&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ1QJ6BAgpEAE",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=Metagpt+download"
},
{
"block_position": 1,
"query": "metagpt ai",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=MetaGPT+AI&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ1QJ6BAgeEAE",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=MetaGPT+AI"
},
{
"block_position": 1,
"query": "metagpt github",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=Metagpt+github&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ1QJ6BAgfEAE",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=Metagpt+github"
},
{
"block_position": 1,
"query": "metagpt reddit",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=MetaGPT+Reddit&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ1QJ6BAgnEAE",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=MetaGPT+Reddit"
},
{
"block_position": 1,
"query": "how to use metagpt",
"link": "https://www.google.com/search?num=8&sca_esv=4bcb71572bca9257&sca_upv=1&hl=en&gl=us&q=How+to+use+MetaGPT&sa=X&ved=2ahUKEwjh-qqa9sCDAxV4fTABHZ8gClUQ1QJ6BAgqEAE",
"serpapi_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=How+to+use+MetaGPT"
}
],
"pagination": {
"current": 1,
"next": "https://www.google.com/search?q=metagpt&oq=metagpt&hl=en&gl=us&num=8&start=8&sourceid=chrome&ie=UTF-8",
"other_pages": {
"2": "https://www.google.com/search?q=metagpt&oq=metagpt&hl=en&gl=us&num=8&start=8&sourceid=chrome&ie=UTF-8",
"3": "https://www.google.com/search?q=metagpt&oq=metagpt&hl=en&gl=us&num=8&start=16&sourceid=chrome&ie=UTF-8",
"4": "https://www.google.com/search?q=metagpt&oq=metagpt&hl=en&gl=us&num=8&start=24&sourceid=chrome&ie=UTF-8",
"5": "https://www.google.com/search?q=metagpt&oq=metagpt&hl=en&gl=us&num=8&start=32&sourceid=chrome&ie=UTF-8"
}
},
"serpapi_pagination": {
"current": 1,
"next_link": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=metagpt&start=8",
"next": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=metagpt&start=8",
"other_pages": {
"2": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=metagpt&start=8",
"3": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=metagpt&start=16",
"4": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=metagpt&start=24",
"5": "https://serpapi.com/search.json?device=desktop&engine=google&gl=us&google_domain=google.com&hl=en&num=8&q=metagpt&start=32"
}
}
}

View file

@ -0,0 +1,102 @@
[
{
"searchParameters": {
"q": "metagpt",
"num": 8,
"page": 1,
"type": "search",
"engine": "google"
},
"organic": [
{
"title": "geekan/MetaGPT: The Multi-Agent Framework: Given one line Requirement, return PRD, Design, Tasks, Repo - GitHub",
"link": "https://github.com/geekan/MetaGPT",
"snippet": "MetaGPT takes a one line requirement as input and outputs user stories / competitive analysis / requirements / data structures / APIs / documents, etc.",
"sitelinks": [
{
"title": "README.md",
"link": "https://github.com/geekan/MetaGPT/blob/main/README.md"
},
{
"title": "Roadmap",
"link": "https://github.com/geekan/MetaGPT/blob/main/docs/ROADMAP.md"
},
{
"title": "Issues",
"link": "https://github.com/geekan/MetaGPT/issues"
},
{
"title": "Actions",
"link": "https://github.com/geekan/MetaGPT/actions"
}
],
"position": 1
},
{
"title": "MetaGPT: Meta Programming for A Multi-Agent Collaborative Framework - arXiv",
"link": "https://arxiv.org/abs/2308.00352",
"snippet": "Abstract:Remarkable progress has been made on automated problem solving through societies of agents based on large language models (LLMs).",
"date": "Aug 1, 2023",
"position": 2
},
{
"title": "How To Install MetaGPT - Build A Startup With One Prompt!! - YouTube",
"link": "https://youtube.com/watch?v=uT75J_KG_aY",
"snippet": "In this video, we review MetaGPT, a new project that aims ...",
"date": "Aug 14, 2023",
"attributes": {
"Duration": "6:36",
"Posted": "Aug 14, 2023"
},
"imageUrl": "https://i.ytimg.com/vi/uT75J_KG_aY/default.jpg?sqp=-oaymwEECHgQQw&rs=AMzJL3lfWRsXgckPQztWhHaRKYqxffksoA",
"position": 3
},
{
"title": "Meet MetaGPT: The ChatGPT-Powered AI Assistant That Turns Text Into Web Apps",
"link": "https://www.kdnuggets.com/meet-metagpt-the-chatgptpowered-ai-assistant-that-turns-text-into-web-apps",
"snippet": "This revolutionary AI tool lets you create no-code web applications in just seconds!",
"date": "Sep 8, 2023",
"position": 4
},
{
"title": "MetaGPT: Complete Guide to the Best AI Agent Available Right Now - Unite.AI",
"link": "https://www.unite.ai/metagpt-complete-guide-to-the-best-ai-agent-available-right-now/",
"snippet": "Discover why MetaGPT outperforms AutoGPT, BabyAgi, and other AI agents in complex coding tasks. Our in-depth article guides you through the ...",
"date": "Sep 11, 2023",
"position": 5
},
{
"title": "MetaGPT | Discover AI use cases - GPT-3 Demo",
"link": "https://gpt3demo.com/apps/metagpt",
"snippet": "Assign different roles to GPTs to form a collaborative software entity for complex tasks. MetaGPT takes a one-line requirement as input and outputs user ...",
"position": 6
}
],
"relatedSearches": [
{
"query": "How to use MetaGPT"
},
{
"query": "MetaGPT Reddit"
},
{
"query": "Metagpt arXiv"
},
{
"query": "MetaGPT youtube"
},
{
"query": "MetaGPT example"
},
{
"query": "Metagpt huggingface"
},
{
"query": "metagpt: meta programming for multi-agent collaborative framework"
},
{
"query": "MetaGPT alternative"
}
]
}
]

View file

@ -0,0 +1,115 @@
[
{
"searchParameters": {
"q": "metagpt",
"num": 8,
"page": 1,
"type": "search",
"engine": "google"
},
"organic": [
{
"title": "geekan/MetaGPT: The Multi-Agent Framework: Given one line Requirement, return PRD, Design, Tasks, Repo - GitHub",
"link": "https://github.com/geekan/MetaGPT",
"snippet": "MetaGPT takes a one line requirement as input and outputs user stories / competitive analysis / requirements / data structures / APIs / documents, etc.",
"sitelinks": [
{
"title": "README.md",
"link": "https://github.com/geekan/MetaGPT/blob/main/README.md"
},
{
"title": "Roadmap",
"link": "https://github.com/geekan/MetaGPT/blob/main/docs/ROADMAP.md"
},
{
"title": "Issues",
"link": "https://github.com/geekan/MetaGPT/issues"
},
{
"title": "Actions",
"link": "https://github.com/geekan/MetaGPT/actions"
}
],
"position": 1
},
{
"title": "MetaGPT: Meta Programming for A Multi-Agent Collaborative Framework - arXiv",
"link": "https://arxiv.org/abs/2308.00352",
"snippet": "Abstract:Remarkable progress has been made on automated problem solving through societies of agents based on large language models (LLMs).",
"date": "Aug 1, 2023",
"position": 2
},
{
"title": "How To Install MetaGPT - Build A Startup With One Prompt!! - YouTube",
"link": "https://youtube.com/watch?v=uT75J_KG_aY",
"snippet": "In this video, we review MetaGPT, a new project that aims ...",
"date": "Aug 14, 2023",
"attributes": {
"Duration": "6:36",
"Posted": "Aug 14, 2023"
},
"imageUrl": "https://i.ytimg.com/vi/uT75J_KG_aY/default.jpg?sqp=-oaymwEECHgQQw&rs=AMzJL3lfWRsXgckPQztWhHaRKYqxffksoA",
"position": 3
},
{
"title": "Meet MetaGPT: The ChatGPT-Powered AI Assistant That Turns Text Into Web Apps",
"link": "https://www.kdnuggets.com/meet-metagpt-the-chatgptpowered-ai-assistant-that-turns-text-into-web-apps",
"snippet": "This revolutionary AI tool lets you create no-code web applications in just seconds!",
"date": "Sep 8, 2023",
"position": 4
},
{
"title": "MetaGPT: Complete Guide to the Best AI Agent Available Right Now - Unite.AI",
"link": "https://www.unite.ai/metagpt-complete-guide-to-the-best-ai-agent-available-right-now/",
"snippet": "Discover why MetaGPT outperforms AutoGPT, BabyAgi, and other AI agents in complex coding tasks. Our in-depth article guides you through the ...",
"date": "Sep 11, 2023",
"position": 5
},
{
"title": "MetaGPT | Discover AI use cases - GPT-3 Demo",
"link": "https://gpt3demo.com/apps/metagpt",
"snippet": "Assign different roles to GPTs to form a collaborative software entity for complex tasks. MetaGPT takes a one-line requirement as input and outputs user ...",
"position": 6
},
{
"title": "MetaGPT AI technology page - Lablab.ai",
"link": "https://lablab.ai/tech/metagpt",
"snippet": "MetaGPT: Collaborative AI for Complex Tasks. MetaGPT is a groundbreaking AI technology, designed to transform the landscape of software development.",
"position": 7
},
{
"title": "MetaGPT: Meta Programming for Multi-Agent Collaborative Framework | OpenReview",
"link": "https://openreview.net/forum?id=VtmBAGCN7o",
"snippet": "This paper introduces MetaGPT, an innovative meta-programming framework for multi-agent collaborations based on LLM, which encodes Standardized ...",
"date": "Sep 22, 2023",
"position": 8
}
],
"relatedSearches": [
{
"query": "How to use MetaGPT"
},
{
"query": "MetaGPT Reddit"
},
{
"query": "Metagpt arXiv"
},
{
"query": "MetaGPT youtube"
},
{
"query": "MetaGPT example"
},
{
"query": "Metagpt huggingface"
},
{
"query": "metagpt: meta programming for multi-agent collaborative framework"
},
{
"query": "MetaGPT alternative"
}
]
}
]

View file

@ -117,7 +117,6 @@ if __name__ == '__main__':
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_debug_error():
CONFIG.src_workspace = CONFIG.git_repo.workdir / uuid.uuid4().hex
ctx = RunCodeContext(
@ -150,5 +149,16 @@ async def test_debug_error():
rsp = await debug_error.run()
assert "class Player" in rsp # rewrite the same class
# a key logic to rewrite to (original one is "if self.score > 12")
# Problematic code:
# ```
# if self.score > 21 and any(card.rank == 'A' for card in self.hand):
# self.score -= 10
# ```
# Should rewrite to (used "gpt-3.5-turbo-1106"):
# ```
# ace_count = sum(1 for card in self.hand if card.rank == 'A')
# while self.score > 21 and ace_count > 0:
# self.score -= 10
# ace_count -= 1
# ```
assert "while self.score > 21" in rsp

View file

@ -17,7 +17,6 @@ from tests.metagpt.actions.mock_markdown import PRD_SAMPLE
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_design_api():
inputs = ["我们需要一个音乐播放器,它应该有播放、暂停、上一曲、下一曲等功能。", PRD_SAMPLE]
for prd in inputs:

View file

@ -11,7 +11,6 @@ from metagpt.actions.design_api_review import DesignReview
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_design_api_review():
prd = "我们需要一个音乐播放器,它应该有播放、暂停、上一曲、下一曲等功能。"
api_design = """

View file

@ -20,7 +20,6 @@ context = """
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_generate_questions():
action = GenerateQuestions()
rsp = await action.run(context)

View file

@ -54,7 +54,6 @@ async def test_generate_table(invoice_path: Path, expected_result: dict):
("invoice_path", "query", "expected_result"),
[(Path("invoices/invoice-1.pdf"), "Invoicing date", "2023年02月03日")],
)
@pytest.mark.usefixtures("llm_mock")
async def test_reply_question(invoice_path: Path, query: dict, expected_result: str):
invoice_path = TEST_DATA_PATH / invoice_path
ocr_result = await InvoiceOCR().run(file_path=Path(invoice_path))

View file

@ -12,7 +12,6 @@ from metagpt.logs import logger
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_prepare_interview():
action = PrepareInterview()
rsp = await action.run("I just graduated and hope to find a job as a Python engineer")

View file

@ -18,7 +18,6 @@ from tests.metagpt.actions.mock_json import DESIGN, PRD
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_design_api():
await FileRepository.save_file("1.txt", content=str(PRD), relative_path=PRDS_FILE_REPO)
await FileRepository.save_file("1.txt", content=str(DESIGN), relative_path=SYSTEM_DESIGN_FILE_REPO)

View file

@ -11,13 +11,46 @@ from pathlib import Path
import pytest
from metagpt.actions.rebuild_class_view import RebuildClassView
from metagpt.config import CONFIG
from metagpt.const import GRAPH_REPO_FILE_REPO
from metagpt.llm import LLM
@pytest.mark.asyncio
async def test_rebuild():
action = RebuildClassView(name="RedBean", context=Path(__file__).parent.parent, llm=LLM())
action = RebuildClassView(
name="RedBean", context=str(Path(__file__).parent.parent.parent.parent / "metagpt"), llm=LLM()
)
await action.run()
graph_file_repo = CONFIG.git_repo.new_file_repository(relative_path=GRAPH_REPO_FILE_REPO)
assert graph_file_repo.changed_files
@pytest.mark.parametrize(
    ("path", "direction", "diff", "want"),
    [
        # "=": roots already aligned — the path comes back unchanged.
        ("metagpt/startup.py", "=", ".", "metagpt/startup.py"),
        # "+": prepend the diff prefix to the path.
        ("metagpt/startup.py", "+", "MetaGPT", "MetaGPT/metagpt/startup.py"),
        # "-": strip the diff prefix from the front of the path.
        ("metagpt/startup.py", "-", "metagpt", "startup.py"),
    ],
)
def test_align_path(path, direction, diff, want):
    # Verify RebuildClassView._align_root rewrites a relative path according to
    # the (direction, diff_path) pair produced by _diff_path.
    res = RebuildClassView._align_root(path=path, direction=direction, diff_path=diff)
    assert res == want
@pytest.mark.parametrize(
    ("path_root", "package_root", "want_direction", "want_diff"),
    [
        # Identical roots: direction "=" and a "." diff (no adjustment needed).
        ("/Users/x/github/MetaGPT/metagpt", "/Users/x/github/MetaGPT/metagpt", "=", "."),
        # path_root is an ancestor of package_root: direction "-" plus the extra segment.
        ("/Users/x/github/MetaGPT", "/Users/x/github/MetaGPT/metagpt", "-", "metagpt"),
        # path_root is below package_root: direction "+" plus the extra segment.
        ("/Users/x/github/MetaGPT/metagpt", "/Users/x/github/MetaGPT", "+", "metagpt"),
    ],
)
def test_diff_path(path_root, package_root, want_direction, want_diff):
    # Verify RebuildClassView._diff_path reports how two roots relate; its
    # output feeds _align_root (see test_align_path).
    direction, diff = RebuildClassView._diff_path(path_root=Path(path_root), package_root=Path(package_root))
    assert direction == want_direction
    assert diff == want_diff
if __name__ == "__main__":

View file

@ -0,0 +1,55 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/1/4
@Author : mashenquan
@File : test_rebuild_sequence_view.py
"""
from pathlib import Path
import pytest
from metagpt.actions.rebuild_sequence_view import RebuildSequenceView
from metagpt.config import CONFIG
from metagpt.const import GRAPH_REPO_FILE_REPO
from metagpt.llm import LLM
from metagpt.utils.common import aread
from metagpt.utils.file_repository import FileRepository
from metagpt.utils.git_repository import ChangeType
@pytest.mark.asyncio
async def test_rebuild():
    """End-to-end check of RebuildSequenceView against a pre-captured graph DB fixture."""
    # Mock: seed the repo's graph DB file from a captured networkx JSON dump so
    # the action has graph data to read instead of analyzing a live codebase.
    data = await aread(filename=Path(__file__).parent / "../../data/graph_db/networkx.json")
    graph_db_filename = Path(CONFIG.git_repo.workdir.name).with_suffix(".json")
    await FileRepository.save_file(
        filename=str(graph_db_filename),
        relative_path=GRAPH_REPO_FILE_REPO,
        content=data,
    )
    # Register and commit the seeded file — presumably so that changed_files
    # below reflects only what the action itself writes; confirm against
    # GitRepository semantics.
    CONFIG.git_repo.add_change({f"{GRAPH_REPO_FILE_REPO}/{graph_db_filename}": ChangeType.UNTRACTED})
    CONFIG.git_repo.commit("commit1")
    action = RebuildSequenceView(
        name="RedBean", context=str(Path(__file__).parent.parent.parent.parent / "metagpt"), llm=LLM()
    )
    await action.run()
    # The action is expected to leave modified graph artifacts in the repo.
    graph_file_repo = CONFIG.git_repo.new_file_repository(relative_path=GRAPH_REPO_FILE_REPO)
    assert graph_file_repo.changed_files
@pytest.mark.parametrize(
    ("root", "pathname", "want"),
    [
        # A real file: the last two path segments of this test file resolve
        # back to the file itself when searched under its grandparent root.
        (Path(__file__).parent.parent.parent, "/".join(__file__.split("/")[-2:]), Path(__file__)),
        # A pathname that exists nowhere under root yields None.
        (Path(__file__).parent.parent.parent, "f/g.txt", None),
    ],
)
def test_get_full_filename(root, pathname, want):
    # Verify RebuildSequenceView._get_full_filename resolves a relative
    # pathname under root, returning None when it cannot be found.
    res = RebuildSequenceView._get_full_filename(root=root, pathname=pathname)
    assert res == want
if __name__ == "__main__":
pytest.main([__file__, "-s"])

View file

@ -8,14 +8,7 @@
import pytest
from metagpt.actions import CollectLinks, research
@pytest.mark.asyncio
async def test_action():
action = CollectLinks()
result = await action.run(topic="baidu")
assert result
from metagpt.actions import research
@pytest.mark.asyncio

View file

@ -47,7 +47,10 @@ class TestSkillAction:
assert args.get("size_type") == "512x512"
@pytest.mark.asyncio
async def test_parser_action(self):
async def test_parser_action(self, mocker):
# mock
mocker.patch("metagpt.learn.text_to_image", return_value="https://mock.com/xxx")
parser_action = ArgumentsParingAction(skill=self.skill, ask="Draw an apple")
rsp = await parser_action.run()
assert rsp
@ -80,7 +83,8 @@ class TestSkillAction:
@pytest.mark.asyncio
async def test_skill_action_error(self):
action = SkillAction(skill=self.skill, args={})
await action.run()
rsp = await action.run()
assert "Error" in rsp.content
if __name__ == "__main__":

View file

@ -177,7 +177,6 @@ class Snake:
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_summarize_code():
CONFIG.src_workspace = CONFIG.git_repo.workdir / "src"
await FileRepository.save_file(filename="1.json", relative_path=SYSTEM_DESIGN_FILE_REPO, content=DESIGN_CONTENT)

View file

@ -33,7 +33,6 @@ from metagpt.schema import Message
),
],
)
@pytest.mark.usefixtures("llm_mock")
async def test_prompt(agent_description, language, context, knowledge, history_summary):
# Prerequisites
CONFIG.agent_description = agent_description

View file

@ -28,7 +28,6 @@ from tests.metagpt.actions.mock_markdown import TASKS_2, WRITE_CODE_PROMPT_SAMPL
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_write_code():
context = CodingContext(
filename="task_filename.py", design_doc=Document(content="设计一个名为'add'的函数,该函数接受两个整数作为输入,并返回它们的和。")
@ -45,7 +44,6 @@ async def test_write_code():
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_write_code_directly():
prompt = WRITE_CODE_PROMPT_SAMPLE + "\n" + TASKS_2[0]
llm = LLM()
@ -54,7 +52,6 @@ async def test_write_code_directly():
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_write_code_deps():
# Prerequisites
CONFIG.src_workspace = CONFIG.git_repo.workdir / "snake1/snake1"

View file

@ -12,7 +12,6 @@ from metagpt.schema import CodingContext, Document
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_write_code_review(capfd):
code = """
def add(a, b):

View file

@ -27,14 +27,12 @@ class Person:
],
ids=["google", "numpy", "sphinx"],
)
@pytest.mark.usefixtures("llm_mock")
async def test_write_docstring(style: str, part: str):
ret = await WriteDocstring().run(code, style=style)
assert part in ret
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_write():
code = await WriteDocstring.write_docstring(__file__)
assert code

View file

@ -8,22 +8,25 @@
"""
import pytest
from metagpt.actions import UserRequirement
from metagpt.actions import UserRequirement, WritePRD
from metagpt.config import CONFIG
from metagpt.const import DOCS_FILE_REPO, PRDS_FILE_REPO, REQUIREMENT_FILENAME
from metagpt.logs import logger
from metagpt.roles.product_manager import ProductManager
from metagpt.roles.role import RoleReactMode
from metagpt.schema import Message
from metagpt.utils.common import any_to_str
from metagpt.utils.file_repository import FileRepository
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_write_prd():
async def test_write_prd(new_filename):
product_manager = ProductManager()
requirements = "开发一个基于大语言模型与私有知识库的搜索引擎,希望可以基于大语言模型进行搜索总结"
await FileRepository.save_file(filename=REQUIREMENT_FILENAME, content=requirements, relative_path=DOCS_FILE_REPO)
product_manager.rc.react_mode = RoleReactMode.BY_ORDER
prd = await product_manager.run(Message(content=requirements, cause_by=UserRequirement))
assert prd.cause_by == any_to_str(WritePRD)
logger.info(requirements)
logger.info(prd)
@ -31,3 +34,7 @@ async def test_write_prd():
assert prd is not None
assert prd.content != ""
assert CONFIG.git_repo.new_file_repository(relative_path=PRDS_FILE_REPO).changed_files
if __name__ == "__main__":
pytest.main([__file__, "-s"])

View file

@ -11,7 +11,6 @@ from metagpt.actions.write_prd_review import WritePRDReview
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_write_prd_review():
prd = """
Introduction: This is a new feature for our product.

View file

@ -46,7 +46,6 @@ CONTEXT = """
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_write_review():
write_review = WriteReview()
review = await write_review.run(CONTEXT)

View file

@ -16,7 +16,6 @@ from metagpt.actions.write_teaching_plan import WriteTeachingPlanPart
("topic", "context"),
[("Title", "Lesson 1: Learn to draw an apple."), ("Teaching Content", "Lesson 1: Learn to draw an apple.")],
)
@pytest.mark.usefixtures("llm_mock")
async def test_write_teaching_plan_part(topic, context):
action = WriteTeachingPlanPart(topic=topic, context=context)
rsp = await action.run()

View file

@ -13,7 +13,6 @@ from metagpt.schema import Document, TestingContext
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_write_test():
code = """
import random
@ -40,7 +39,6 @@ async def test_write_test():
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_write_code_invalid_code(mocker):
# Mock the _aask method to return an invalid code string
mocker.patch.object(WriteTest, "_aask", return_value="Invalid Code String")

View file

@ -14,7 +14,6 @@ from metagpt.actions.write_tutorial import WriteContent, WriteDirectory
@pytest.mark.asyncio
@pytest.mark.parametrize(("language", "topic"), [("English", "Write a tutorial about Python")])
@pytest.mark.usefixtures("llm_mock")
async def test_write_directory(language: str, topic: str):
ret = await WriteDirectory(language=language).run(topic=topic)
assert isinstance(ret, dict)
@ -30,7 +29,6 @@ async def test_write_directory(language: str, topic: str):
("language", "topic", "directory"),
[("English", "Write a tutorial about Python", {"Introduction": ["What is Python?", "Why learn Python?"]})],
)
@pytest.mark.usefixtures("llm_mock")
async def test_write_content(language: str, topic: str, directory: Dict):
ret = await WriteContent(language=language, directory=directory).run(topic=topic)
assert isinstance(ret, str)

View file

@ -29,6 +29,16 @@ points = [
]
def assert_almost_equal(actual, expected, delta=1e-10):
    """Assert that ``actual`` equals ``expected`` within an absolute tolerance.

    Accepts either scalars or flat lists of numbers; lists are compared
    element-wise after a length check.

    Args:
        actual: Number, or list of numbers, produced by the code under test.
        expected: Reference number, or list of numbers, to compare against.
        delta: Maximum allowed absolute difference per element. Defaults to
            1e-10, matching the original hard-coded tolerance.

    Raises:
        AssertionError: If the lengths differ (list case) or any element
            deviates from its reference by more than ``delta``.
    """
    if isinstance(expected, list):
        # Fail fast with an explicit message instead of a bare length assert.
        assert len(actual) == len(expected), f"length mismatch: {len(actual)} != {len(expected)}"
        for ac, exp in zip(actual, expected):
            assert abs(ac - exp) <= delta, f"{ac} is not within {delta} of {exp}"
    else:
        assert abs(actual - expected) <= delta, f"{actual} is not within {delta} of {expected}"
def test_qdrant_store():
qdrant_connection = QdrantConnection(memory=True)
vectors_config = VectorParams(size=2, distance=Distance.COSINE)
@ -42,30 +52,30 @@ def test_qdrant_store():
qdrant_store.add("Book", points)
results = qdrant_store.search("Book", query=[1.0, 1.0])
assert results[0]["id"] == 2
assert results[0]["score"] == 0.999106722578389
assert_almost_equal(results[0]["score"], 0.999106722578389)
assert results[1]["id"] == 7
assert results[1]["score"] == 0.9961650411397226
assert_almost_equal(results[1]["score"], 0.9961650411397226)
results = qdrant_store.search("Book", query=[1.0, 1.0], return_vector=True)
assert results[0]["id"] == 2
assert results[0]["score"] == 0.999106722578389
assert results[0]["vector"] == [0.7363563179969788, 0.6765939593315125]
assert_almost_equal(results[0]["score"], 0.999106722578389)
assert_almost_equal(results[0]["vector"], [0.7363563179969788, 0.6765939593315125])
assert results[1]["id"] == 7
assert results[1]["score"] == 0.9961650411397226
assert results[1]["vector"] == [0.7662628889083862, 0.6425272226333618]
assert_almost_equal(results[1]["score"], 0.9961650411397226)
assert_almost_equal(results[1]["vector"], [0.7662628889083862, 0.6425272226333618])
results = qdrant_store.search(
"Book",
query=[1.0, 1.0],
query_filter=Filter(must=[FieldCondition(key="rand_number", range=Range(gte=8))]),
)
assert results[0]["id"] == 8
assert results[0]["score"] == 0.9100373450784073
assert_almost_equal(results[0]["score"], 0.9100373450784073)
assert results[1]["id"] == 9
assert results[1]["score"] == 0.7127610621127889
assert_almost_equal(results[1]["score"], 0.7127610621127889)
results = qdrant_store.search(
"Book",
query=[1.0, 1.0],
query_filter=Filter(must=[FieldCondition(key="rand_number", range=Range(gte=8))]),
return_vector=True,
)
assert results[0]["vector"] == [0.35037919878959656, 0.9366079568862915]
assert results[1]["vector"] == [0.9999677538871765, 0.00802854634821415]
assert_almost_equal(results[0]["vector"], [0.35037919878959656, 0.9366079568862915])
assert_almost_equal(results[1]["vector"], [0.9999677538871765, 0.00802854634821415])

View file

@ -6,6 +6,8 @@
@File : test_skill_loader.py
@Desc : Unit tests.
"""
from pathlib import Path
import pytest
from metagpt.config import CONFIG
@ -23,7 +25,8 @@ async def test_suite():
{"id": 6, "name": "knowledge", "type": "builtin", "config": {}, "enabled": True},
{"id": 6, "name": "web_search", "type": "builtin", "config": {}, "enabled": True},
]
loader = await SkillsDeclaration.load()
pathname = Path(__file__).parent / "../../../docs/.well-known/skills.yaml"
loader = await SkillsDeclaration.load(skill_yaml_file_name=pathname)
skills = loader.get_skill_list()
assert skills
assert len(skills) >= 3

View file

@ -12,10 +12,18 @@ import pytest
from metagpt.config import CONFIG
from metagpt.learn.text_to_image import text_to_image
from metagpt.tools.metagpt_text_to_image import MetaGPTText2Image
from metagpt.tools.openai_text_to_image import OpenAIText2Image
from metagpt.utils.s3 import S3
@pytest.mark.asyncio
async def test_metagpt_llm():
async def test_text_to_image(mocker):
# mock
mocker.patch.object(MetaGPTText2Image, "text_2_image", return_value=b"mock MetaGPTText2Image")
mocker.patch.object(OpenAIText2Image, "text_2_image", return_value=b"mock OpenAIText2Image")
mocker.patch.object(S3, "cache", return_value="http://mock/s3")
# Prerequisites
assert CONFIG.METAGPT_TEXT_TO_IMAGE_MODEL_URL
assert CONFIG.OPENAI_API_KEY

View file

@ -0,0 +1,8 @@
import pytest


@pytest.fixture(autouse=True)
def llm_mock(rsp_cache, mocker, request):
    """No-op override of the global autouse ``llm_mock`` fixture.

    Defining an empty fixture with the same name in this conftest shadows the
    project-wide one, so tests in the provider folder hit the real
    model-specific request paths instead of cached/mocked LLM responses.
    """
    # NOTE(review): the original comment said "test the aask and aask
    # functions" — one occurrence is presumably a different method name
    # (e.g. acompletion); confirm against the global fixture's patch targets.
    pass

View file

@ -3,7 +3,6 @@
# @Desc : the unittest of ZhiPuAILLM
import pytest
from zhipuai.utils.sse_client import Event
from metagpt.config import CONFIG
from metagpt.provider.zhipuai_api import ZhiPuAILLM
@ -15,35 +14,16 @@ messages = [{"role": "user", "content": prompt_msg}]
resp_content = "I'm chatglm-turbo"
default_resp = {
"code": 200,
"data": {
"choices": [{"role": "assistant", "content": resp_content}],
"usage": {"prompt_tokens": 20, "completion_tokens": 20},
},
"choices": [{"finish_reason": "stop", "index": 0, "message": {"content": resp_content, "role": "assistant"}}],
"usage": {"completion_tokens": 22, "prompt_tokens": 19, "total_tokens": 41},
}
def mock_zhipuai_invoke(**kwargs) -> dict:
return default_resp
async def mock_zhipuai_ainvoke(**kwargs) -> dict:
return default_resp
async def mock_zhipuai_asse_invoke(**kwargs):
async def mock_zhipuai_acreate_stream(self, **kwargs):
class MockResponse(object):
async def _aread(self):
class Iterator(object):
events = [
Event(id="xxx", event="add", data=resp_content, retry=0),
Event(
id="xxx",
event="finish",
data="",
meta='{"usage": {"completion_tokens": 20,"prompt_tokens": 20}}',
),
]
events = [{"choices": [{"index": 0, "delta": {"content": resp_content, "role": "assistant"}}]}]
async def __aiter__(self):
for event in self.events:
@ -52,23 +32,26 @@ async def mock_zhipuai_asse_invoke(**kwargs):
async for chunk in Iterator():
yield chunk
async def async_events(self):
async def stream(self):
async for chunk in self._aread():
yield chunk
return MockResponse()
# Stand-in for ZhiPuModelAPI.acreate (patched in below): ignores all
# arguments and returns the canned non-streaming response ``default_resp``
# defined at module level.
async def mock_zhipuai_acreate(self, **kwargs) -> dict:
    return default_resp
@pytest.mark.asyncio
async def test_zhipuai_acompletion(mocker):
mocker.patch("metagpt.provider.zhipuai.zhipu_model_api.ZhiPuModelAPI.invoke", mock_zhipuai_invoke)
mocker.patch("metagpt.provider.zhipuai.zhipu_model_api.ZhiPuModelAPI.ainvoke", mock_zhipuai_ainvoke)
mocker.patch("metagpt.provider.zhipuai.zhipu_model_api.ZhiPuModelAPI.asse_invoke", mock_zhipuai_asse_invoke)
mocker.patch("metagpt.provider.zhipuai.zhipu_model_api.ZhiPuModelAPI.acreate", mock_zhipuai_acreate)
mocker.patch("metagpt.provider.zhipuai.zhipu_model_api.ZhiPuModelAPI.acreate_stream", mock_zhipuai_acreate_stream)
zhipu_gpt = ZhiPuAILLM()
resp = await zhipu_gpt.acompletion(messages)
assert resp["data"]["choices"][0]["content"] == resp_content
assert resp["choices"][0]["message"]["content"] == resp_content
resp = await zhipu_gpt.aask(prompt_msg, stream=False)
assert resp == resp_content

View file

@ -11,16 +11,16 @@ from metagpt.provider.zhipuai.async_sse_client import AsyncSSEClient
async def test_async_sse_client():
class Iterator(object):
async def __aiter__(self):
yield b"data: test_value"
yield b'data: {"test_key": "test_value"}'
async_sse_client = AsyncSSEClient(event_source=Iterator())
async for event in async_sse_client.async_events():
assert event.data, "test_value"
async for chunk in async_sse_client.stream():
assert "test_value" in chunk.values()
class InvalidIterator(object):
async def __aiter__(self):
yield b"invalid: test_value"
async_sse_client = AsyncSSEClient(event_source=InvalidIterator())
async for event in async_sse_client.async_events():
assert not event
async for chunk in async_sse_client.stream():
assert not chunk

View file

@ -6,15 +6,13 @@ from typing import Any, Tuple
import pytest
import zhipuai
from zhipuai.model_api.api import InvokeType
from zhipuai.utils.http_client import headers as zhipuai_default_headers
from metagpt.provider.zhipuai.zhipu_model_api import ZhiPuModelAPI
api_key = "xxx.xxx"
zhipuai.api_key = api_key
default_resp = b'{"result": "test response"}'
default_resp = b'{"choices": [{"finish_reason": "stop", "index": 0, "message": {"content": "test response", "role": "assistant"}}]}'
async def mock_requestor_arequest(self, **kwargs) -> Tuple[Any, Any, str]:
@ -23,22 +21,15 @@ async def mock_requestor_arequest(self, **kwargs) -> Tuple[Any, Any, str]:
@pytest.mark.asyncio
async def test_zhipu_model_api(mocker):
header = ZhiPuModelAPI.get_header()
zhipuai_default_headers.update({"Authorization": api_key})
assert header == zhipuai_default_headers
sse_header = ZhiPuModelAPI.get_sse_header()
assert len(sse_header["Authorization"]) == 191
url_prefix, url_suffix = ZhiPuModelAPI.split_zhipu_api_url(InvokeType.SYNC, kwargs={"model": "chatglm_turbo"})
url_prefix, url_suffix = ZhiPuModelAPI(api_key=api_key).split_zhipu_api_url()
assert url_prefix == "https://open.bigmodel.cn/api"
assert url_suffix == "/paas/v3/model-api/chatglm_turbo/invoke"
assert url_suffix == "/paas/v4/chat/completions"
mocker.patch("metagpt.provider.general_api_requestor.GeneralAPIRequestor.arequest", mock_requestor_arequest)
result = await ZhiPuModelAPI.arequest(
InvokeType.SYNC, stream=False, method="get", headers={}, kwargs={"model": "chatglm_turbo"}
result = await ZhiPuModelAPI(api_key=api_key).arequest(
stream=False, method="get", headers={}, kwargs={"model": "glm-3-turbo"}
)
assert result == default_resp
result = await ZhiPuModelAPI.ainvoke()
assert result["result"] == "test response"
result = await ZhiPuModelAPI(api_key=api_key).acreate()
assert result["choices"][0]["message"]["content"] == "test response"

View file

@ -284,4 +284,6 @@ class MockMessages:
prd = Message(role="Product Manager", content=PRD, cause_by=WritePRD)
system_design = Message(role="Architect", content=SYSTEM_DESIGN, cause_by=WriteDesign)
tasks = Message(role="Project Manager", content=TASKS, cause_by=WriteTasks)
json_tasks = Message(role="Project Manager", content=json.dumps(JSON_TASKS), cause_by=WriteTasks)
json_tasks = Message(
role="Project Manager", content=json.dumps(JSON_TASKS, ensure_ascii=False), cause_by=WriteTasks
)

View file

@ -22,7 +22,6 @@ from tests.metagpt.roles.mock import MockMessages
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_architect():
# Prerequisites
filename = uuid.uuid4().hex + ".json"

View file

@ -13,7 +13,6 @@ from pydantic import BaseModel
from metagpt.actions.skill_action import SkillAction
from metagpt.actions.talk_action import TalkAction
from metagpt.config import CONFIG
from metagpt.logs import logger
from metagpt.memory.brain_memory import BrainMemory
from metagpt.roles.assistant import Assistant
from metagpt.schema import Message
@ -21,7 +20,6 @@ from metagpt.utils.common import any_to_str
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_run():
CONFIG.language = "Chinese"
@ -88,7 +86,7 @@ async def test_run():
if not has_action:
break
msg: Message = await role.act()
logger.info(msg)
# logger.info(msg)
assert msg
assert msg.cause_by == seed.cause_by
assert msg.content

View file

@ -30,7 +30,6 @@ from tests.metagpt.roles.mock import STRS_FOR_PARSING, TASKS, MockMessages
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_engineer():
# Prerequisites
rqno = "20231221155954.json"
@ -114,7 +113,6 @@ def test_todo():
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_new_coding_context():
# Prerequisites
demo_path = Path(__file__).parent / "../../data/demo_project"

View file

@ -41,7 +41,6 @@ from metagpt.schema import Message
),
],
)
@pytest.mark.usefixtures("llm_mock")
async def test_invoice_ocr_assistant(query: str, invoice_path: Path, invoice_table_path: Path, expected_result: dict):
invoice_path = TEST_DATA_PATH / invoice_path
role = InvoiceOCRAssistant()

View file

@ -13,8 +13,7 @@ from tests.metagpt.roles.mock import MockMessages
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_product_manager():
async def test_product_manager(new_filename):
product_manager = ProductManager()
rsp = await product_manager.run(MockMessages.req)
logger.info(rsp)

View file

@ -13,7 +13,6 @@ from tests.metagpt.roles.mock import MockMessages
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_project_manager():
project_manager = ProjectManager()
rsp = await project_manager.run(MockMessages.system_design)

View file

@ -3,6 +3,7 @@
# @Desc : unittest of Role
import pytest
from metagpt.llm import HumanProvider
from metagpt.roles.role import Role
@ -12,5 +13,10 @@ def test_role_desc():
assert role.desc == "Best Seller"
def test_role_human():
role = Role(is_human=True)
assert isinstance(role.llm, HumanProvider)
if __name__ == "__main__":
pytest.main([__file__, "-s"])

View file

@ -17,6 +17,7 @@ from metagpt.schema import Message
@pytest.mark.asyncio
@pytest.mark.skip
async def test_init():
class Inputs(BaseModel):
name: str
@ -103,7 +104,6 @@ async def test_new_file_name():
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_run():
CONFIG.set_context({"language": "Chinese", "teaching_language": "English"})
lesson = """

View file

@ -15,7 +15,6 @@ from metagpt.roles.tutorial_assistant import TutorialAssistant
@pytest.mark.asyncio
@pytest.mark.parametrize(("language", "topic"), [("Chinese", "Write a tutorial about pip")])
@pytest.mark.usefixtures("llm_mock")
async def test_tutorial_assistant(language: str, topic: str):
role = TutorialAssistant(language=language)
msg = await role.run(topic)

View file

@ -21,7 +21,6 @@ def test_action_serialize():
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_action_deserialize():
action = Action()
serialized_data = action.model_dump()

View file

@ -17,7 +17,6 @@ def test_architect_serialize():
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_architect_deserialize():
role = Architect()
ser_role_dict = role.model_dump(by_alias=True)

View file

@ -8,7 +8,6 @@ from metagpt.actions.prepare_interview import PrepareInterview
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_action_deserialize():
action = PrepareInterview()
serialized_data = action.model_dump()

View file

@ -10,8 +10,7 @@ from metagpt.schema import Message
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_product_manager_deserialize():
async def test_product_manager_deserialize(new_filename):
role = ProductManager()
ser_role_dict = role.model_dump(by_alias=True)
new_role = ProductManager(**ser_role_dict)

View file

@ -18,7 +18,6 @@ def test_project_manager_serialize():
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_project_manager_deserialize():
role = ProjectManager()
ser_role_dict = role.model_dump(by_alias=True)

View file

@ -69,7 +69,6 @@ def test_engineer_serialize():
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_engineer_deserialize():
role = Engineer(use_code_review=True)
ser_role_dict = role.model_dump()
@ -97,7 +96,6 @@ def test_role_serdeser_save():
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_role_serdeser_interrupt():
role_c = RoleC()
shutil.rmtree(SERDESER_PATH.joinpath("team"), ignore_errors=True)

View file

@ -109,7 +109,6 @@ async def test_team_recover_save():
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_team_recover_multi_roles_save():
idea = "write a snake game"
stg_path = SERDESER_PATH.joinpath("team")

View file

@ -17,7 +17,6 @@ def test_write_design_serialize():
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_mock")
async def test_write_code_deserialize():
context = CodingContext(
filename="test_code.py", design_doc=Document(content="write add function to calculate two numbers")

Some files were not shown because too many files have changed in this diff Show more