From 8537e3b436f296fdb473a054c532a5b5219fb30a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=88=98=E6=A3=92=E6=A3=92?=
Date: Tue, 6 Feb 2024 17:09:04 +0800
Subject: [PATCH 001/104] feat: add parse arguments for openai tool_calls.
---
metagpt/provider/openai_api.py | 34 ++++++++++++++++++++++++++-
tests/metagpt/provider/test_openai.py | 8 ++++---
2 files changed, 38 insertions(+), 4 deletions(-)
diff --git a/metagpt/provider/openai_api.py b/metagpt/provider/openai_api.py
index 63e68c9bd..a20330919 100644
--- a/metagpt/provider/openai_api.py
+++ b/metagpt/provider/openai_api.py
@@ -8,6 +8,7 @@
"""
import json
+import re
from typing import AsyncIterator, Optional, Union
from openai import APIConnectionError, AsyncOpenAI, AsyncStream
@@ -194,6 +195,30 @@ class OpenAILLM(BaseLLM):
rsp = await self._achat_completion_function(messages, **kwargs)
return self.get_choice_function_arguments(rsp)
+ def _parse_arguments(self, arguments: str) -> dict:
+ """parse arguments in openai function call"""
+ if "language" not in arguments and "code" not in arguments:
+ logger.warning(f"Not found `code`, `language`, We assume it is pure code:\n {arguments}\n. ")
+ return {"language": "python", "code": arguments}
+
+ # match the language field
+ language_pattern = re.compile(r'[\"\']?language[\"\']?\s*:\s*["\']([^"\']+?)["\']', re.DOTALL)
+ language_match = language_pattern.search(arguments)
+ language_value = language_match.group(1) if language_match else "python"
+
+ # match the code field
+ code_pattern = r'(["\'`]{3}|["\'`])([\s\S]*?)\1'
+ try:
+ code_value = re.findall(code_pattern, arguments)[-1][-1]
+ except Exception as e:
+ logger.error(f"{e}, when re.findall({code_pattern}, {arguments})")
+ code_value = None
+
+ if code_value is None:
+ raise ValueError(f"Parse code error for {arguments}")
+ # handles the case where arguments contains only code
+ return {"language": language_value, "code": code_value}
+
# @handle_exception
def get_choice_function_arguments(self, rsp: ChatCompletion) -> dict:
"""Required to provide the first function arguments of choice.
@@ -209,7 +234,14 @@ class OpenAILLM(BaseLLM):
and message.tool_calls[0].function.arguments is not None
):
# reponse is code
- return json.loads(message.tool_calls[0].function.arguments, strict=False)
+ try:
+ return json.loads(message.tool_calls[0].function.arguments, strict=False)
+ except json.decoder.JSONDecodeError as e:
+ error_msg = (
+ f"Got JSONDecodeError for \n{'--'*40} \n{message.tool_calls[0].function.arguments}, {str(e)}"
+ )
+ logger.error(error_msg)
+ return self._parse_arguments(message.tool_calls[0].function.arguments)
elif message.tool_calls is None and message.content is not None:
# reponse is code, fix openai tools_call respond bug,
# The response content is `code``, but it appears in the content instead of the arguments.
diff --git a/tests/metagpt/provider/test_openai.py b/tests/metagpt/provider/test_openai.py
index d6aa04c7b..b9db4dfb6 100644
--- a/tests/metagpt/provider/test_openai.py
+++ b/tests/metagpt/provider/test_openai.py
@@ -106,9 +106,11 @@ class TestOpenAI:
def test_aask_code_json_decode_error(self, json_decode_error):
instance = OpenAILLM(mock_llm_config)
- with pytest.raises(json.decoder.JSONDecodeError) as e:
- instance.get_choice_function_arguments(json_decode_error)
- assert "JSONDecodeError" in str(e)
+ code = instance.get_choice_function_arguments(json_decode_error)
+ assert "code" in code
+ assert "language" in code
+ assert "hello world" in code["code"]
+ logger.info(f'code is : {code["code"]}')
@pytest.mark.asyncio
From 9fa299525ed0e4e823f6bc6e4976c2ef4e437d71 Mon Sep 17 00:00:00 2001
From: mannaandpoem <1580466765@qq.com>
Date: Tue, 6 Feb 2024 22:49:38 +0800
Subject: [PATCH 002/104] update prompt
---
metagpt/tools/libs/gpt_v_generator.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/metagpt/tools/libs/gpt_v_generator.py b/metagpt/tools/libs/gpt_v_generator.py
index b1e8317ed..3b17fc596 100644
--- a/metagpt/tools/libs/gpt_v_generator.py
+++ b/metagpt/tools/libs/gpt_v_generator.py
@@ -13,12 +13,12 @@ from metagpt.tools.tool_registry import register_tool
from metagpt.tools.tool_type import ToolType
from metagpt.utils.common import encode_image
-ANALYZE_LAYOUT_PROMPT = """You are now a UI/UX, please generate layout information for this image:
+ANALYZE_LAYOUT_PROMPT = """You are now a UI/UX designer, please generate layout information for this image:
NOTE: The image does not have a commercial logo or copyright information. It is just a sketch image of the design.
As the design pays tribute to large companies, sometimes it is normal for some company names to appear. Don't worry. """
-GENERATE_PROMPT = """You are now a UI/UX and Web Developer. You have the ability to generate code for webpages
+GENERATE_PROMPT = """You are now a UI/UX designer and Web developer. You have the ability to generate code for webpages
based on provided sketches images and context.
Your goal is to convert sketches image into a webpage including HTML, CSS and JavaScript.
From ee4aba206e80e3d0f144f8713f1ae17458d77cd5 Mon Sep 17 00:00:00 2001
From: yzlin
Date: Tue, 6 Feb 2024 23:22:53 +0800
Subject: [PATCH 003/104] fix empty code when aask_code not returning language
---
metagpt/roles/ci/code_interpreter.py | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/metagpt/roles/ci/code_interpreter.py b/metagpt/roles/ci/code_interpreter.py
index 796abba04..404c93b81 100644
--- a/metagpt/roles/ci/code_interpreter.py
+++ b/metagpt/roles/ci/code_interpreter.py
@@ -72,11 +72,7 @@ class CodeInterpreter(Role):
if ReviewConst.CHANGE_WORDS[0] in review:
counter = 0 # redo the task again with help of human suggestions
- py_code = (
- code["code"] if code.get("language") == "python" else ""
- ) # use python code as final code; for markdown, return the rendered result instead of the code itself
-
- return py_code, result, success
+ return code["code"], result, success
async def _write_code(self):
todo = WriteCodeWithoutTools() if not self.use_tools else WriteCodeWithTools(selected_tools=self.tools)
From 78989b0eb7dd012442cb480ceed217d8ecc28f03 Mon Sep 17 00:00:00 2001
From: yzlin
Date: Tue, 6 Feb 2024 23:37:24 +0800
Subject: [PATCH 004/104] skip two individual tests
---
tests/metagpt/actions/test_rebuild_class_view.py | 1 +
tests/metagpt/actions/test_summarize_code.py | 1 +
2 files changed, 2 insertions(+)
diff --git a/tests/metagpt/actions/test_rebuild_class_view.py b/tests/metagpt/actions/test_rebuild_class_view.py
index 403109cc0..2188d6b85 100644
--- a/tests/metagpt/actions/test_rebuild_class_view.py
+++ b/tests/metagpt/actions/test_rebuild_class_view.py
@@ -14,6 +14,7 @@ from metagpt.actions.rebuild_class_view import RebuildClassView
from metagpt.llm import LLM
+@pytest.mark.skip
@pytest.mark.asyncio
async def test_rebuild(context):
action = RebuildClassView(
diff --git a/tests/metagpt/actions/test_summarize_code.py b/tests/metagpt/actions/test_summarize_code.py
index a404047c1..3cfe7ca81 100644
--- a/tests/metagpt/actions/test_summarize_code.py
+++ b/tests/metagpt/actions/test_summarize_code.py
@@ -176,6 +176,7 @@ class Snake:
"""
+@pytest.mark.skip
@pytest.mark.asyncio
async def test_summarize_code(context):
git_dir = Path(__file__).parent / f"unittest/{uuid.uuid4().hex}"
From ea6c440294650122c3e6df6f8d3a722b746bbe89 Mon Sep 17 00:00:00 2001
From: yzlin
Date: Tue, 6 Feb 2024 23:57:39 +0800
Subject: [PATCH 005/104] add ut to check code saving
---
tests/data/rsp_cache.json | 6 ++++++
tests/metagpt/roles/ci/test_code_interpreter.py | 4 ++++
2 files changed, 10 insertions(+)
diff --git a/tests/data/rsp_cache.json b/tests/data/rsp_cache.json
index 40d7d3953..75fc9ceb2 100644
--- a/tests/data/rsp_cache.json
+++ b/tests/data/rsp_cache.json
@@ -389,5 +389,11 @@
"reflection": "The implementation failed the test case where the input array is [1, 5, 2, 3, 4]. The issue arises because the code does not handle the sorting of elements with the same number of ones in their binary representation correctly. According to the context, for similar number of ones, the elements should be sorted based on their decimal value in ascending order. However, the output [1, 2, 4, 3, 5] shows that 3 and 4 are not in the correct order, indicating that the sorting by decimal value is not functioning as intended. To fix this issue, we should ensure that the sorting is stable, so that when two elements have the same number of ones, they maintain their relative order if their decimal values are already in ascending order.",
"improved_impl": "def sort_array(arr):\n # Helper function to count the number of ones in the binary representation\n def count_ones(n):\n return bin(n).count('1')\n \n # Sort the array using a custom key function\n # The key function returns a tuple (number of ones, value) for each element\n # This ensures that if two elements have the same number of ones, they are sorted by their value\n # We use a stable sorting algorithm to maintain the relative order of elements with the same number of ones\n sorted_arr = sorted(arr, key=lambda x: (count_ones(x), x), reverse=True)\n sorted_arr = sorted(sorted_arr, key=count_ones)\n \n return sorted_arr\n"
},
+ "[{\"role\": \"system\", \"content\": \"You are Code Interpreter, a world-class programmer that can complete any goal by executing code. Strictly follow the plan and generate code step by step. Each step of the code will be executed on the user's machine, and the user will provide the code execution results to you.**Notice: The code for the next step depends on the code for the previous step. Must reuse variables in the lastest other code directly, dont creat it again, it is very import for you. Use !pip install in a standalone block to install missing packages.Usually the libraries you need are already installed.Dont check if packages already imported.**\"}, {\"role\": \"user\", \"content\": \"\\n## User Requirement\\nRun data analysis on sklearn Iris dataset, include a plot\\n## Context\\n\\n## Current Plan\\n[\\n {\\n \\\"task_id\\\": \\\"1\\\",\\n \\\"dependent_task_ids\\\": [],\\n \\\"instruction\\\": \\\"Load the sklearn Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"from sklearn.datasets import load_iris\\\\niris_data = load_iris()\\\",\\n \\\"result\\\": \\\"a successful run\\\",\\n \\\"is_success\\\": true,\\n \\\"is_finished\\\": true\\n },\\n {\\n \\\"task_id\\\": \\\"2\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"1\\\"\\n ],\\n \\\"instruction\\\": \\\"Perform exploratory data analysis on the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"eda\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n },\\n {\\n \\\"task_id\\\": \\\"3\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"2\\\"\\n ],\\n \\\"instruction\\\": \\\"Create a plot visualizing the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n }\\n]\\n## Current Task\\n{\\\"task_id\\\":\\\"2\\\",\\\"dependent_task_ids\\\":[\\\"1\\\"],\\\"instruction\\\":\\\"Perform exploratory data 
analysis on the Iris dataset.\\\",\\\"task_type\\\":\\\"eda\\\",\\\"code\\\":\\\"\\\",\\\"result\\\":\\\"\\\",\\\"is_success\\\":false,\\\"is_finished\\\":false}\\n\"}, {\"role\": \"user\", \"content\": \"\\n# Instruction\\nWrite complete code for 'Current Task'. And avoid duplicating code from finished tasks, such as repeated import of packages, reading data, etc.\\nSpecifically, \\n\\n# Capabilities\\n- You can utilize pre-defined tools in any code lines from 'Available Tools' in the form of Python Class.\\n- You can freely combine the use of any other public packages, like sklearn, numpy, pandas, etc..\\n\\n# Available Tools (can be empty):\\nEach Class tool is described in JSON format. When you call a tool, import the tool first.\\n{}\\n\\n# Constraints:\\n- Ensure the output new code is executable in the same Jupyter notebook with previous tasks code have been executed.\\n- Always prioritize using pre-defined tools for the same functionality.\\n\"}]": {
+ "code": "import pandas as pd\n\n# Convert the Iris dataset to a DataFrame for easier manipulation\niris_df = pd.DataFrame(data=iris_data.data, columns=iris_data.feature_names)\niris_df['target'] = iris_data.target\niris_df['target_names'] = iris_df['target'].apply(lambda x: iris_data.target_names[x])\n\n# Display basic information about the dataset\niris_df.info()\n\n# Display statistical summary of the dataset\niris_df.describe()\n\n# Display the first few rows of the dataset\ndisplay(iris_df.head())"
+ },
+ "[{\"role\": \"system\", \"content\": \"You are Code Interpreter, a world-class programmer that can complete any goal by executing code. Strictly follow the plan and generate code step by step. Each step of the code will be executed on the user's machine, and the user will provide the code execution results to you.**Notice: The code for the next step depends on the code for the previous step. Must reuse variables in the lastest other code directly, dont creat it again, it is very import for you. Use !pip install in a standalone block to install missing packages.Usually the libraries you need are already installed.Dont check if packages already imported.**\"}, {\"role\": \"user\", \"content\": \"\\n## User Requirement\\nRun data analysis on sklearn Iris dataset, include a plot\\n## Context\\n\\n## Current Plan\\n[\\n {\\n \\\"task_id\\\": \\\"1\\\",\\n \\\"dependent_task_ids\\\": [],\\n \\\"instruction\\\": \\\"Load the sklearn Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"from sklearn.datasets import load_iris\\\\niris_data = load_iris()\\\",\\n \\\"result\\\": \\\"a successful run\\\",\\n \\\"is_success\\\": true,\\n \\\"is_finished\\\": true\\n },\\n {\\n \\\"task_id\\\": \\\"2\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"1\\\"\\n ],\\n \\\"instruction\\\": \\\"Perform exploratory data analysis on the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"eda\\\",\\n \\\"code\\\": \\\"import pandas as pd\\\\n\\\\n# Convert the Iris dataset to a DataFrame for easier manipulation\\\\niris_df = pd.DataFrame(data=iris_data.data, columns=iris_data.feature_names)\\\\niris_df['target'] = iris_data.target\\\\niris_df['target_names'] = iris_df['target'].apply(lambda x: iris_data.target_names[x])\\\\n\\\\n# Display basic information about the dataset\\\\niris_df.info()\\\\n\\\\n# Display statistical summary of the dataset\\\\niris_df.describe()\\\\n\\\\n# Display the first few rows of the dataset\\\\ndisplay(iris_df.head())\\\",\\n \\\"result\\\": 
\\\"a successful run\\\",\\n \\\"is_success\\\": true,\\n \\\"is_finished\\\": true\\n },\\n {\\n \\\"task_id\\\": \\\"3\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"2\\\"\\n ],\\n \\\"instruction\\\": \\\"Create a plot visualizing the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n }\\n]\\n## Current Task\\n{\\\"task_id\\\":\\\"3\\\",\\\"dependent_task_ids\\\":[\\\"2\\\"],\\\"instruction\\\":\\\"Create a plot visualizing the Iris dataset.\\\",\\\"task_type\\\":\\\"other\\\",\\\"code\\\":\\\"\\\",\\\"result\\\":\\\"\\\",\\\"is_success\\\":false,\\\"is_finished\\\":false}\\n\"}, {\"role\": \"user\", \"content\": \"\\n# Instruction\\nWrite complete code for 'Current Task'. And avoid duplicating code from finished tasks, such as repeated import of packages, reading data, etc.\\nSpecifically, \\n\\n# Capabilities\\n- You can utilize pre-defined tools in any code lines from 'Available Tools' in the form of Python Class.\\n- You can freely combine the use of any other public packages, like sklearn, numpy, pandas, etc..\\n\\n# Available Tools (can be empty):\\nEach Class tool is described in JSON format. When you call a tool, import the tool first.\\n{}\\n\\n# Constraints:\\n- Ensure the output new code is executable in the same Jupyter notebook with previous tasks code have been executed.\\n- Always prioritize using pre-defined tools for the same functionality.\\n\"}]": {
+ "code": "import matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Set the style of seaborn\nsns.set_style('whitegrid')\n\n# Pairplot to visualize the relationships between features\nsns.pairplot(iris_df, hue='target_names', markers='+')\nplt.show()"
+ },
"\n## context\n\n\n-----\n\n## format example\n[CONTENT]\n{\n \"invoice\": \"False\"\n}\n[/CONTENT]\n\n## nodes: \": # \"\n- invoice: # if it's a invoice file, return True else False\n\n\n## constraint\nLanguage: Please use the same language as Human INPUT.\nFormat: output wrapped inside [CONTENT][/CONTENT] like format example, nothing else.\n\n## action\nFollow instructions of nodes, generate output and make sure it follows the format example.\n": "[CONTENT]\n{\n \"invoice\": \"True\"\n}\n[/CONTENT]"
}
\ No newline at end of file
diff --git a/tests/metagpt/roles/ci/test_code_interpreter.py b/tests/metagpt/roles/ci/test_code_interpreter.py
index f23292965..9d2f2429b 100644
--- a/tests/metagpt/roles/ci/test_code_interpreter.py
+++ b/tests/metagpt/roles/ci/test_code_interpreter.py
@@ -17,3 +17,7 @@ async def test_code_interpreter(mocker, auto_run):
rsp = await ci.run(requirement)
logger.info(rsp)
assert len(rsp.content) > 0
+
+ finished_tasks = ci.planner.plan.get_finished_tasks()
+ assert len(finished_tasks) > 0
+ assert len(finished_tasks[0].code) > 0 # check one task to see if code is recorded
From 6f31289e7e0efd96a22400b31df8179eab286875 Mon Sep 17 00:00:00 2001
From: better629
Date: Wed, 7 Feb 2024 10:02:15 +0800
Subject: [PATCH 006/104] re-commit zhipu-api due to merge mistake
---
examples/llm_hello_world.py | 8 -------
examples/llm_vision.py | 23 ++++++++++++++++++
metagpt/provider/general_api_requestor.py | 3 ++-
metagpt/provider/zhipuai_api.py | 28 ++++++++++------------
metagpt/utils/token_counter.py | 7 +++---
tests/metagpt/provider/test_zhipuai_api.py | 4 ++--
6 files changed, 43 insertions(+), 30 deletions(-)
create mode 100644 examples/llm_vision.py
diff --git a/examples/llm_hello_world.py b/examples/llm_hello_world.py
index 1d132eb8a..219a303c8 100644
--- a/examples/llm_hello_world.py
+++ b/examples/llm_hello_world.py
@@ -6,11 +6,9 @@
@File : llm_hello_world.py
"""
import asyncio
-from pathlib import Path
from metagpt.llm import LLM
from metagpt.logs import logger
-from metagpt.utils.common import encode_image
async def main():
@@ -29,12 +27,6 @@ async def main():
if hasattr(llm, "completion"):
logger.info(llm.completion(hello_msg))
- # check if the configured llm supports llm-vision capacity. If not, it will throw a error
- invoice_path = Path(__file__).parent.joinpath("..", "tests", "data", "invoices", "invoice-2.png")
- img_base64 = encode_image(invoice_path)
- res = await llm.aask(msg="if this is a invoice, just return True else return False", images=[img_base64])
- assert "true" in res.lower()
-
if __name__ == "__main__":
asyncio.run(main())
diff --git a/examples/llm_vision.py b/examples/llm_vision.py
new file mode 100644
index 000000000..276decd59
--- /dev/null
+++ b/examples/llm_vision.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Desc : example to run the ability of LLM vision
+
+import asyncio
+from pathlib import Path
+
+from metagpt.llm import LLM
+from metagpt.utils.common import encode_image
+
+
+async def main():
+ llm = LLM()
+
+ # check if the configured llm supports llm-vision capacity. If not, it will throw an error
+ invoice_path = Path(__file__).parent.joinpath("..", "tests", "data", "invoices", "invoice-2.png")
+ img_base64 = encode_image(invoice_path)
+ res = await llm.aask(msg="if this is a invoice, just return True else return False", images=[img_base64])
+ assert "true" in res.lower()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/metagpt/provider/general_api_requestor.py b/metagpt/provider/general_api_requestor.py
index 500cd1426..18f4dd909 100644
--- a/metagpt/provider/general_api_requestor.py
+++ b/metagpt/provider/general_api_requestor.py
@@ -60,7 +60,8 @@ class GeneralAPIRequestor(APIRequestor):
self, result: requests.Response, stream: bool
) -> Tuple[Union[bytes, Iterator[Generator]], bytes]:
"""Returns the response(s) and a bool indicating whether it is a stream."""
- if stream and "text/event-stream" in result.headers.get("Content-Type", ""):
+ content_type = result.headers.get("Content-Type", "")
+ if stream and ("text/event-stream" in content_type or "application/x-ndjson" in content_type):
return (
self._interpret_response_line(line, result.status_code, result.headers, stream=True)
for line in parse_stream(result.iter_lines())
diff --git a/metagpt/provider/zhipuai_api.py b/metagpt/provider/zhipuai_api.py
index 9108a1fba..9e8e5fb53 100644
--- a/metagpt/provider/zhipuai_api.py
+++ b/metagpt/provider/zhipuai_api.py
@@ -3,9 +3,8 @@
# @Desc : zhipuai LLM from https://open.bigmodel.cn/dev/api#sdk
from enum import Enum
+from typing import Optional
-import openai
-import zhipuai
from requests import ConnectionError
from tenacity import (
after_log,
@@ -14,6 +13,7 @@ from tenacity import (
stop_after_attempt,
wait_random_exponential,
)
+from zhipuai.types.chat.chat_completion import Completion
from metagpt.configs.llm_config import LLMConfig, LLMType
from metagpt.logs import log_llm_stream, logger
@@ -21,6 +21,7 @@ from metagpt.provider.base_llm import BaseLLM
from metagpt.provider.llm_provider_registry import register_provider
from metagpt.provider.openai_api import log_and_reraise
from metagpt.provider.zhipuai.zhipu_model_api import ZhiPuModelAPI
+from metagpt.utils.cost_manager import CostManager
class ZhiPuEvent(Enum):
@@ -38,20 +39,15 @@ class ZhiPuAILLM(BaseLLM):
"""
def __init__(self, config: LLMConfig):
- self.__init_zhipuai(config)
- self.llm = ZhiPuModelAPI
- self.model = "chatglm_turbo" # so far only one model, just use it
- self.use_system_prompt: bool = False # zhipuai has no system prompt when use api
self.config = config
+ self.__init_zhipuai()
+ self.cost_manager: Optional[CostManager] = None
- def __init_zhipuai(self, config: LLMConfig):
- assert config.api_key
- zhipuai.api_key = config.api_key
- # due to use openai sdk, set the api_key but it will't be used.
- # openai.api_key = zhipuai.api_key # due to use openai sdk, set the api_key but it will't be used.
- if config.proxy:
- # FIXME: openai v1.x sdk has no proxy support
- openai.proxy = config.proxy
+ def __init_zhipuai(self):
+ assert self.config.api_key
+ self.api_key = self.config.api_key
+ self.model = self.config.model # so far, it supports glm-3-turbo and glm-4
+ self.llm = ZhiPuModelAPI(api_key=self.api_key)
def _const_kwargs(self, messages: list[dict], stream: bool = False) -> dict:
kwargs = {"model": self.model, "messages": messages, "stream": stream, "temperature": 0.3}
@@ -63,12 +59,12 @@ class ZhiPuAILLM(BaseLLM):
try:
prompt_tokens = int(usage.get("prompt_tokens", 0))
completion_tokens = int(usage.get("completion_tokens", 0))
- self.config.cost_manager.update_cost(prompt_tokens, completion_tokens, self.model)
+ self.cost_manager.update_cost(prompt_tokens, completion_tokens, self.model)
except Exception as e:
logger.error(f"zhipuai updats costs failed! exp: {e}")
def completion(self, messages: list[dict], timeout=3) -> dict:
- resp = self.llm.chat.completions.create(**self._const_kwargs(messages))
+ resp: Completion = self.llm.chat.completions.create(**self._const_kwargs(messages))
usage = resp.usage.model_dump()
self._update_costs(usage)
return resp.model_dump()
diff --git a/metagpt/utils/token_counter.py b/metagpt/utils/token_counter.py
index a0fb3b70d..65f5fe76f 100644
--- a/metagpt/utils/token_counter.py
+++ b/metagpt/utils/token_counter.py
@@ -32,8 +32,8 @@ TOKEN_COSTS = {
"gpt-4-vision-preview": {"prompt": 0.01, "completion": 0.03}, # TODO add extra image price calculator
"gpt-4-1106-vision-preview": {"prompt": 0.01, "completion": 0.03},
"text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0},
- "glm-3-turbo": {"prompt": 0.0, "completion": 0.0007}, # 128k version, prompt + completion tokens=0.005¥/k-tokens
- "glm-4": {"prompt": 0.0, "completion": 0.014}, # 128k version, prompt + completion tokens=0.1¥/k-tokens
+ "glm-3-turbo": {"prompt": 0.0007, "completion": 0.0007}, # 128k version, prompt + completion tokens=0.005¥/k-tokens
+ "glm-4": {"prompt": 0.014, "completion": 0.014}, # 128k version, prompt + completion tokens=0.1¥/k-tokens
"gemini-pro": {"prompt": 0.00025, "completion": 0.0005},
}
@@ -58,7 +58,8 @@ TOKEN_MAX = {
"gpt-4-vision-preview": 128000,
"gpt-4-1106-vision-preview": 128000,
"text-embedding-ada-002": 8192,
- "chatglm_turbo": 32768,
+ "glm-3-turbo": 128000,
+ "glm-4": 128000,
"gemini-pro": 32768,
}
diff --git a/tests/metagpt/provider/test_zhipuai_api.py b/tests/metagpt/provider/test_zhipuai_api.py
index 798209710..ad2ececa2 100644
--- a/tests/metagpt/provider/test_zhipuai_api.py
+++ b/tests/metagpt/provider/test_zhipuai_api.py
@@ -17,7 +17,7 @@ default_resp = {
}
-async def mock_zhipuai_acreate_stream(**kwargs):
+async def mock_zhipuai_acreate_stream(self, **kwargs):
class MockResponse(object):
async def _aread(self):
class Iterator(object):
@@ -37,7 +37,7 @@ async def mock_zhipuai_acreate_stream(**kwargs):
return MockResponse()
-async def mock_zhipuai_acreate(**kwargs) -> dict:
+async def mock_zhipuai_acreate(self, **kwargs) -> dict:
return default_resp
From f31f371d539ba8a206aad46b015108b786f23da7 Mon Sep 17 00:00:00 2001
From: Zhou Tuo <45249333+FromCSUZhou@users.noreply.github.com>
Date: Wed, 7 Feb 2024 02:49:47 +0000
Subject: [PATCH 007/104] add email_login tool and add email summarization
scenario example
---
examples/email_summary.py | 25 ++++++++++++++
metagpt/tools/libs/__init__.py | 10 +++++-
metagpt/tools/libs/email_login.py | 57 +++++++++++++++++++++++++++++++
metagpt/tools/tool_type.py | 4 +++
4 files changed, 95 insertions(+), 1 deletion(-)
create mode 100644 examples/email_summary.py
create mode 100644 metagpt/tools/libs/email_login.py
diff --git a/examples/email_summary.py b/examples/email_summary.py
new file mode 100644
index 000000000..39c6df1c1
--- /dev/null
+++ b/examples/email_summary.py
@@ -0,0 +1,25 @@
+# -*- encoding: utf-8 -*-
+"""
+@Date : 2024/02/07
+@Author : Tuo Zhou
+@File : email_summary.py
+"""
+
+from metagpt.roles.ci.code_interpreter import CodeInterpreter
+
+
+async def main():
+ # prompt_response = """I will give you your Outlook email account(englishgpt@outlook.com) and password(the outlook_email_password item in the environment variable). You need to find the latest email in my inbox with the sender's suffix @qq.com and reply to him "Thank you! I have received your email~"""""
+ prompt_summary = """I will give you your Outlook email account(englishgpt@outlook.com) and password(outlook_email_password item in the environment variable).
+ Firstly, Please help me present the latest 5 senders and full letter contents.
+ Then, summarize each of the 5 emails into one sentence with Chinese(you can do this by yourself, don't need import other models to do this) and output them in a markdown format."""
+ # ci_response = CodeInterpreter(goal=prompt_response, use_tools=True)
+ ci_summary = CodeInterpreter(goal=prompt_summary, use_tools=True)
+
+ await ci_summary.run(prompt_summary)
+
+
+if __name__ == "__main__":
+ import asyncio
+
+ asyncio.run(main())
diff --git a/metagpt/tools/libs/__init__.py b/metagpt/tools/libs/__init__.py
index c9767c1e5..91596fd3d 100644
--- a/metagpt/tools/libs/__init__.py
+++ b/metagpt/tools/libs/__init__.py
@@ -10,6 +10,14 @@ from metagpt.tools.libs import (
sd_engine,
gpt_v_generator,
web_scraping,
+ email_login,
)
-_ = data_preprocess, feature_engineering, sd_engine, gpt_v_generator, web_scraping # Avoid pre-commit error
+_ = (
+ data_preprocess,
+ feature_engineering,
+ sd_engine,
+ gpt_v_generator,
+ web_scraping,
+ email_login,
+) # Avoid pre-commit error
diff --git a/metagpt/tools/libs/email_login.py b/metagpt/tools/libs/email_login.py
new file mode 100644
index 000000000..946e294eb
--- /dev/null
+++ b/metagpt/tools/libs/email_login.py
@@ -0,0 +1,57 @@
+from imap_tools import MailBox
+
+from metagpt.tools.tool_registry import register_tool
+from metagpt.tools.tool_type import ToolType
+
+
+@register_tool(tool_type=ToolType.EMAIL_LOGIN.type_name)
+def email_login_imap(email_address, email_password):
+ """
+ Use imap_tools package to log in to your email (the email that supports IMAP protocol) to verify and return the account object.
+
+ Args:
+ email_address (str): Email address that needs to be logged in and linked.
+ email_password (str): Password for the email address that needs to be logged in and linked.
+
+ Returns:
+ object: The imap_tools's MailBox object returned after successfully connecting to the mailbox through imap_tools, including various information about this account (email, etc.), or None if login fails.
+ """
+
+ # Define a dictionary mapping email domains to their IMAP server addresses
+ imap_servers = {
+ "outlook.com": "imap-mail.outlook.com", # Outlook
+ "163.com": "imap.163.com", # 163 Mail
+ "qq.com": "imap.qq.com", # QQ Mail
+ "gmail.com": "imap.gmail.com", # Gmail
+ "yahoo.com": "imap.mail.yahoo.com", # Yahoo Mail
+ "icloud.com": "imap.mail.me.com", # iCloud Mail
+ "hotmail.com": "imap-mail.outlook.com", # Hotmail (same as Outlook)
+ "live.com": "imap-mail.outlook.com", # Live (same as Outlook)
+ "sina.com": "imap.sina.com", # Sina Mail
+ "sohu.com": "imap.sohu.com", # Sohu Mail
+ "yahoo.co.jp": "imap.mail.yahoo.co.jp", # Yahoo Mail Japan
+ "yandex.com": "imap.yandex.com", # Yandex Mail
+ "mail.ru": "imap.mail.ru", # Mail.ru
+ "aol.com": "imap.aol.com", # AOL Mail
+ "gmx.com": "imap.gmx.com", # GMX Mail
+ "zoho.com": "imap.zoho.com", # Zoho Mail
+ }
+
+ # Extract the domain from the email address
+ domain = email_address.split("@")[-1]
+
+ # Determine the correct IMAP server
+ imap_server = imap_servers.get(domain)
+
+ if not imap_server:
+ print(f"IMAP server for {domain} not found.")
+ return None
+
+ # Attempt to log in to the email account
+ try:
+ mailbox = MailBox(imap_server).login(email_address, email_password)
+ print("Login successful")
+ return mailbox
+ except Exception as e:
+ print(f"Login failed: {e}")
+ return None
diff --git a/metagpt/tools/tool_type.py b/metagpt/tools/tool_type.py
index 6fa971c56..5a0c66a03 100644
--- a/metagpt/tools/tool_type.py
+++ b/metagpt/tools/tool_type.py
@@ -17,6 +17,10 @@ class ToolType(Enum):
desc="Only for changing value inplace.",
usage_prompt=DATA_PREPROCESS_PROMPT,
)
+ EMAIL_LOGIN = ToolTypeDef(
+ name="email_login",
+ desc="For logging to an email.",
+ )
FEATURE_ENGINEERING = ToolTypeDef(
name="feature_engineering",
desc="Only for creating new columns for input data.",
From 3b4379d12569cae719ff58f6c39208eed05483aa Mon Sep 17 00:00:00 2001
From: voidking
Date: Wed, 7 Feb 2024 10:34:04 +0800
Subject: [PATCH 008/104] chore: move the required playwright to
requirements.txt
---
requirements.txt | 2 +-
setup.py | 1 -
2 files changed, 1 insertion(+), 2 deletions(-)
diff --git a/requirements.txt b/requirements.txt
index 6cb25d52b..804ff4359 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -63,7 +63,7 @@ gitignore-parser==0.1.9
websockets~=12.0
networkx~=3.2.1
google-generativeai==0.3.2
-# playwright==1.40.0 # playwright extras require
+playwright>=1.26 # used at metagpt/tools/libs/web_scraping.py
anytree
ipywidgets==8.1.1
Pillow
diff --git a/setup.py b/setup.py
index b16d978cf..be3956ea4 100644
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,6 @@ requirements = (here / "requirements.txt").read_text(encoding="utf-8").splitline
extras_require = {
- "playwright": ["playwright>=1.26", "beautifulsoup4"],
"selenium": ["selenium>4", "webdriver_manager", "beautifulsoup4"],
"search-google": ["google-api-python-client==2.94.0"],
"search-ddg": ["duckduckgo-search~=4.1.1"],
From 63ab24a77bbb850baed77b515941342d48329aca Mon Sep 17 00:00:00 2001
From: voidking
Date: Wed, 7 Feb 2024 11:54:31 +0800
Subject: [PATCH 009/104] chore: add one more space
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 804ff4359..1426500ce 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -63,7 +63,7 @@ gitignore-parser==0.1.9
websockets~=12.0
networkx~=3.2.1
google-generativeai==0.3.2
-playwright>=1.26 # used at metagpt/tools/libs/web_scraping.py
+playwright>=1.26 # used at metagpt/tools/libs/web_scraping.py
anytree
ipywidgets==8.1.1
Pillow
From f51df40ebb4cd5af6263c613e2ffa898a3ae1ccd Mon Sep 17 00:00:00 2001
From: lidanyang
Date: Wed, 7 Feb 2024 12:02:46 +0800
Subject: [PATCH 010/104] add eda prompt
---
metagpt/prompts/tool_types.py | 7 +++++++
metagpt/tools/tool_type.py | 7 ++++++-
2 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/metagpt/prompts/tool_types.py b/metagpt/prompts/tool_types.py
index f27fbea99..8728d22f3 100644
--- a/metagpt/prompts/tool_types.py
+++ b/metagpt/prompts/tool_types.py
@@ -1,3 +1,10 @@
+# Prompt for using tools of "eda" type
+EDA_PROMPT = """
+The current task is about exploratory data analysis, please note the following:
+- Distinguish column types with `select_dtypes` for tailored analysis and visualization, such as correlation.
+- Remember to `import numpy as np` before using Numpy functions.
+"""
+
# Prompt for using tools of "data_preprocess" type
DATA_PREPROCESS_PROMPT = """
The current task is about data preprocessing, please note the following:
diff --git a/metagpt/tools/tool_type.py b/metagpt/tools/tool_type.py
index 6fa971c56..7f3f132a6 100644
--- a/metagpt/tools/tool_type.py
+++ b/metagpt/tools/tool_type.py
@@ -1,6 +1,7 @@
from enum import Enum
from metagpt.prompts.tool_types import (
+ EDA_PROMPT,
DATA_PREPROCESS_PROMPT,
FEATURE_ENGINEERING_PROMPT,
IMAGE2WEBPAGE_PROMPT,
@@ -11,7 +12,11 @@ from metagpt.tools.tool_data_type import ToolTypeDef
class ToolType(Enum):
- EDA = ToolTypeDef(name="eda", desc="For performing exploratory data analysis")
+ EDA = ToolTypeDef(
+ name="eda",
+ desc="For performing exploratory data analysis",
+ usage_prompt=EDA_PROMPT,
+ )
DATA_PREPROCESS = ToolTypeDef(
name="data_preprocess",
desc="Only for changing value inplace.",
From 18a17bede099a1f188c0f1e55b0665bc65aec43c Mon Sep 17 00:00:00 2001
From: yzlin
Date: Wed, 7 Feb 2024 13:00:31 +0800
Subject: [PATCH 011/104] create ci example folder, mv existing and create new
---
examples/{ => ci}/crawl_webpage.py | 0
examples/ci/data_analysis.py | 14 ++++++++++++++
examples/{ => ci}/imitate_webpage.py | 2 +-
examples/ci/rm_image_background.py | 15 +++++++++++++++
examples/{ => ci}/sd_tool_usage.py | 0
examples/ci/solve_math_problems.py | 17 +++++++++++++++++
6 files changed, 47 insertions(+), 1 deletion(-)
rename examples/{ => ci}/crawl_webpage.py (100%)
create mode 100644 examples/ci/data_analysis.py
rename examples/{ => ci}/imitate_webpage.py (92%)
create mode 100644 examples/ci/rm_image_background.py
rename examples/{ => ci}/sd_tool_usage.py (100%)
create mode 100644 examples/ci/solve_math_problems.py
diff --git a/examples/crawl_webpage.py b/examples/ci/crawl_webpage.py
similarity index 100%
rename from examples/crawl_webpage.py
rename to examples/ci/crawl_webpage.py
diff --git a/examples/ci/data_analysis.py b/examples/ci/data_analysis.py
new file mode 100644
index 000000000..8dc4340d4
--- /dev/null
+++ b/examples/ci/data_analysis.py
@@ -0,0 +1,14 @@
+import asyncio
+
+from metagpt.roles.ci.code_interpreter import CodeInterpreter
+
+
+async def main(requirement: str = ""):
+ code_interpreter = CodeInterpreter(use_tools=False)
+ await code_interpreter.run(requirement)
+
+
+if __name__ == "__main__":
+ requirement = "Run data analysis on sklearn Iris dataset, include a plot"
+
+ asyncio.run(main(requirement))
diff --git a/examples/imitate_webpage.py b/examples/ci/imitate_webpage.py
similarity index 92%
rename from examples/imitate_webpage.py
rename to examples/ci/imitate_webpage.py
index 5075e1e39..6a83d3a33 100644
--- a/examples/imitate_webpage.py
+++ b/examples/ci/imitate_webpage.py
@@ -15,7 +15,7 @@ Firstly, utilize Selenium and WebDriver for rendering.
Secondly, convert image to a webpage including HTML, CSS and JS in one go.
Finally, save webpage in a text file.
Note: All required dependencies and environments have been fully installed and configured."""
- ci = CodeInterpreter(goal=prompt, use_tools=True)
+ ci = CodeInterpreter(use_tools=True)
await ci.run(prompt)
diff --git a/examples/ci/rm_image_background.py b/examples/ci/rm_image_background.py
new file mode 100644
index 000000000..83dbbdc77
--- /dev/null
+++ b/examples/ci/rm_image_background.py
@@ -0,0 +1,15 @@
+import asyncio
+
+from metagpt.roles.ci.code_interpreter import CodeInterpreter
+
+
+async def main(requirement: str = ""):
+ code_interpreter = CodeInterpreter(use_tools=False)
+ await code_interpreter.run(requirement)
+
+
+if __name__ == "__main__":
+ image_path = "/your/path/to/the/image.jpeg"
+ save_path = "/your/intended/save/path/for/image_rm_bg.png"
+ requirement = f"This is a image, you need to use python toolkit rembg to remove the background of the image and save the result. image path:{image_path}; save path:{save_path}."
+ asyncio.run(main(requirement))
diff --git a/examples/sd_tool_usage.py b/examples/ci/sd_tool_usage.py
similarity index 100%
rename from examples/sd_tool_usage.py
rename to examples/ci/sd_tool_usage.py
diff --git a/examples/ci/solve_math_problems.py b/examples/ci/solve_math_problems.py
new file mode 100644
index 000000000..5bf06b9d8
--- /dev/null
+++ b/examples/ci/solve_math_problems.py
@@ -0,0 +1,17 @@
+import asyncio
+
+from metagpt.roles.ci.code_interpreter import CodeInterpreter
+
+
+async def main(requirement: str = ""):
+ code_interpreter = CodeInterpreter(use_tools=False, goal=requirement)
+ await code_interpreter.run(requirement)
+
+
+if __name__ == "__main__":
+ problem = "At a school, all 60 students play on at least one of three teams: Basketball, Soccer, and Mathletics. 8 students play all three sports, half the students play basketball, and the ratio of the size of the math team to the size of the basketball team to the size of the soccer team is $4:3:2$. How many students at the school play on exactly two teams?"
+ requirement = (
+ f"This is a math problem:{problem}. You can analyze and solve it step by step or use Python code to solve it."
+ )
+
+ asyncio.run(main(requirement))
From 9ed352b2cedf74a44202c1711dadbe05a14e86ae Mon Sep 17 00:00:00 2001
From: lidanyang
Date: Wed, 7 Feb 2024 14:56:19 +0800
Subject: [PATCH 012/104] add example for ml_engineer_with_tools
---
examples/ml_engineer_with_tools.py | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
create mode 100644 examples/ml_engineer_with_tools.py
diff --git a/examples/ml_engineer_with_tools.py b/examples/ml_engineer_with_tools.py
new file mode 100644
index 000000000..1c90f2946
--- /dev/null
+++ b/examples/ml_engineer_with_tools.py
@@ -0,0 +1,16 @@
+import asyncio
+
+from metagpt.roles.ci.ml_engineer import MLEngineer
+
+
+async def main(requirement: str, auto_run: bool = True, use_tools: bool = True):
+ role = MLEngineer(goal=requirement, auto_run=auto_run, use_tools=use_tools)
+ await role.run(requirement)
+
+
+if __name__ == "__main__":
+ data_path = "your_path_to_icr/icr-identify-age-related-conditions" # 替换 'your_path_to_icr' 为实际数据存放的路径
+ train_path = f"{data_path}/your_train_data.csv" # 替换 'your_train_data.csv' 为你的训练数据文件名
+ eval_path = f"{data_path}/your_eval_data.csv" # 替换 'your_eval_data.csv' 为你的评估数据文件名
+ requirement = f"This is a medical dataset with over fifty anonymized health characteristics linked to three age-related conditions. Your goal is to predict whether a subject has or has not been diagnosed with one of these conditions.The target column is Class. Perform data analysis, data preprocessing, feature engineering, and modeling to predict the target. Report f1 score on the eval data. Train data path: {train_path}, eval data path:{eval_path}."
+ asyncio.run(main(requirement))
From d29ab799bf7de735a76754e85d662cbbd8e5525e Mon Sep 17 00:00:00 2001
From: yzlin
Date: Wed, 7 Feb 2024 15:55:20 +0800
Subject: [PATCH 013/104] change math problems
---
examples/ci/solve_math_problems.py | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/examples/ci/solve_math_problems.py b/examples/ci/solve_math_problems.py
index 5bf06b9d8..8c660975b 100644
--- a/examples/ci/solve_math_problems.py
+++ b/examples/ci/solve_math_problems.py
@@ -4,14 +4,10 @@ from metagpt.roles.ci.code_interpreter import CodeInterpreter
async def main(requirement: str = ""):
- code_interpreter = CodeInterpreter(use_tools=False, goal=requirement)
+ code_interpreter = CodeInterpreter(use_tools=False)
await code_interpreter.run(requirement)
if __name__ == "__main__":
- problem = "At a school, all 60 students play on at least one of three teams: Basketball, Soccer, and Mathletics. 8 students play all three sports, half the students play basketball, and the ratio of the size of the math team to the size of the basketball team to the size of the soccer team is $4:3:2$. How many students at the school play on exactly two teams?"
- requirement = (
- f"This is a math problem:{problem}. You can analyze and solve it step by step or use Python code to solve it."
- )
-
+ requirement = "Solve this math problem: The greatest common divisor of positive integers m and n is 6. The least common multiple of m and n is 126. What is the least possible value of m + n?"
asyncio.run(main(requirement))
From d180d3912e33aca2c5968f4a80c6a94b2189d020 Mon Sep 17 00:00:00 2001
From: better629
Date: Wed, 7 Feb 2024 15:56:01 +0800
Subject: [PATCH 014/104] add qianfan api support
---
examples/llm_hello_world.py | 21 +++--
metagpt/configs/llm_config.py | 8 +-
metagpt/provider/__init__.py | 2 +
metagpt/provider/base_llm.py | 16 ++++
metagpt/provider/qianfan_api.py | 151 ++++++++++++++++++++++++++++++++
metagpt/utils/cost_manager.py | 4 +-
metagpt/utils/token_counter.py | 53 +++++++++++
requirements.txt | 1 +
8 files changed, 245 insertions(+), 11 deletions(-)
create mode 100644 metagpt/provider/qianfan_api.py
diff --git a/examples/llm_hello_world.py b/examples/llm_hello_world.py
index 1d132eb8a..e22edbdf2 100644
--- a/examples/llm_hello_world.py
+++ b/examples/llm_hello_world.py
@@ -6,16 +6,25 @@
@File : llm_hello_world.py
"""
import asyncio
-from pathlib import Path
from metagpt.llm import LLM
from metagpt.logs import logger
-from metagpt.utils.common import encode_image
async def main():
llm = LLM()
- logger.info(await llm.aask("hello world"))
+ # llm type check
+ id_ques = "what's your name"
+ logger.info(f"{id_ques}: ")
+ logger.info(await llm.aask(id_ques))
+ logger.info("\n\n")
+
+ logger.info(
+ await llm.aask(
+ "who are you", system_msgs=["act as a robot, answer 'I'am robot' if the question is 'who are you'"]
+ )
+ )
+
logger.info(await llm.aask_batch(["hi", "write python hello world."]))
hello_msg = [{"role": "user", "content": "count from 1 to 10. split by newline."}]
@@ -29,12 +38,6 @@ async def main():
if hasattr(llm, "completion"):
logger.info(llm.completion(hello_msg))
- # check if the configured llm supports llm-vision capacity. If not, it will throw a error
- invoice_path = Path(__file__).parent.joinpath("..", "tests", "data", "invoices", "invoice-2.png")
- img_base64 = encode_image(invoice_path)
- res = await llm.aask(msg="if this is a invoice, just return True else return False", images=[img_base64])
- assert "true" in res.lower()
-
if __name__ == "__main__":
asyncio.run(main())
diff --git a/metagpt/configs/llm_config.py b/metagpt/configs/llm_config.py
index fb923d3e4..1b05b5270 100644
--- a/metagpt/configs/llm_config.py
+++ b/metagpt/configs/llm_config.py
@@ -24,6 +24,7 @@ class LLMType(Enum):
METAGPT = "metagpt"
AZURE = "azure"
OLLAMA = "ollama"
+ QIANFAN = "qianfan" # Baidu BCE
def __missing__(self, key):
return self.OPENAI
@@ -36,13 +37,18 @@ class LLMConfig(YamlModel):
Optional Fields in pydantic: https://docs.pydantic.dev/latest/migration/#required-optional-and-nullable-fields
"""
- api_key: str
+ api_key: str = "sk-"
api_type: LLMType = LLMType.OPENAI
base_url: str = "https://api.openai.com/v1"
api_version: Optional[str] = None
model: Optional[str] = None # also stands for DEPLOYMENT_NAME
+ # For Cloud Service Provider like Baidu/ Alibaba
+ access_key: Optional[str] = None
+ secret_key: Optional[str] = None
+ endpoint: Optional[str] = None # for self-deployed model on the cloud
+
# For Spark(Xunfei), maybe remove later
app_id: Optional[str] = None
api_secret: Optional[str] = None
diff --git a/metagpt/provider/__init__.py b/metagpt/provider/__init__.py
index 675734811..8c0aab836 100644
--- a/metagpt/provider/__init__.py
+++ b/metagpt/provider/__init__.py
@@ -16,6 +16,7 @@ from metagpt.provider.azure_openai_api import AzureOpenAILLM
from metagpt.provider.metagpt_api import MetaGPTLLM
from metagpt.provider.human_provider import HumanProvider
from metagpt.provider.spark_api import SparkLLM
+from metagpt.provider.qianfan_api import QianFanLLM
__all__ = [
"FireworksLLM",
@@ -28,4 +29,5 @@ __all__ = [
"OllamaLLM",
"HumanProvider",
"SparkLLM",
+ "QianFanLLM",
]
diff --git a/metagpt/provider/base_llm.py b/metagpt/provider/base_llm.py
index b144471b5..d3d9c829b 100644
--- a/metagpt/provider/base_llm.py
+++ b/metagpt/provider/base_llm.py
@@ -67,6 +67,22 @@ class BaseLLM(ABC):
def _default_system_msg(self):
return self._system_msg(self.system_prompt)
+ def _update_costs(self, usage: dict, model: str = None, local_calc_usage: bool = True):
+ """update each request's token cost
+ Args:
+ model (str): model name or in some scenarios called endpoint
+ local_calc_usage (bool): some models don't calculate usage, it will overwrite calc_usage
+ """
+ calc_usage = self.config.calc_usage and local_calc_usage
+ model = model if model else self.model
+ if calc_usage and self.cost_manager:
+ try:
+ prompt_tokens = int(usage.get("prompt_tokens", 0))
+ completion_tokens = int(usage.get("completion_tokens", 0))
+ self.cost_manager.update_cost(prompt_tokens, completion_tokens, model)
+ except Exception as e:
+ logger.error(f"{self.__class__.__name__} updats costs failed! exp: {e}")
+
async def aask(
self,
msg: str,
diff --git a/metagpt/provider/qianfan_api.py b/metagpt/provider/qianfan_api.py
new file mode 100644
index 000000000..180935e61
--- /dev/null
+++ b/metagpt/provider/qianfan_api.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Desc : llm api of qianfan from Baidu, supports ERNIE(wen xin yi yan) and opensource models
+import copy
+import os
+
+import qianfan
+from qianfan.resources.typing import JsonBody
+from tenacity import (
+ after_log,
+ retry,
+ retry_if_exception_type,
+ stop_after_attempt,
+ wait_random_exponential,
+)
+
+from metagpt.configs.llm_config import LLMConfig, LLMType
+from metagpt.logs import log_llm_stream, logger
+from metagpt.provider.base_llm import BaseLLM
+from metagpt.provider.llm_provider_registry import register_provider
+from metagpt.provider.openai_api import log_and_reraise
+from metagpt.utils.cost_manager import CostManager
+from metagpt.utils.token_counter import (
+ QianFan_EndPoint_TOKEN_COSTS,
+ QianFan_MODEL_TOKEN_COSTS,
+)
+
+
+@register_provider(LLMType.QIANFAN)
+class QianFanLLM(BaseLLM):
+ """
+ Refs
+ Auth: https://cloud.baidu.com/doc/WENXINWORKSHOP/s/3lmokh7n6#%E3%80%90%E6%8E%A8%E8%8D%90%E3%80%91%E4%BD%BF%E7%94%A8%E5%AE%89%E5%85%A8%E8%AE%A4%E8%AF%81aksk%E9%89%B4%E6%9D%83%E8%B0%83%E7%94%A8%E6%B5%81%E7%A8%8B
+ Token Price: https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7#tokens%E5%90%8E%E4%BB%98%E8%B4%B9
+ Models: https://cloud.baidu.com/doc/WENXINWORKSHOP/s/wlmhm7vuo#%E5%AF%B9%E8%AF%9Dchat
+ https://cloud.baidu.com/doc/WENXINWORKSHOP/s/xlmokikxe#%E6%94%AF%E6%8C%81%E6%A8%A1%E5%9E%8B%E5%88%97%E8%A1%A8
+ """
+
+ def __init__(self, config: LLMConfig):
+ self.config = config
+ self.use_system_prompt = False # only some ERNIE-x related models support system_prompt
+ self.__init_qianfan()
+ self.cost_manager = CostManager(token_costs=self.token_costs)
+
+ def __init_qianfan(self):
+ if self.config.access_key and self.config.secret_key:
+ # for system level auth, use access_key and secret_key, recommended by official
+ # set environment variable due to official recommendation
+ os.environ.setdefault("QIANFAN_ACCESS_KEY", self.config.access_key)
+ os.environ.setdefault("QIANFAN_SECRET_KEY", self.config.secret_key)
+ elif self.config.api_key and self.config.secret_key:
+ # for application level auth, use api_key and secret_key
+ # set environment variable due to official recommendation
+ os.environ.setdefault("QIANFAN_AK", self.config.api_key)
+ os.environ.setdefault("QIANFAN_SK", self.config.secret_key)
+ else:
+ raise ValueError("Set the `access_key`&`secret_key` or `api_key`&`secret_key` first")
+
+ support_system_pairs = [
+ ("ERNIE-Bot-4", "completions_pro"), # (model, corresponding-endpoint)
+ ("ERNIE-Bot-8k", "ernie_bot_8k"),
+ ("ERNIE-Bot", "completions"),
+ ("ERNIE-Bot-turbo", "eb-instant"),
+ ("ERNIE-Speed", "ernie_speed"),
+ ("EB-turbo-AppBuilder", "ai_apaas"),
+ ]
+ if self.config.model in [pair[0] for pair in support_system_pairs]:
+ # only some ERNIE models support
+ self.use_system_prompt = True
+ if self.config.endpoint in [pair[1] for pair in support_system_pairs]:
+ self.use_system_prompt = True
+
+ assert not (self.config.model and self.config.endpoint), "Only set `model` or `endpoint` in the config"
+ assert self.config.model or self.config.endpoint, "Should set one of `model` or `endpoint` in the config"
+
+ self.token_costs = copy.deepcopy(QianFan_MODEL_TOKEN_COSTS)
+ self.token_costs.update(QianFan_EndPoint_TOKEN_COSTS)
+
+ # self deployed model on the cloud not to calculate usage, it charges resource pool rental fee
+ self.calc_usage = self.config.calc_usage and self.config.endpoint is None
+ self.client = qianfan.ChatCompletion()
+
+ def _const_kwargs(self, messages: list[dict], stream: bool = False) -> dict:
+ kwargs = {
+ "messages": messages,
+ "stream": stream,
+ }
+ if self.config.temperature > 0:
+ # different model has default temperature. only set when it's specified.
+ kwargs["temperature"] = self.config.temperature
+ if self.config.endpoint:
+ kwargs["endpoint"] = self.config.endpoint
+ elif self.config.model:
+ kwargs["model"] = self.config.model
+
+ if self.use_system_prompt:
+ # if the model support system prompt, extract and pass it
+ if messages[0]["role"] == "system":
+ kwargs["messages"] = messages[1:]
+ kwargs["system"] = messages[0]["content"] # set system prompt here
+ return kwargs
+
+ def _update_costs(self, usage: dict):
+ """update each request's token cost"""
+ model_or_endpoint = self.config.model if self.config.model else self.config.endpoint
+ local_calc_usage = True if model_or_endpoint in self.token_costs else False
+ super()._update_costs(usage, model_or_endpoint, local_calc_usage)
+
+ def get_choice_text(self, resp: JsonBody) -> str:
+ return resp.get("result", "")
+
+ def completion(self, messages: list[dict]) -> JsonBody:
+ resp = self.client.do(**self._const_kwargs(messages=messages, stream=False))
+ self._update_costs(resp.body.get("usage", {}))
+ return resp.body
+
+ async def _achat_completion(self, messages: list[dict]) -> JsonBody:
+ resp = await self.client.ado(**self._const_kwargs(messages=messages, stream=False))
+ self._update_costs(resp.body.get("usage", {}))
+ return resp.body
+
+ async def acompletion(self, messages: list[dict], timeout=3) -> JsonBody:
+ return await self._achat_completion(messages)
+
+ async def _achat_completion_stream(self, messages: list[dict]) -> str:
+ resp = await self.client.ado(**self._const_kwargs(messages=messages, stream=True))
+ collected_content = []
+ usage = {}
+ async for chunk in resp:
+ content = chunk.body.get("result", "")
+ usage = chunk.body.get("usage", {})
+ log_llm_stream(content)
+ collected_content.append(content)
+ log_llm_stream("\n")
+
+ self._update_costs(usage)
+ full_content = "".join(collected_content)
+ return full_content
+
+ @retry(
+ stop=stop_after_attempt(3),
+ wait=wait_random_exponential(min=1, max=60),
+ after=after_log(logger, logger.level("WARNING").name),
+ retry=retry_if_exception_type(ConnectionError),
+ retry_error_callback=log_and_reraise,
+ )
+ async def acompletion_text(self, messages: list[dict], stream=False, timeout: int = 3) -> str:
+ if stream:
+ return await self._achat_completion_stream(messages)
+ resp = await self._achat_completion(messages)
+ return self.get_choice_text(resp)
diff --git a/metagpt/utils/cost_manager.py b/metagpt/utils/cost_manager.py
index 7bf5154b6..e1c0f415b 100644
--- a/metagpt/utils/cost_manager.py
+++ b/metagpt/utils/cost_manager.py
@@ -29,6 +29,7 @@ class CostManager(BaseModel):
total_budget: float = 0
max_budget: float = 10.0
total_cost: float = 0
+ token_costs: dict[str, dict[str, float]] = TOKEN_COSTS
def update_cost(self, prompt_tokens, completion_tokens, model):
"""
@@ -42,7 +43,8 @@ class CostManager(BaseModel):
self.total_prompt_tokens += prompt_tokens
self.total_completion_tokens += completion_tokens
cost = (
- prompt_tokens * TOKEN_COSTS[model]["prompt"] + completion_tokens * TOKEN_COSTS[model]["completion"]
+ prompt_tokens * self.token_costs[model]["prompt"]
+ + completion_tokens * self.token_costs[model]["completion"]
) / 1000
self.total_cost += cost
logger.info(
diff --git a/metagpt/utils/token_counter.py b/metagpt/utils/token_counter.py
index a0fb3b70d..b69ec73d3 100644
--- a/metagpt/utils/token_counter.py
+++ b/metagpt/utils/token_counter.py
@@ -38,6 +38,59 @@ TOKEN_COSTS = {
}
+"""
+QianFan Token Price https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7#tokens%E5%90%8E%E4%BB%98%E8%B4%B9
+Since QianFan has multiple pricing strategies, we unify on `Tokens post-payment` as the statistical method.
+"""
+QianFan_MODEL_TOKEN_COSTS = {
+ "ERNIE-Bot-4": {"prompt": 0.017, "completion": 0.017},
+ "ERNIE-Bot-8k": {"prompt": 0.0034, "completion": 0.0067},
+ "ERNIE-Bot": {"prompt": 0.017, "completion": 0.017},
+ "ERNIE-Bot-turbo": {"prompt": 0.0011, "completion": 0.0011},
+ "EB-turbo-AppBuilder": {"prompt": 0.0011, "completion": 0.0011},
+ "ERNIE-Speed": {"prompt": 0.00056, "completion": 0.0011},
+ "BLOOMZ-7B": {"prompt": 0.00056, "completion": 0.00056},
+ "Llama-2-7B-Chat": {"prompt": 0.00056, "completion": 0.00056},
+ "Llama-2-13B-Chat": {"prompt": 0.00084, "completion": 0.00084},
+ "Llama-2-70B-Chat": {"prompt": 0.0049, "completion": 0.0049},
+ "ChatGLM2-6B-32K": {"prompt": 0.00056, "completion": 0.00056},
+ "AquilaChat-7B": {"prompt": 0.00056, "completion": 0.00056},
+ "Mixtral-8x7B-Instruct": {"prompt": 0.0049, "completion": 0.0049},
+ "SQLCoder-7B": {"prompt": 0.00056, "completion": 0.00056},
+ "CodeLlama-7B-Instruct": {"prompt": 0.00056, "completion": 0.00056},
+ "XuanYuan-70B-Chat-4bit": {"prompt": 0.0049, "completion": 0.0049},
+ "Qianfan-BLOOMZ-7B-compressed": {"prompt": 0.00056, "completion": 0.00056},
+ "Qianfan-Chinese-Llama-2-7B": {"prompt": 0.00056, "completion": 0.00056},
+ "Qianfan-Chinese-Llama-2-13B": {"prompt": 0.00084, "completion": 0.00084},
+ "ChatLaw": {"prompt": 0.0011, "completion": 0.0011},
+ "Yi-34B-Chat": {"prompt": 0.0, "completion": 0.0},
+}
+
+QianFan_EndPoint_TOKEN_COSTS = {
+ "completions_pro": QianFan_MODEL_TOKEN_COSTS["ERNIE-Bot-4"],
+ "ernie_bot_8k": QianFan_MODEL_TOKEN_COSTS["ERNIE-Bot-8k"],
+ "completions": QianFan_MODEL_TOKEN_COSTS["ERNIE-Bot"],
+ "eb-instant": QianFan_MODEL_TOKEN_COSTS["ERNIE-Bot-turbo"],
+ "ai_apaas": QianFan_MODEL_TOKEN_COSTS["EB-turbo-AppBuilder"],
+ "ernie_speed": QianFan_MODEL_TOKEN_COSTS["ERNIE-Speed"],
+ "bloomz_7b1": QianFan_MODEL_TOKEN_COSTS["BLOOMZ-7B"],
+ "llama_2_7b": QianFan_MODEL_TOKEN_COSTS["Llama-2-7B-Chat"],
+ "llama_2_13b": QianFan_MODEL_TOKEN_COSTS["Llama-2-13B-Chat"],
+ "llama_2_70b": QianFan_MODEL_TOKEN_COSTS["Llama-2-70B-Chat"],
+ "chatglm2_6b_32k": QianFan_MODEL_TOKEN_COSTS["ChatGLM2-6B-32K"],
+ "aquilachat_7b": QianFan_MODEL_TOKEN_COSTS["AquilaChat-7B"],
+ "mixtral_8x7b_instruct": QianFan_MODEL_TOKEN_COSTS["Mixtral-8x7B-Instruct"],
+ "sqlcoder_7b": QianFan_MODEL_TOKEN_COSTS["SQLCoder-7B"],
+ "codellama_7b_instruct": QianFan_MODEL_TOKEN_COSTS["CodeLlama-7B-Instruct"],
+ "xuanyuan_70b_chat": QianFan_MODEL_TOKEN_COSTS["XuanYuan-70B-Chat-4bit"],
+ "qianfan_bloomz_7b_compressed": QianFan_MODEL_TOKEN_COSTS["Qianfan-BLOOMZ-7B-compressed"],
+ "qianfan_chinese_llama_2_7b": QianFan_MODEL_TOKEN_COSTS["Qianfan-Chinese-Llama-2-7B"],
+ "qianfan_chinese_llama_2_13b": QianFan_MODEL_TOKEN_COSTS["Qianfan-Chinese-Llama-2-13B"],
+ "chatlaw": QianFan_MODEL_TOKEN_COSTS["ChatLaw"],
+ "yi_34b_chat": QianFan_MODEL_TOKEN_COSTS["Yi-34B-Chat"],
+}
+
+
TOKEN_MAX = {
"gpt-3.5-turbo": 4096,
"gpt-3.5-turbo-0301": 4096,
diff --git a/requirements.txt b/requirements.txt
index 6cb25d52b..c893bd713 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -67,3 +67,4 @@ google-generativeai==0.3.2
anytree
ipywidgets==8.1.1
Pillow
+qianfan==0.3.1
From 4a991b4cd4eb0bdfb415b8ffb8bb02c3d33f10e2 Mon Sep 17 00:00:00 2001
From: Zhou Tuo <45249333+FromCSUZhou@users.noreply.github.com>
Date: Wed, 7 Feb 2024 07:59:54 +0000
Subject: [PATCH 015/104] add email_login unit test
---
examples/email_summary.py | 11 +++--
metagpt/tools/libs/email_login.py | 45 +++++++++---------
tests/metagpt/tools/libs/test_email_login.py | 50 ++++++++++++++++++++
3 files changed, 79 insertions(+), 27 deletions(-)
create mode 100644 tests/metagpt/tools/libs/test_email_login.py
diff --git a/examples/email_summary.py b/examples/email_summary.py
index 39c6df1c1..0862991da 100644
--- a/examples/email_summary.py
+++ b/examples/email_summary.py
@@ -9,14 +9,15 @@ from metagpt.roles.ci.code_interpreter import CodeInterpreter
async def main():
- # prompt_response = """I will give you your Outlook email account(englishgpt@outlook.com) and password(the outlook_email_password item in the environment variable). You need to find the latest email in my inbox with the sender's suffix @qq.com and reply to him "Thank you! I have received your email~"""""
- prompt_summary = """I will give you your Outlook email account(englishgpt@outlook.com) and password(outlook_email_password item in the environment variable).
+ # For email response prompt
+ # prompt = """I will give you your Outlook email account(englishgpt@outlook.com) and password(the outlook_email_password item in the environment variable). You need to find the latest email in my inbox with the sender's suffix @qq.com and reply to him "Thank you! I have received your email~"""""
+ prompt = """I will give you your Outlook email account(englishgpt@outlook.com) and password(outlook_email_password item in the environment variable).
Firstly, Please help me present the latest 5 senders and full letter contents.
Then, summarize each of the 5 emails into one sentence with Chinese(you can do this by yourself, don't need import other models to do this) and output them in a markdown format."""
- # ci_response = CodeInterpreter(goal=prompt_response, use_tools=True)
- ci_summary = CodeInterpreter(goal=prompt_summary, use_tools=True)
- await ci_summary.run(prompt_summary)
+ ci = CodeInterpreter(use_tools=True)
+
+ await ci.run(prompt)
if __name__ == "__main__":
diff --git a/metagpt/tools/libs/email_login.py b/metagpt/tools/libs/email_login.py
index 946e294eb..77772e15a 100644
--- a/metagpt/tools/libs/email_login.py
+++ b/metagpt/tools/libs/email_login.py
@@ -1,8 +1,29 @@
from imap_tools import MailBox
+from metagpt.logs import logger
from metagpt.tools.tool_registry import register_tool
from metagpt.tools.tool_type import ToolType
+# Define a dictionary mapping email domains to their IMAP server addresses
+IMAP_SERVERS = {
+ "outlook.com": "imap-mail.outlook.com", # Outlook
+ "163.com": "imap.163.com", # 163 Mail
+ "qq.com": "imap.qq.com", # QQ Mail
+ "gmail.com": "imap.gmail.com", # Gmail
+ "yahoo.com": "imap.mail.yahoo.com", # Yahoo Mail
+ "icloud.com": "imap.mail.me.com", # iCloud Mail
+ "hotmail.com": "imap-mail.outlook.com", # Hotmail (同 Outlook)
+ "live.com": "imap-mail.outlook.com", # Live (同 Outlook)
+ "sina.com": "imap.sina.com", # Sina Mail
+ "sohu.com": "imap.sohu.com", # Sohu Mail
+ "yahoo.co.jp": "imap.mail.yahoo.co.jp", # Yahoo Mail Japan
+ "yandex.com": "imap.yandex.com", # Yandex Mail
+ "mail.ru": "imap.mail.ru", # Mail.ru
+ "aol.com": "imap.aol.com", # AOL Mail
+ "gmx.com": "imap.gmx.com", # GMX Mail
+ "zoho.com": "imap.zoho.com", # Zoho Mail
+}
+
@register_tool(tool_type=ToolType.EMAIL_LOGIN.type_name)
def email_login_imap(email_address, email_password):
@@ -17,34 +38,14 @@ def email_login_imap(email_address, email_password):
object: The imap_tools's MailBox object returned after successfully connecting to the mailbox through imap_tools, including various information about this account (email, etc.), or None if login fails.
"""
- # Define a dictionary mapping email domains to their IMAP server addresses
- imap_servers = {
- "outlook.com": "imap-mail.outlook.com", # Outlook
- "163.com": "imap.163.com", # 163 Mail
- "qq.com": "imap.qq.com", # QQ Mail
- "gmail.com": "imap.gmail.com", # Gmail
- "yahoo.com": "imap.mail.yahoo.com", # Yahoo Mail
- "icloud.com": "imap.mail.me.com", # iCloud Mail
- "hotmail.com": "imap-mail.outlook.com", # Hotmail (同 Outlook)
- "live.com": "imap-mail.outlook.com", # Live (同 Outlook)
- "sina.com": "imap.sina.com", # Sina Mail
- "sohu.com": "imap.sohu.com", # Sohu Mail
- "yahoo.co.jp": "imap.mail.yahoo.co.jp", # Yahoo Mail Japan
- "yandex.com": "imap.yandex.com", # Yandex Mail
- "mail.ru": "imap.mail.ru", # Mail.ru
- "aol.com": "imap.aol.com", # AOL Mail
- "gmx.com": "imap.gmx.com", # GMX Mail
- "zoho.com": "imap.zoho.com", # Zoho Mail
- }
-
# Extract the domain from the email address
domain = email_address.split("@")[-1]
# Determine the correct IMAP server
- imap_server = imap_servers.get(domain)
+ imap_server = IMAP_SERVERS.get(domain)
if not imap_server:
- print(f"IMAP server for {domain} not found.")
+ logger.error(f"IMAP server for {domain} not found.")
return None
# Attempt to log in to the email account
diff --git a/tests/metagpt/tools/libs/test_email_login.py b/tests/metagpt/tools/libs/test_email_login.py
new file mode 100644
index 000000000..fd8d41506
--- /dev/null
+++ b/tests/metagpt/tools/libs/test_email_login.py
@@ -0,0 +1,50 @@
+import os
+from unittest.mock import Mock, patch
+
+import pytest
+
+from metagpt.tools.libs.email_login import email_login_imap
+
+# Configuration for the test IMAP servers
+TEST_IMAP_SERVERS = {"outlook.com": "imap-mail.outlook.com"}
+
+# Setup correct and incorrect email information
+correct_email_address = "englishgpt@outlook.com"
+correct_email_password = os.environ.get("outlook_email_password")
+incorrect_email_address = "test@unknown.com"
+incorrect_email_password = "incorrect_password"
+
+
+@pytest.fixture
+def imap_server_setup():
+ # Use patch to mock the behavior of MailBox from the correct module path
+ with patch("metagpt.tools.libs.email_login.MailBox") as mock_mailbox:
+ # Setup for successful login
+ mock_mail_instance = Mock()
+ mock_mail_instance.login.return_value = mock_mail_instance
+ mock_mailbox.return_value = mock_mail_instance
+ yield mock_mail_instance
+
+
+def test_email_login_imap_success(imap_server_setup):
+ # Mock successful login
+ mailbox = email_login_imap(correct_email_address, correct_email_password)
+ assert mailbox is not None
+ # Correctly assert that the login method of the MailBox mock was called with the correct arguments
+ imap_server_setup.login.assert_called_with(correct_email_address, correct_email_password)
+
+
+def test_email_login_imap_failure_due_to_incorrect_server(imap_server_setup):
+ # Attempt to login with an incorrect server
+ mailbox = email_login_imap(incorrect_email_address, incorrect_email_password)
+ assert mailbox is None
+
+
+def test_email_login_imap_failure_due_to_wrong_credentials(imap_server_setup):
+ # Configure mock to throw an exception to simulate login failure due to incorrect credentials
+ imap_server_setup.login.side_effect = Exception("Login failed")
+ # Attempt to login which should simulate a failure
+ mailbox = email_login_imap(correct_email_address, incorrect_email_password)
+ assert mailbox is None
+ # Verify that the login method was called with the expected arguments
+ imap_server_setup.login.assert_called_with(correct_email_address, incorrect_email_password)
From 15a9c5e94135992e9854a57d14d581040879386f Mon Sep 17 00:00:00 2001
From: better629
Date: Wed, 7 Feb 2024 16:16:14 +0800
Subject: [PATCH 016/104] simplify _update_costs and related code
---
metagpt/provider/base_llm.py | 13 ++++++++++---
metagpt/provider/fireworks_api.py | 15 ++-------------
metagpt/provider/google_gemini_api.py | 10 ----------
metagpt/provider/ollama_api.py | 10 ----------
metagpt/provider/open_llm_api.py | 13 +------------
metagpt/provider/openai_api.py | 17 ++---------------
metagpt/provider/qianfan_api.py | 8 ++++----
metagpt/provider/zhipuai_api.py | 10 ----------
8 files changed, 19 insertions(+), 77 deletions(-)
diff --git a/metagpt/provider/base_llm.py b/metagpt/provider/base_llm.py
index d3d9c829b..2f57b15aa 100644
--- a/metagpt/provider/base_llm.py
+++ b/metagpt/provider/base_llm.py
@@ -11,11 +11,12 @@ from abc import ABC, abstractmethod
from typing import Optional, Union
from openai import AsyncOpenAI
+from pydantic import BaseModel
from metagpt.configs.llm_config import LLMConfig
from metagpt.logs import logger
from metagpt.schema import Message
-from metagpt.utils.cost_manager import CostManager
+from metagpt.utils.cost_manager import CostManager, Costs
class BaseLLM(ABC):
@@ -67,14 +68,15 @@ class BaseLLM(ABC):
def _default_system_msg(self):
return self._system_msg(self.system_prompt)
- def _update_costs(self, usage: dict, model: str = None, local_calc_usage: bool = True):
+ def _update_costs(self, usage: Union[dict, BaseModel], model: str = None, local_calc_usage: bool = True):
"""update each request's token cost
Args:
model (str): model name or in some scenarios called endpoint
- local_calc_usage (bool): some models don't calculate usage, it will overwrite calc_usage
+ local_calc_usage (bool): some models don't calculate usage, it will overwrite LLMConfig.calc_usage
"""
calc_usage = self.config.calc_usage and local_calc_usage
model = model if model else self.model
+ usage = usage.model_dump() if isinstance(usage, BaseModel) else usage
if calc_usage and self.cost_manager:
try:
prompt_tokens = int(usage.get("prompt_tokens", 0))
@@ -83,6 +85,11 @@ class BaseLLM(ABC):
except Exception as e:
logger.error(f"{self.__class__.__name__} updats costs failed! exp: {e}")
+ def get_costs(self) -> Costs:
+ if not self.cost_manager:
+ return Costs(0, 0, 0, 0)
+ return self.cost_manager.get_costs()
+
async def aask(
self,
msg: str,
diff --git a/metagpt/provider/fireworks_api.py b/metagpt/provider/fireworks_api.py
index d56453a85..e62a7066e 100644
--- a/metagpt/provider/fireworks_api.py
+++ b/metagpt/provider/fireworks_api.py
@@ -19,7 +19,7 @@ from metagpt.configs.llm_config import LLMConfig, LLMType
from metagpt.logs import logger
from metagpt.provider.llm_provider_registry import register_provider
from metagpt.provider.openai_api import OpenAILLM, log_and_reraise
-from metagpt.utils.cost_manager import CostManager, Costs
+from metagpt.utils.cost_manager import CostManager
MODEL_GRADE_TOKEN_COSTS = {
"-1": {"prompt": 0.0, "completion": 0.0}, # abnormal condition
@@ -81,17 +81,6 @@ class FireworksLLM(OpenAILLM):
kwargs = dict(api_key=self.config.api_key, base_url=self.config.base_url)
return kwargs
- def _update_costs(self, usage: CompletionUsage):
- if self.config.calc_usage and usage:
- try:
- # use FireworksCostManager not context.cost_manager
- self.cost_manager.update_cost(usage.prompt_tokens, usage.completion_tokens, self.model)
- except Exception as e:
- logger.error(f"updating costs failed!, exp: {e}")
-
- def get_costs(self) -> Costs:
- return self.cost_manager.get_costs()
-
async def _achat_completion_stream(self, messages: list[dict], timeout=3) -> str:
response: AsyncStream[ChatCompletionChunk] = await self.aclient.chat.completions.create(
**self._cons_kwargs(messages), stream=True
@@ -113,7 +102,7 @@ class FireworksLLM(OpenAILLM):
usage = CompletionUsage(**chunk.usage)
full_content = "".join(collected_content)
- self._update_costs(usage)
+ self._update_costs(usage.model_dump())
return full_content
@retry(
diff --git a/metagpt/provider/google_gemini_api.py b/metagpt/provider/google_gemini_api.py
index 2647ab16b..87ea81c80 100644
--- a/metagpt/provider/google_gemini_api.py
+++ b/metagpt/provider/google_gemini_api.py
@@ -72,16 +72,6 @@ class GeminiLLM(BaseLLM):
kwargs = {"contents": messages, "generation_config": GenerationConfig(temperature=0.3), "stream": stream}
return kwargs
- def _update_costs(self, usage: dict):
- """update each request's token cost"""
- if self.config.calc_usage:
- try:
- prompt_tokens = int(usage.get("prompt_tokens", 0))
- completion_tokens = int(usage.get("completion_tokens", 0))
- self.cost_manager.update_cost(prompt_tokens, completion_tokens, self.model)
- except Exception as e:
- logger.error(f"google gemini updats costs failed! exp: {e}")
-
def get_choice_text(self, resp: GenerateContentResponse) -> str:
return resp.text
diff --git a/metagpt/provider/ollama_api.py b/metagpt/provider/ollama_api.py
index c9103b018..52e8dbe36 100644
--- a/metagpt/provider/ollama_api.py
+++ b/metagpt/provider/ollama_api.py
@@ -46,16 +46,6 @@ class OllamaLLM(BaseLLM):
kwargs = {"model": self.model, "messages": messages, "options": {"temperature": 0.3}, "stream": stream}
return kwargs
- def _update_costs(self, usage: dict):
- """update each request's token cost"""
- if self.config.calc_usage:
- try:
- prompt_tokens = int(usage.get("prompt_tokens", 0))
- completion_tokens = int(usage.get("completion_tokens", 0))
- self._cost_manager.update_cost(prompt_tokens, completion_tokens, self.model)
- except Exception as e:
- logger.error(f"ollama updats costs failed! exp: {e}")
-
def get_choice_text(self, resp: dict) -> str:
"""get the resp content from llm response"""
assist_msg = resp.get("message", {})
diff --git a/metagpt/provider/open_llm_api.py b/metagpt/provider/open_llm_api.py
index a29b263a4..69371e379 100644
--- a/metagpt/provider/open_llm_api.py
+++ b/metagpt/provider/open_llm_api.py
@@ -8,7 +8,7 @@ from metagpt.configs.llm_config import LLMConfig, LLMType
from metagpt.logs import logger
from metagpt.provider.llm_provider_registry import register_provider
from metagpt.provider.openai_api import OpenAILLM
-from metagpt.utils.cost_manager import Costs, TokenCostManager
+from metagpt.utils.cost_manager import TokenCostManager
from metagpt.utils.token_counter import count_message_tokens, count_string_tokens
@@ -34,14 +34,3 @@ class OpenLLM(OpenAILLM):
logger.error(f"usage calculation failed!: {e}")
return usage
-
- def _update_costs(self, usage: CompletionUsage):
- if self.config.calc_usage and usage:
- try:
- # use OpenLLMCostManager not CONFIG.cost_manager
- self._cost_manager.update_cost(usage.prompt_tokens, usage.completion_tokens, self.model)
- except Exception as e:
- logger.error(f"updating costs failed!, exp: {e}")
-
- def get_costs(self) -> Costs:
- return self._cost_manager.get_costs()
diff --git a/metagpt/provider/openai_api.py b/metagpt/provider/openai_api.py
index 63e68c9bd..1e5770d74 100644
--- a/metagpt/provider/openai_api.py
+++ b/metagpt/provider/openai_api.py
@@ -29,7 +29,7 @@ from metagpt.provider.constant import GENERAL_FUNCTION_SCHEMA
from metagpt.provider.llm_provider_registry import register_provider
from metagpt.schema import Message
from metagpt.utils.common import CodeParser, decode_image
-from metagpt.utils.cost_manager import CostManager, Costs
+from metagpt.utils.cost_manager import CostManager
from metagpt.utils.exceptions import handle_exception
from metagpt.utils.token_counter import (
count_message_tokens,
@@ -55,16 +55,13 @@ class OpenAILLM(BaseLLM):
def __init__(self, config: LLMConfig):
self.config = config
- self._init_model()
self._init_client()
self.auto_max_tokens = False
self.cost_manager: Optional[CostManager] = None
- def _init_model(self):
- self.model = self.config.model # Used in _calc_usage & _cons_kwargs
-
def _init_client(self):
"""https://github.com/openai/openai-python#async-usage"""
+ self.model = self.config.model # Used in _calc_usage & _cons_kwargs
kwargs = self._make_client_kwargs()
self.aclient = AsyncOpenAI(**kwargs)
@@ -240,16 +237,6 @@ class OpenAILLM(BaseLLM):
return usage
- @handle_exception
- def _update_costs(self, usage: CompletionUsage):
- if self.config.calc_usage and usage and self.cost_manager:
- self.cost_manager.update_cost(usage.prompt_tokens, usage.completion_tokens, self.model)
-
- def get_costs(self) -> Costs:
- if not self.cost_manager:
- return Costs(0, 0, 0, 0)
- return self.cost_manager.get_costs()
-
def _get_max_tokens(self, messages: list[dict]):
if not self.auto_max_tokens:
return self.config.max_token
diff --git a/metagpt/provider/qianfan_api.py b/metagpt/provider/qianfan_api.py
index 180935e61..fbbff7085 100644
--- a/metagpt/provider/qianfan_api.py
+++ b/metagpt/provider/qianfan_api.py
@@ -78,7 +78,7 @@ class QianFanLLM(BaseLLM):
# self deployed model on the cloud not to calculate usage, it charges resource pool rental fee
self.calc_usage = self.config.calc_usage and self.config.endpoint is None
- self.client = qianfan.ChatCompletion()
+ self.aclient = qianfan.ChatCompletion()
def _const_kwargs(self, messages: list[dict], stream: bool = False) -> dict:
kwargs = {
@@ -110,12 +110,12 @@ class QianFanLLM(BaseLLM):
return resp.get("result", "")
def completion(self, messages: list[dict]) -> JsonBody:
- resp = self.client.do(**self._const_kwargs(messages=messages, stream=False))
+ resp = self.aclient.do(**self._const_kwargs(messages=messages, stream=False))
self._update_costs(resp.body.get("usage", {}))
return resp.body
async def _achat_completion(self, messages: list[dict]) -> JsonBody:
- resp = await self.client.ado(**self._const_kwargs(messages=messages, stream=False))
+ resp = await self.aclient.ado(**self._const_kwargs(messages=messages, stream=False))
self._update_costs(resp.body.get("usage", {}))
return resp.body
@@ -123,7 +123,7 @@ class QianFanLLM(BaseLLM):
return await self._achat_completion(messages)
async def _achat_completion_stream(self, messages: list[dict]) -> str:
- resp = await self.client.ado(**self._const_kwargs(messages=messages, stream=True))
+ resp = await self.aclient.ado(**self._const_kwargs(messages=messages, stream=True))
collected_content = []
usage = {}
async for chunk in resp:
diff --git a/metagpt/provider/zhipuai_api.py b/metagpt/provider/zhipuai_api.py
index 9108a1fba..b7c160a41 100644
--- a/metagpt/provider/zhipuai_api.py
+++ b/metagpt/provider/zhipuai_api.py
@@ -57,16 +57,6 @@ class ZhiPuAILLM(BaseLLM):
kwargs = {"model": self.model, "messages": messages, "stream": stream, "temperature": 0.3}
return kwargs
- def _update_costs(self, usage: dict):
- """update each request's token cost"""
- if self.config.calc_usage:
- try:
- prompt_tokens = int(usage.get("prompt_tokens", 0))
- completion_tokens = int(usage.get("completion_tokens", 0))
- self.config.cost_manager.update_cost(prompt_tokens, completion_tokens, self.model)
- except Exception as e:
- logger.error(f"zhipuai updats costs failed! exp: {e}")
-
def completion(self, messages: list[dict], timeout=3) -> dict:
resp = self.llm.chat.completions.create(**self._const_kwargs(messages))
usage = resp.usage.model_dump()
From 4370060802b3da936880aefb7aa28a6ba22780cd Mon Sep 17 00:00:00 2001
From: geekan
Date: Wed, 7 Feb 2024 16:23:54 +0800
Subject: [PATCH 017/104] fix bug
---
config/config2.yaml.example | 2 +-
metagpt/actions/research.py | 2 +-
metagpt/utils/cost_manager.py | 4 ++++
3 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/config/config2.yaml.example b/config/config2.yaml.example
index 8f4a33fc1..2217f1b2c 100644
--- a/config/config2.yaml.example
+++ b/config/config2.yaml.example
@@ -1,5 +1,5 @@
llm:
- api_type: "openai"
+ api_type: "openai" # or azure / ollama etc.
base_url: "YOUR_BASE_URL"
api_key: "YOUR_API_KEY"
model: "gpt-4-turbo-preview" # or gpt-3.5-turbo-1106 / gpt-4-1106-preview
diff --git a/metagpt/actions/research.py b/metagpt/actions/research.py
index 2ebeadb66..316e9f299 100644
--- a/metagpt/actions/research.py
+++ b/metagpt/actions/research.py
@@ -133,7 +133,7 @@ class CollectLinks(Action):
if len(remove) == 0:
break
- model_name = config.get_openai_llm().model
+ model_name = config.model
prompt = reduce_message_length(gen_msg(), model_name, system_text, 4096)
logger.debug(prompt)
queries = await self._aask(prompt, [system_text])
diff --git a/metagpt/utils/cost_manager.py b/metagpt/utils/cost_manager.py
index 7bf5154b6..c4c93f91f 100644
--- a/metagpt/utils/cost_manager.py
+++ b/metagpt/utils/cost_manager.py
@@ -41,6 +41,10 @@ class CostManager(BaseModel):
"""
self.total_prompt_tokens += prompt_tokens
self.total_completion_tokens += completion_tokens
+ if model not in TOKEN_COSTS:
+ logger.warning(f"Model {model} not found in TOKEN_COSTS.")
+ return
+
cost = (
prompt_tokens * TOKEN_COSTS[model]["prompt"] + completion_tokens * TOKEN_COSTS[model]["completion"]
) / 1000
From c0867643d828084e7503f05ae44987dccf3687d1 Mon Sep 17 00:00:00 2001
From: geekan
Date: Wed, 7 Feb 2024 16:24:33 +0800
Subject: [PATCH 018/104] fix bug
---
metagpt/actions/research.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/metagpt/actions/research.py b/metagpt/actions/research.py
index 316e9f299..ce8d8a967 100644
--- a/metagpt/actions/research.py
+++ b/metagpt/actions/research.py
@@ -133,7 +133,7 @@ class CollectLinks(Action):
if len(remove) == 0:
break
- model_name = config.model
+ model_name = config.llm.model
prompt = reduce_message_length(gen_msg(), model_name, system_text, 4096)
logger.debug(prompt)
queries = await self._aask(prompt, [system_text])
From d112371dadf02ee9a828c6708d0bbaa3e600c113 Mon Sep 17 00:00:00 2001
From: geekan
Date: Wed, 7 Feb 2024 16:33:24 +0800
Subject: [PATCH 019/104] fix bug
---
metagpt/utils/text.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/metagpt/utils/text.py b/metagpt/utils/text.py
index dd9678438..921efe706 100644
--- a/metagpt/utils/text.py
+++ b/metagpt/utils/text.py
@@ -25,7 +25,7 @@ def reduce_message_length(
"""
max_token = TOKEN_MAX.get(model_name, 2048) - count_string_tokens(system_text, model_name) - reserved
for msg in msgs:
- if count_string_tokens(msg, model_name) < max_token:
+ if count_string_tokens(msg, model_name) < max_token or model_name not in TOKEN_MAX:
return msg
raise RuntimeError("fail to reduce message length")
From 50a14718baeacfada5cf7008e2761a801adbd968 Mon Sep 17 00:00:00 2001
From: geekan
Date: Wed, 7 Feb 2024 16:37:23 +0800
Subject: [PATCH 020/104] refine log
---
metagpt/provider/openai_api.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/metagpt/provider/openai_api.py b/metagpt/provider/openai_api.py
index 63e68c9bd..120748d15 100644
--- a/metagpt/provider/openai_api.py
+++ b/metagpt/provider/openai_api.py
@@ -236,7 +236,7 @@ class OpenAILLM(BaseLLM):
usage.prompt_tokens = count_message_tokens(messages, self.model)
usage.completion_tokens = count_string_tokens(rsp, self.model)
except Exception as e:
- logger.error(f"usage calculation failed: {e}")
+ logger.warning(f"usage calculation failed: {e}")
return usage
From 3369c9e53671313d73ae57a17c010e3de786fb41 Mon Sep 17 00:00:00 2001
From: Zhou Tuo <45249333+FromCSUZhou@users.noreply.github.com>
Date: Wed, 7 Feb 2024 08:59:32 +0000
Subject: [PATCH 021/104] modify by comment
---
examples/email_summary.py | 9 +++++----
metagpt/tools/libs/email_login.py | 4 ++--
tests/metagpt/tools/libs/test_email_login.py | 16 +++++++---------
3 files changed, 14 insertions(+), 15 deletions(-)
diff --git a/examples/email_summary.py b/examples/email_summary.py
index 0862991da..dd8dd8c8e 100644
--- a/examples/email_summary.py
+++ b/examples/email_summary.py
@@ -10,10 +10,11 @@ from metagpt.roles.ci.code_interpreter import CodeInterpreter
async def main():
# For email response prompt
- # prompt = """I will give you your Outlook email account(englishgpt@outlook.com) and password(the outlook_email_password item in the environment variable). You need to find the latest email in my inbox with the sender's suffix @qq.com and reply to him "Thank you! I have received your email~"""""
- prompt = """I will give you your Outlook email account(englishgpt@outlook.com) and password(outlook_email_password item in the environment variable).
- Firstly, Please help me present the latest 5 senders and full letter contents.
- Then, summarize each of the 5 emails into one sentence with Chinese(you can do this by yourself, don't need import other models to do this) and output them in a markdown format."""
+ email_account = "your_email_account"
+ # prompt = f"""I will give you your Outlook email account({email_account}) and password(email_password item in the environment variable). You need to find the latest email in my inbox with the sender's suffix @qq.com and reply to him "Thank you! I have received your email~"""""
+ prompt = f"""I will give you your Outlook email account({email_account}) and password(email_password item in the environment variable).
+ Firstly, Please help me fetch the latest 5 senders and full letter contents.
+ Then, summarize each of the 5 emails into one sentence(you can do this by yourself, no need import other models to do this) and output them in a markdown format."""
ci = CodeInterpreter(use_tools=True)
diff --git a/metagpt/tools/libs/email_login.py b/metagpt/tools/libs/email_login.py
index 77772e15a..8fd77274c 100644
--- a/metagpt/tools/libs/email_login.py
+++ b/metagpt/tools/libs/email_login.py
@@ -51,8 +51,8 @@ def email_login_imap(email_address, email_password):
# Attempt to log in to the email account
try:
mailbox = MailBox(imap_server).login(email_address, email_password)
- print("Login successful")
+ logger.info("Login successful")
return mailbox
except Exception as e:
- print(f"Login failed: {e}")
+ logger.error(f"Login failed: {e}")
return None
diff --git a/tests/metagpt/tools/libs/test_email_login.py b/tests/metagpt/tools/libs/test_email_login.py
index fd8d41506..c18d15c7d 100644
--- a/tests/metagpt/tools/libs/test_email_login.py
+++ b/tests/metagpt/tools/libs/test_email_login.py
@@ -1,5 +1,4 @@
import os
-from unittest.mock import Mock, patch
import pytest
@@ -16,14 +15,13 @@ incorrect_email_password = "incorrect_password"
@pytest.fixture
-def imap_server_setup():
- # Use patch to mock the behavior of MailBox from the correct module path
- with patch("metagpt.tools.libs.email_login.MailBox") as mock_mailbox:
- # Setup for successful login
- mock_mail_instance = Mock()
- mock_mail_instance.login.return_value = mock_mail_instance
- mock_mailbox.return_value = mock_mail_instance
- yield mock_mail_instance
+def imap_server_setup(mocker):
+ # Use the mocker fixture to mock the MailBox class
+ mock_mailbox = mocker.patch("metagpt.tools.libs.email_login.MailBox")
+ mock_mail_instance = mocker.Mock()
+ mock_mail_instance.login.return_value = mock_mail_instance
+ mock_mailbox.return_value = mock_mail_instance
+ return mock_mail_instance
def test_email_login_imap_success(imap_server_setup):
From ce63e455dfe1071a99ee421c1e17df07db20200d Mon Sep 17 00:00:00 2001
From: geekan
Date: Wed, 7 Feb 2024 17:03:10 +0800
Subject: [PATCH 022/104] fix bug
---
metagpt/provider/openai_api.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/metagpt/provider/openai_api.py b/metagpt/provider/openai_api.py
index 120748d15..756f8c483 100644
--- a/metagpt/provider/openai_api.py
+++ b/metagpt/provider/openai_api.py
@@ -253,7 +253,7 @@ class OpenAILLM(BaseLLM):
def _get_max_tokens(self, messages: list[dict]):
if not self.auto_max_tokens:
return self.config.max_token
- return get_max_completion_tokens(messages, self.model, self.config.max_tokens)
+ return get_max_completion_tokens(messages, self.model, self.config.max_token)
@handle_exception
async def amoderation(self, content: Union[str, list[str]]):
From dc240a2efd161614f2e4b5090238f72682158ae5 Mon Sep 17 00:00:00 2001
From: better629
Date: Wed, 7 Feb 2024 17:40:27 +0800
Subject: [PATCH 023/104] simplify provider ut code
---
.github/workflows/fulltest.yaml | 1 -
.github/workflows/unittest.yaml | 2 +-
tests/metagpt/provider/mock_llm_config.py | 10 +++
tests/metagpt/provider/req_resp_const.py | 80 +++++++++++++++++++
tests/metagpt/provider/test_anthropic_api.py | 12 +--
tests/metagpt/provider/test_base_llm.py | 53 +++++-------
tests/metagpt/provider/test_fireworks_llm.py | 65 ++++-----------
.../provider/test_google_gemini_api.py | 37 +++++----
tests/metagpt/provider/test_ollama_api.py | 20 +++--
tests/metagpt/provider/test_open_llm_api.py | 65 +++++----------
tests/metagpt/provider/test_qianfan_api.py | 15 ++++
tests/metagpt/provider/test_spark_api.py | 36 +++++----
tests/metagpt/provider/test_zhipuai_api.py | 33 ++++----
tests/spark.yaml | 7 --
14 files changed, 235 insertions(+), 201 deletions(-)
create mode 100644 tests/metagpt/provider/req_resp_const.py
create mode 100644 tests/metagpt/provider/test_qianfan_api.py
delete mode 100644 tests/spark.yaml
diff --git a/.github/workflows/fulltest.yaml b/.github/workflows/fulltest.yaml
index f5c6049e1..70c800481 100644
--- a/.github/workflows/fulltest.yaml
+++ b/.github/workflows/fulltest.yaml
@@ -54,7 +54,6 @@ jobs:
export ALLOW_OPENAI_API_CALL=0
echo "${{ secrets.METAGPT_KEY_YAML }}" | base64 -d > config/key.yaml
mkdir -p ~/.metagpt && echo "${{ secrets.METAGPT_CONFIG2_YAML }}" | base64 -d > ~/.metagpt/config2.yaml
- echo "${{ secrets.SPARK_YAML }}" | base64 -d > ~/.metagpt/spark.yaml
pytest tests/ --doctest-modules --cov=./metagpt/ --cov-report=xml:cov.xml --cov-report=html:htmlcov --durations=20 | tee unittest.txt
- name: Show coverage report
run: |
diff --git a/.github/workflows/unittest.yaml b/.github/workflows/unittest.yaml
index 2e7e3ce2b..afa9faba7 100644
--- a/.github/workflows/unittest.yaml
+++ b/.github/workflows/unittest.yaml
@@ -31,7 +31,7 @@ jobs:
- name: Test with pytest
run: |
export ALLOW_OPENAI_API_CALL=0
- mkdir -p ~/.metagpt && cp tests/config2.yaml ~/.metagpt/config2.yaml && cp tests/spark.yaml ~/.metagpt/spark.yaml
+ mkdir -p ~/.metagpt && cp tests/config2.yaml ~/.metagpt/config2.yaml
pytest tests/ --doctest-modules --cov=./metagpt/ --cov-report=xml:cov.xml --cov-report=html:htmlcov --durations=20 | tee unittest.txt
- name: Show coverage report
run: |
diff --git a/tests/metagpt/provider/mock_llm_config.py b/tests/metagpt/provider/mock_llm_config.py
index e2f626a6a..21780f914 100644
--- a/tests/metagpt/provider/mock_llm_config.py
+++ b/tests/metagpt/provider/mock_llm_config.py
@@ -42,3 +42,13 @@ mock_llm_config_zhipu = LLMConfig(
model="mock_zhipu_model",
proxy="http://localhost:8080",
)
+
+
+mock_llm_config_spark = LLMConfig(
+ api_type="spark",
+ app_id="xxx",
+ api_key="xxx",
+ api_secret="xxx",
+ domain="generalv2",
+ base_url="wss://spark-api.xf-yun.com/v3.1/chat",
+)
diff --git a/tests/metagpt/provider/req_resp_const.py b/tests/metagpt/provider/req_resp_const.py
new file mode 100644
index 000000000..a3a7a363c
--- /dev/null
+++ b/tests/metagpt/provider/req_resp_const.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Desc : default request & response data for provider unittest
+
+from openai.types.chat.chat_completion import (
+ ChatCompletion,
+ ChatCompletionMessage,
+ Choice,
+)
+from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
+from openai.types.chat.chat_completion_chunk import Choice as AChoice
+from openai.types.chat.chat_completion_chunk import ChoiceDelta
+from openai.types.completion_usage import CompletionUsage
+
+prompt = "who are you?"
+messages = [{"role": "user", "content": prompt}]
+
+resp_cont_tmpl = "I'm {name}"
+default_resp_cont = resp_cont_tmpl.format(name="GPT")
+
+
+# part of a whole OpenAI-style ChatCompletion structure
+def get_part_chat_completion(llm_name: str) -> dict:
+ part_chat_completion = {
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "content": resp_cont_tmpl.format(name=llm_name),
+ },
+ "finish_reason": "stop",
+ }
+ ],
+ "usage": {"completion_tokens": 22, "prompt_tokens": 19, "total_tokens": 41},
+ }
+ return part_chat_completion
+
+
+def get_openai_chat_completion(llm_name: str) -> ChatCompletion:
+ openai_chat_completion = ChatCompletion(
+ id="cmpl-a6652c1bb181caae8dd19ad8",
+ model="xx/xxx",
+ object="chat.completion",
+ created=1703300855,
+ choices=[
+ Choice(
+ finish_reason="stop",
+ index=0,
+ message=ChatCompletionMessage(role="assistant", content=resp_cont_tmpl.format(name=llm_name)),
+ logprobs=None,
+ )
+ ],
+ usage=CompletionUsage(completion_tokens=110, prompt_tokens=92, total_tokens=202),
+ )
+ return openai_chat_completion
+
+
+def get_openai_chat_completion_chunk(llm_name: str, usage_as_dict: bool = False) -> ChatCompletionChunk:
+ usage = CompletionUsage(completion_tokens=110, prompt_tokens=92, total_tokens=202)
+ usage = usage if not usage_as_dict else usage.model_dump()
+ openai_chat_completion_chunk = ChatCompletionChunk(
+ id="cmpl-a6652c1bb181caae8dd19ad8",
+ model="xx/xxx",
+ object="chat.completion.chunk",
+ created=1703300855,
+ choices=[
+ AChoice(
+ delta=ChoiceDelta(role="assistant", content=resp_cont_tmpl.format(name=llm_name)),
+ finish_reason="stop",
+ index=0,
+ logprobs=None,
+ )
+ ],
+ usage=usage,
+ )
+ return openai_chat_completion_chunk
+
+
+gemini_messages = [{"role": "user", "parts": prompt}]
diff --git a/tests/metagpt/provider/test_anthropic_api.py b/tests/metagpt/provider/test_anthropic_api.py
index 6962ab064..93cfd7dbc 100644
--- a/tests/metagpt/provider/test_anthropic_api.py
+++ b/tests/metagpt/provider/test_anthropic_api.py
@@ -8,25 +8,25 @@ from anthropic.resources.completions import Completion
from metagpt.provider.anthropic_api import Claude2
from tests.metagpt.provider.mock_llm_config import mock_llm_config
+from tests.metagpt.provider.req_resp_const import prompt, resp_cont_tmpl
-prompt = "who are you"
-resp = "I'am Claude2"
+resp_cont = resp_cont_tmpl.format(name="Claude")
def mock_anthropic_completions_create(self, model: str, prompt: str, max_tokens_to_sample: int) -> Completion:
- return Completion(id="xx", completion=resp, model="claude-2", stop_reason="stop_sequence", type="completion")
+ return Completion(id="xx", completion=resp_cont, model="claude-2", stop_reason="stop_sequence", type="completion")
async def mock_anthropic_acompletions_create(self, model: str, prompt: str, max_tokens_to_sample: int) -> Completion:
- return Completion(id="xx", completion=resp, model="claude-2", stop_reason="stop_sequence", type="completion")
+ return Completion(id="xx", completion=resp_cont, model="claude-2", stop_reason="stop_sequence", type="completion")
def test_claude2_ask(mocker):
mocker.patch("anthropic.resources.completions.Completions.create", mock_anthropic_completions_create)
- assert resp == Claude2(mock_llm_config).ask(prompt)
+ assert resp_cont == Claude2(mock_llm_config).ask(prompt)
@pytest.mark.asyncio
async def test_claude2_aask(mocker):
mocker.patch("anthropic.resources.completions.AsyncCompletions.create", mock_anthropic_acompletions_create)
- assert resp == await Claude2(mock_llm_config).aask(prompt)
+ assert resp_cont == await Claude2(mock_llm_config).aask(prompt)
diff --git a/tests/metagpt/provider/test_base_llm.py b/tests/metagpt/provider/test_base_llm.py
index cc781f78a..0babd6d5f 100644
--- a/tests/metagpt/provider/test_base_llm.py
+++ b/tests/metagpt/provider/test_base_llm.py
@@ -11,21 +11,13 @@ import pytest
from metagpt.configs.llm_config import LLMConfig
from metagpt.provider.base_llm import BaseLLM
from metagpt.schema import Message
+from tests.metagpt.provider.req_resp_const import (
+ default_resp_cont,
+ get_part_chat_completion,
+ prompt,
+)
-default_chat_resp = {
- "choices": [
- {
- "index": 0,
- "message": {
- "role": "assistant",
- "content": "I'am GPT",
- },
- "finish_reason": "stop",
- }
- ]
-}
-prompt_msg = "who are you"
-resp_content = default_chat_resp["choices"][0]["message"]["content"]
+llm_name = "GPT"
class MockBaseLLM(BaseLLM):
@@ -33,16 +25,13 @@ class MockBaseLLM(BaseLLM):
pass
def completion(self, messages: list[dict], timeout=3):
- return default_chat_resp
+ return get_part_chat_completion(llm_name)
async def acompletion(self, messages: list[dict], timeout=3):
- return default_chat_resp
+ return get_part_chat_completion(llm_name)
async def acompletion_text(self, messages: list[dict], stream=False, timeout=3) -> str:
- return resp_content
-
- async def close(self):
- return default_chat_resp
+ return default_resp_cont
def test_base_llm():
@@ -86,25 +75,25 @@ def test_base_llm():
choice_text = base_llm.get_choice_text(openai_funccall_resp)
assert choice_text == openai_funccall_resp["choices"][0]["message"]["content"]
- # resp = base_llm.ask(prompt_msg)
- # assert resp == resp_content
+ # resp = base_llm.ask(prompt)
+ # assert resp == default_resp_cont
- # resp = base_llm.ask_batch([prompt_msg])
- # assert resp == resp_content
+ # resp = base_llm.ask_batch([prompt])
+ # assert resp == default_resp_cont
- # resp = base_llm.ask_code([prompt_msg])
- # assert resp == resp_content
+ # resp = base_llm.ask_code([prompt])
+ # assert resp == default_resp_cont
@pytest.mark.asyncio
async def test_async_base_llm():
base_llm = MockBaseLLM()
- resp = await base_llm.aask(prompt_msg)
- assert resp == resp_content
+ resp = await base_llm.aask(prompt)
+ assert resp == default_resp_cont
- resp = await base_llm.aask_batch([prompt_msg])
- assert resp == resp_content
+ resp = await base_llm.aask_batch([prompt])
+ assert resp == default_resp_cont
- # resp = await base_llm.aask_code([prompt_msg])
- # assert resp == resp_content
+ # resp = await base_llm.aask_code([prompt])
+ # assert resp == default_resp_cont
diff --git a/tests/metagpt/provider/test_fireworks_llm.py b/tests/metagpt/provider/test_fireworks_llm.py
index 66b55e5b2..834f6305f 100644
--- a/tests/metagpt/provider/test_fireworks_llm.py
+++ b/tests/metagpt/provider/test_fireworks_llm.py
@@ -3,14 +3,7 @@
# @Desc : the unittest of fireworks api
import pytest
-from openai.types.chat.chat_completion import (
- ChatCompletion,
- ChatCompletionMessage,
- Choice,
-)
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
-from openai.types.chat.chat_completion_chunk import Choice as AChoice
-from openai.types.chat.chat_completion_chunk import ChoiceDelta
from openai.types.completion_usage import CompletionUsage
from metagpt.provider.fireworks_api import (
@@ -20,42 +13,18 @@ from metagpt.provider.fireworks_api import (
)
from metagpt.utils.cost_manager import Costs
from tests.metagpt.provider.mock_llm_config import mock_llm_config
-
-resp_content = "I'm fireworks"
-default_resp = ChatCompletion(
- id="cmpl-a6652c1bb181caae8dd19ad8",
- model="accounts/fireworks/models/llama-v2-13b-chat",
- object="chat.completion",
- created=1703300855,
- choices=[
- Choice(
- finish_reason="stop",
- index=0,
- message=ChatCompletionMessage(role="assistant", content=resp_content),
- logprobs=None,
- )
- ],
- usage=CompletionUsage(completion_tokens=110, prompt_tokens=92, total_tokens=202),
+from tests.metagpt.provider.req_resp_const import (
+ get_openai_chat_completion,
+ get_openai_chat_completion_chunk,
+ messages,
+ prompt,
+ resp_cont_tmpl,
)
-default_resp_chunk = ChatCompletionChunk(
- id=default_resp.id,
- model=default_resp.model,
- object="chat.completion.chunk",
- created=default_resp.created,
- choices=[
- AChoice(
- delta=ChoiceDelta(content=resp_content, role="assistant"),
- finish_reason="stop",
- index=0,
- logprobs=None,
- )
- ],
- usage=dict(default_resp.usage),
-)
-
-prompt_msg = "who are you"
-messages = [{"role": "user", "content": prompt_msg}]
+llm_name = "fireworks"
+resp_cont = resp_cont_tmpl.format(name=llm_name)
+default_resp = get_openai_chat_completion(llm_name)
+default_resp_chunk = get_openai_chat_completion_chunk(llm_name, usage_as_dict=True)
def test_fireworks_costmanager():
@@ -99,16 +68,16 @@ async def test_fireworks_acompletion(mocker):
)
resp = await fireworks_gpt.acompletion(messages)
- assert resp.choices[0].message.content in resp_content
+ assert resp.choices[0].message.content in resp_cont
- resp = await fireworks_gpt.aask(prompt_msg, stream=False)
- assert resp == resp_content
+ resp = await fireworks_gpt.aask(prompt, stream=False)
+ assert resp == resp_cont
resp = await fireworks_gpt.acompletion_text(messages, stream=False)
- assert resp == resp_content
+ assert resp == resp_cont
resp = await fireworks_gpt.acompletion_text(messages, stream=True)
- assert resp == resp_content
+ assert resp == resp_cont
- resp = await fireworks_gpt.aask(prompt_msg)
- assert resp == resp_content
+ resp = await fireworks_gpt.aask(prompt)
+ assert resp == resp_cont
diff --git a/tests/metagpt/provider/test_google_gemini_api.py b/tests/metagpt/provider/test_google_gemini_api.py
index 404ae1e90..ad0c7bbfe 100644
--- a/tests/metagpt/provider/test_google_gemini_api.py
+++ b/tests/metagpt/provider/test_google_gemini_api.py
@@ -11,6 +11,11 @@ from google.generativeai.types import content_types
from metagpt.provider.google_gemini_api import GeminiLLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config
+from tests.metagpt.provider.req_resp_const import (
+ gemini_messages,
+ prompt,
+ resp_cont_tmpl,
+)
@dataclass
@@ -18,10 +23,8 @@ class MockGeminiResponse(ABC):
text: str
-prompt_msg = "who are you"
-messages = [{"role": "user", "parts": prompt_msg}]
-resp_content = "I'm gemini from google"
-default_resp = MockGeminiResponse(text=resp_content)
+resp_cont = resp_cont_tmpl.format(name="gemini")
+default_resp = MockGeminiResponse(text=resp_cont)
def mock_gemini_count_tokens(self, contents: content_types.ContentsType) -> glm.CountTokensResponse:
@@ -62,26 +65,26 @@ async def test_gemini_acompletion(mocker):
gemini_gpt = GeminiLLM(mock_llm_config)
- assert gemini_gpt._user_msg(prompt_msg) == {"role": "user", "parts": [prompt_msg]}
- assert gemini_gpt._assistant_msg(prompt_msg) == {"role": "model", "parts": [prompt_msg]}
+ assert gemini_gpt._user_msg(prompt) == {"role": "user", "parts": [prompt]}
+ assert gemini_gpt._assistant_msg(prompt) == {"role": "model", "parts": [prompt]}
- usage = gemini_gpt.get_usage(messages, resp_content)
+ usage = gemini_gpt.get_usage(gemini_messages, resp_cont)
assert usage == {"prompt_tokens": 20, "completion_tokens": 20}
- resp = gemini_gpt.completion(messages)
+ resp = gemini_gpt.completion(gemini_messages)
assert resp == default_resp
- resp = await gemini_gpt.acompletion(messages)
+ resp = await gemini_gpt.acompletion(gemini_messages)
assert resp.text == default_resp.text
- resp = await gemini_gpt.aask(prompt_msg, stream=False)
- assert resp == resp_content
+ resp = await gemini_gpt.aask(prompt, stream=False)
+ assert resp == resp_cont
- resp = await gemini_gpt.acompletion_text(messages, stream=False)
- assert resp == resp_content
+ resp = await gemini_gpt.acompletion_text(gemini_messages, stream=False)
+ assert resp == resp_cont
- resp = await gemini_gpt.acompletion_text(messages, stream=True)
- assert resp == resp_content
+ resp = await gemini_gpt.acompletion_text(gemini_messages, stream=True)
+ assert resp == resp_cont
- resp = await gemini_gpt.aask(prompt_msg)
- assert resp == resp_content
+ resp = await gemini_gpt.aask(prompt)
+ assert resp == resp_cont
diff --git a/tests/metagpt/provider/test_ollama_api.py b/tests/metagpt/provider/test_ollama_api.py
index 5d942598b..8e2625e35 100644
--- a/tests/metagpt/provider/test_ollama_api.py
+++ b/tests/metagpt/provider/test_ollama_api.py
@@ -9,12 +9,10 @@ import pytest
from metagpt.provider.ollama_api import OllamaLLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config
+from tests.metagpt.provider.req_resp_const import messages, prompt, resp_cont_tmpl
-prompt_msg = "who are you"
-messages = [{"role": "user", "content": prompt_msg}]
-
-resp_content = "I'm ollama"
-default_resp = {"message": {"role": "assistant", "content": resp_content}}
+resp_cont = resp_cont_tmpl.format(name="ollama")
+default_resp = {"message": {"role": "assistant", "content": resp_cont}}
async def mock_ollama_arequest(self, stream: bool = False, **kwargs) -> Tuple[Any, Any, bool]:
@@ -46,14 +44,14 @@ async def test_gemini_acompletion(mocker):
resp = await ollama_gpt.acompletion(messages)
assert resp["message"]["content"] == default_resp["message"]["content"]
- resp = await ollama_gpt.aask(prompt_msg, stream=False)
- assert resp == resp_content
+ resp = await ollama_gpt.aask(prompt, stream=False)
+ assert resp == resp_cont
resp = await ollama_gpt.acompletion_text(messages, stream=False)
- assert resp == resp_content
+ assert resp == resp_cont
resp = await ollama_gpt.acompletion_text(messages, stream=True)
- assert resp == resp_content
+ assert resp == resp_cont
- resp = await ollama_gpt.aask(prompt_msg)
- assert resp == resp_content
+ resp = await ollama_gpt.aask(prompt)
+ assert resp == resp_cont
diff --git a/tests/metagpt/provider/test_open_llm_api.py b/tests/metagpt/provider/test_open_llm_api.py
index fc7b510cc..5b8a506e9 100644
--- a/tests/metagpt/provider/test_open_llm_api.py
+++ b/tests/metagpt/provider/test_open_llm_api.py
@@ -3,53 +3,25 @@
# @Desc :
import pytest
-from openai.types.chat.chat_completion import (
- ChatCompletion,
- ChatCompletionMessage,
- Choice,
-)
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
-from openai.types.chat.chat_completion_chunk import Choice as AChoice
-from openai.types.chat.chat_completion_chunk import ChoiceDelta
from openai.types.completion_usage import CompletionUsage
from metagpt.provider.open_llm_api import OpenLLM
-from metagpt.utils.cost_manager import Costs
+from metagpt.utils.cost_manager import CostManager, Costs
from tests.metagpt.provider.mock_llm_config import mock_llm_config
-
-resp_content = "I'm llama2"
-default_resp = ChatCompletion(
- id="cmpl-a6652c1bb181caae8dd19ad8",
- model="llama-v2-13b-chat",
- object="chat.completion",
- created=1703302755,
- choices=[
- Choice(
- finish_reason="stop",
- index=0,
- message=ChatCompletionMessage(role="assistant", content=resp_content),
- logprobs=None,
- )
- ],
+from tests.metagpt.provider.req_resp_const import (
+ get_openai_chat_completion,
+ get_openai_chat_completion_chunk,
+ messages,
+ prompt,
+ resp_cont_tmpl,
)
-default_resp_chunk = ChatCompletionChunk(
- id=default_resp.id,
- model=default_resp.model,
- object="chat.completion.chunk",
- created=default_resp.created,
- choices=[
- AChoice(
- delta=ChoiceDelta(content=resp_content, role="assistant"),
- finish_reason="stop",
- index=0,
- logprobs=None,
- )
- ],
-)
+llm_name = "llama2-7b"
+resp_cont = resp_cont_tmpl.format(name=llm_name)
+default_resp = get_openai_chat_completion(llm_name)
-prompt_msg = "who are you"
-messages = [{"role": "user", "content": prompt_msg}]
+default_resp_chunk = get_openai_chat_completion_chunk(llm_name)
async def mock_openai_acompletions_create(self, stream: bool = False, **kwargs) -> ChatCompletionChunk:
@@ -71,22 +43,23 @@ async def test_openllm_acompletion(mocker):
openllm_gpt = OpenLLM(mock_llm_config)
openllm_gpt.model = "llama-v2-13b-chat"
+ openllm_gpt.cost_manager = CostManager()
openllm_gpt._update_costs(usage=CompletionUsage(prompt_tokens=100, completion_tokens=100, total_tokens=200))
assert openllm_gpt.get_costs() == Costs(
total_prompt_tokens=100, total_completion_tokens=100, total_cost=0, total_budget=0
)
resp = await openllm_gpt.acompletion(messages)
- assert resp.choices[0].message.content in resp_content
+ assert resp.choices[0].message.content in resp_cont
- resp = await openllm_gpt.aask(prompt_msg, stream=False)
- assert resp == resp_content
+ resp = await openllm_gpt.aask(prompt, stream=False)
+ assert resp == resp_cont
resp = await openllm_gpt.acompletion_text(messages, stream=False)
- assert resp == resp_content
+ assert resp == resp_cont
resp = await openllm_gpt.acompletion_text(messages, stream=True)
- assert resp == resp_content
+ assert resp == resp_cont
- resp = await openllm_gpt.aask(prompt_msg)
- assert resp == resp_content
+ resp = await openllm_gpt.aask(prompt)
+ assert resp == resp_cont
diff --git a/tests/metagpt/provider/test_qianfan_api.py b/tests/metagpt/provider/test_qianfan_api.py
new file mode 100644
index 000000000..76271b1e8
--- /dev/null
+++ b/tests/metagpt/provider/test_qianfan_api.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Desc : the unittest of qianfan api
+
+import pytest
+
+from metagpt.provider.qianfan_api import QianFanLLM
+from tests.metagpt.provider.req_resp_const import prompt, messages, resp_cont_tmpl
+
+
+resp_cont = resp_cont_tmpl.format(name="ERNIE-Bot-turbo")
+
+
+def test_qianfan_acompletion(mocker):
+ assert True, True
diff --git a/tests/metagpt/provider/test_spark_api.py b/tests/metagpt/provider/test_spark_api.py
index f5a6f66fd..32a839393 100644
--- a/tests/metagpt/provider/test_spark_api.py
+++ b/tests/metagpt/provider/test_spark_api.py
@@ -4,12 +4,14 @@
import pytest
-from metagpt.config2 import Config
from metagpt.provider.spark_api import GetMessageFromWeb, SparkLLM
-from tests.metagpt.provider.mock_llm_config import mock_llm_config
+from tests.metagpt.provider.mock_llm_config import (
+ mock_llm_config,
+ mock_llm_config_spark,
+)
+from tests.metagpt.provider.req_resp_const import prompt, resp_cont_tmpl
-prompt_msg = "who are you"
-resp_content = "I'm Spark"
+resp_cont = resp_cont_tmpl.format(name="Spark")
class MockWebSocketApp(object):
@@ -23,7 +25,7 @@ class MockWebSocketApp(object):
def test_get_msg_from_web(mocker):
mocker.patch("websocket.WebSocketApp", MockWebSocketApp)
- get_msg_from_web = GetMessageFromWeb(prompt_msg, mock_llm_config)
+ get_msg_from_web = GetMessageFromWeb(prompt, mock_llm_config)
assert get_msg_from_web.gen_params()["parameter"]["chat"]["domain"] == "mock_domain"
ret = get_msg_from_web.run()
@@ -31,15 +33,17 @@ def test_get_msg_from_web(mocker):
def mock_spark_get_msg_from_web_run(self) -> str:
- return resp_content
+ return resp_cont
@pytest.mark.asyncio
-async def test_spark_aask():
- llm = SparkLLM(Config.from_home("spark.yaml").llm)
+async def test_spark_aask(mocker):
+ mocker.patch("metagpt.provider.spark_api.GetMessageFromWeb.run", mock_spark_get_msg_from_web_run)
+
+ llm = SparkLLM(mock_llm_config_spark)
resp = await llm.aask("Hello!")
- print(resp)
+ assert resp == resp_cont
@pytest.mark.asyncio
@@ -49,16 +53,16 @@ async def test_spark_acompletion(mocker):
spark_gpt = SparkLLM(mock_llm_config)
resp = await spark_gpt.acompletion([])
- assert resp == resp_content
+ assert resp == resp_cont
- resp = await spark_gpt.aask(prompt_msg, stream=False)
- assert resp == resp_content
+ resp = await spark_gpt.aask(prompt, stream=False)
+ assert resp == resp_cont
resp = await spark_gpt.acompletion_text([], stream=False)
- assert resp == resp_content
+ assert resp == resp_cont
resp = await spark_gpt.acompletion_text([], stream=True)
- assert resp == resp_content
+ assert resp == resp_cont
- resp = await spark_gpt.aask(prompt_msg)
- assert resp == resp_content
+ resp = await spark_gpt.aask(prompt)
+ assert resp == resp_cont
diff --git a/tests/metagpt/provider/test_zhipuai_api.py b/tests/metagpt/provider/test_zhipuai_api.py
index 798209710..064562bff 100644
--- a/tests/metagpt/provider/test_zhipuai_api.py
+++ b/tests/metagpt/provider/test_zhipuai_api.py
@@ -6,22 +6,23 @@ import pytest
from metagpt.provider.zhipuai_api import ZhiPuAILLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config_zhipu
+from tests.metagpt.provider.req_resp_const import (
+ get_part_chat_completion,
+ messages,
+ prompt,
+ resp_cont_tmpl,
+)
-prompt_msg = "who are you"
-messages = [{"role": "user", "content": prompt_msg}]
-
-resp_content = "I'm chatglm-turbo"
-default_resp = {
- "choices": [{"finish_reason": "stop", "index": 0, "message": {"content": resp_content, "role": "assistant"}}],
- "usage": {"completion_tokens": 22, "prompt_tokens": 19, "total_tokens": 41},
-}
+llm_name = "ChatGLM-4"
+resp_cont = resp_cont_tmpl.format(name=llm_name)
+default_resp = get_part_chat_completion(llm_name)
async def mock_zhipuai_acreate_stream(**kwargs):
class MockResponse(object):
async def _aread(self):
class Iterator(object):
- events = [{"choices": [{"index": 0, "delta": {"content": resp_content, "role": "assistant"}}]}]
+ events = [{"choices": [{"index": 0, "delta": {"content": resp_cont, "role": "assistant"}}]}]
async def __aiter__(self):
for event in self.events:
@@ -49,19 +50,19 @@ async def test_zhipuai_acompletion(mocker):
zhipu_gpt = ZhiPuAILLM(mock_llm_config_zhipu)
resp = await zhipu_gpt.acompletion(messages)
- assert resp["choices"][0]["message"]["content"] == resp_content
+ assert resp["choices"][0]["message"]["content"] == resp_cont
- resp = await zhipu_gpt.aask(prompt_msg, stream=False)
- assert resp == resp_content
+ resp = await zhipu_gpt.aask(prompt, stream=False)
+ assert resp == resp_cont
resp = await zhipu_gpt.acompletion_text(messages, stream=False)
- assert resp == resp_content
+ assert resp == resp_cont
resp = await zhipu_gpt.acompletion_text(messages, stream=True)
- assert resp == resp_content
+ assert resp == resp_cont
- resp = await zhipu_gpt.aask(prompt_msg)
- assert resp == resp_content
+ resp = await zhipu_gpt.aask(prompt)
+ assert resp == resp_cont
def test_zhipuai_proxy():
diff --git a/tests/spark.yaml b/tests/spark.yaml
deleted file mode 100644
index a5bbd98bd..000000000
--- a/tests/spark.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-llm:
- api_type: "spark"
- app_id: "xxx"
- api_key: "xxx"
- api_secret: "xxx"
- domain: "generalv2"
- base_url: "wss://spark-api.xf-yun.com/v3.1/chat"
\ No newline at end of file
From d94f4fbfbc3bd4310669f06e9bac9a7c89001712 Mon Sep 17 00:00:00 2001
From: shenchucheng
Date: Wed, 7 Feb 2024 17:44:36 +0800
Subject: [PATCH 024/104] fix research bugs
---
metagpt/provider/openai_api.py | 4 +++-
metagpt/utils/text.py | 2 +-
tests/metagpt/utils/test_text.py | 1 +
3 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/metagpt/provider/openai_api.py b/metagpt/provider/openai_api.py
index 63e68c9bd..7b2cd6220 100644
--- a/metagpt/provider/openai_api.py
+++ b/metagpt/provider/openai_api.py
@@ -253,7 +253,9 @@ class OpenAILLM(BaseLLM):
def _get_max_tokens(self, messages: list[dict]):
if not self.auto_max_tokens:
return self.config.max_token
- return get_max_completion_tokens(messages, self.model, self.config.max_tokens)
+ # FIXME
+ # https://community.openai.com/t/why-is-gpt-3-5-turbo-1106-max-tokens-limited-to-4096/494973/3
+ return min(get_max_completion_tokens(messages, self.model, self.config.max_tokens), 4096)
@handle_exception
async def amoderation(self, content: Union[str, list[str]]):
diff --git a/metagpt/utils/text.py b/metagpt/utils/text.py
index 921efe706..fb8b94232 100644
--- a/metagpt/utils/text.py
+++ b/metagpt/utils/text.py
@@ -93,7 +93,7 @@ def split_paragraph(paragraph: str, sep: str = ".,", count: int = 2) -> list[str
continue
ret = ["".join(j) for j in _split_by_count(sentences, count)]
return ret
- return _split_by_count(paragraph, count)
+ return list(_split_by_count(paragraph, count))
def decode_unicode_escape(text: str) -> str:
diff --git a/tests/metagpt/utils/test_text.py b/tests/metagpt/utils/test_text.py
index 7003c7767..c9a9753be 100644
--- a/tests/metagpt/utils/test_text.py
+++ b/tests/metagpt/utils/test_text.py
@@ -42,6 +42,7 @@ def test_reduce_message_length(msgs, model_name, system_text, reserved, expected
(" ".join("Hello World." for _ in range(1000)), "Prompt: {}", "gpt-3.5-turbo-16k", "System", 3000, 1),
(" ".join("Hello World." for _ in range(4000)), "Prompt: {}", "gpt-4", "System", 2000, 2),
(" ".join("Hello World." for _ in range(8000)), "Prompt: {}", "gpt-4-32k", "System", 4000, 1),
+ (" ".join("Hello World" for _ in range(8000)), "Prompt: {}", "gpt-3.5-turbo", "System", 1000, 8),
],
)
def test_generate_prompt_chunk(text, prompt_template, model_name, system_text, reserved, expected):
From d3f6e38e8a9805d6bc80e5489dde99007bd20b6d Mon Sep 17 00:00:00 2001
From: better629
Date: Wed, 7 Feb 2024 18:32:32 +0800
Subject: [PATCH 025/104] add qianfan ut code and update xx_llm from xx_gpt
---
metagpt/provider/qianfan_api.py | 3 +-
tests/metagpt/provider/mock_llm_config.py | 7 +++
tests/metagpt/provider/req_resp_const.py | 57 ++++++++++++++++---
tests/metagpt/provider/test_base_llm.py | 6 +-
tests/metagpt/provider/test_fireworks_llm.py | 26 ++++-----
.../provider/test_google_gemini_api.py | 20 +++----
tests/metagpt/provider/test_ollama_api.py | 12 ++--
tests/metagpt/provider/test_open_llm_api.py | 28 ++++-----
tests/metagpt/provider/test_qianfan_api.py | 39 +++++++++++--
tests/metagpt/provider/test_spark_api.py | 12 ++--
tests/metagpt/provider/test_zhipuai_api.py | 23 +++-----
11 files changed, 153 insertions(+), 80 deletions(-)
diff --git a/metagpt/provider/qianfan_api.py b/metagpt/provider/qianfan_api.py
index fbbff7085..6f94b9cea 100644
--- a/metagpt/provider/qianfan_api.py
+++ b/metagpt/provider/qianfan_api.py
@@ -5,6 +5,7 @@ import copy
import os
import qianfan
+from qianfan import ChatCompletion
from qianfan.resources.typing import JsonBody
from tenacity import (
after_log,
@@ -78,7 +79,7 @@ class QianFanLLM(BaseLLM):
# self deployed model on the cloud not to calculate usage, it charges resource pool rental fee
self.calc_usage = self.config.calc_usage and self.config.endpoint is None
- self.aclient = qianfan.ChatCompletion()
+ self.aclient: ChatCompletion = qianfan.ChatCompletion()
def _const_kwargs(self, messages: list[dict], stream: bool = False) -> dict:
kwargs = {
diff --git a/tests/metagpt/provider/mock_llm_config.py b/tests/metagpt/provider/mock_llm_config.py
index 21780f914..e61e32e8b 100644
--- a/tests/metagpt/provider/mock_llm_config.py
+++ b/tests/metagpt/provider/mock_llm_config.py
@@ -52,3 +52,10 @@ mock_llm_config_spark = LLMConfig(
domain="generalv2",
base_url="wss://spark-api.xf-yun.com/v3.1/chat",
)
+
+mock_llm_config_qianfan = LLMConfig(
+ api_type="qianfan",
+ access_key="xxx",
+ secret_key="xxx",
+ model="ERNIE-Bot-turbo"
+)
diff --git a/tests/metagpt/provider/req_resp_const.py b/tests/metagpt/provider/req_resp_const.py
index a3a7a363c..20d8e0914 100644
--- a/tests/metagpt/provider/req_resp_const.py
+++ b/tests/metagpt/provider/req_resp_const.py
@@ -2,6 +2,7 @@
# -*- coding: utf-8 -*-
# @Desc : default request & response data for provider unittest
+from typing import Dict
from openai.types.chat.chat_completion import (
ChatCompletion,
ChatCompletionMessage,
@@ -11,6 +12,9 @@ from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
from openai.types.chat.chat_completion_chunk import Choice as AChoice
from openai.types.chat.chat_completion_chunk import ChoiceDelta
from openai.types.completion_usage import CompletionUsage
+from qianfan.resources.typing import QfResponse, default_field
+
+from metagpt.provider.base_llm import BaseLLM
prompt = "who are you?"
messages = [{"role": "user", "content": prompt}]
@@ -20,14 +24,14 @@ default_resp_cont = resp_cont_tmpl.format(name="GPT")
# part of whole ChatCompletion of openai like structure
-def get_part_chat_completion(llm_name: str) -> dict:
+def get_part_chat_completion(name: str) -> dict:
part_chat_completion = {
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
- "content": resp_cont_tmpl.format(name=llm_name),
+ "content": resp_cont_tmpl.format(name=name),
},
"finish_reason": "stop",
}
@@ -37,7 +41,7 @@ def get_part_chat_completion(llm_name: str) -> dict:
return part_chat_completion
-def get_openai_chat_completion(llm_name: str) -> ChatCompletion:
+def get_openai_chat_completion(name: str) -> ChatCompletion:
openai_chat_completion = ChatCompletion(
id="cmpl-a6652c1bb181caae8dd19ad8",
model="xx/xxx",
@@ -47,7 +51,7 @@ def get_openai_chat_completion(llm_name: str) -> ChatCompletion:
Choice(
finish_reason="stop",
index=0,
- message=ChatCompletionMessage(role="assistant", content=resp_cont_tmpl.format(name=llm_name)),
+ message=ChatCompletionMessage(role="assistant", content=resp_cont_tmpl.format(name=name)),
logprobs=None,
)
],
@@ -56,7 +60,7 @@ def get_openai_chat_completion(llm_name: str) -> ChatCompletion:
return openai_chat_completion
-def get_openai_chat_completion_chunk(llm_name: str, usage_as_dict: bool = False) -> ChatCompletionChunk:
+def get_openai_chat_completion_chunk(name: str, usage_as_dict: bool = False) -> ChatCompletionChunk:
usage = CompletionUsage(completion_tokens=110, prompt_tokens=92, total_tokens=202)
usage = usage if not usage_as_dict else usage.model_dump()
openai_chat_completion_chunk = ChatCompletionChunk(
@@ -66,7 +70,7 @@ def get_openai_chat_completion_chunk(llm_name: str, usage_as_dict: bool = False)
created=1703300855,
choices=[
AChoice(
- delta=ChoiceDelta(role="assistant", content=resp_cont_tmpl.format(name=llm_name)),
+ delta=ChoiceDelta(role="assistant", content=resp_cont_tmpl.format(name=name)),
finish_reason="stop",
index=0,
logprobs=None,
@@ -76,5 +80,44 @@ def get_openai_chat_completion_chunk(llm_name: str, usage_as_dict: bool = False)
)
return openai_chat_completion_chunk
-
+# For gemini
gemini_messages = [{"role": "user", "parts": prompt}]
+
+
+# For QianFan
+qf_jsonbody_dict = {
+ "id": "as-4v1h587fyv",
+ "object": "chat.completion",
+ "created": 1695021339,
+ "result": "",
+ "is_truncated": False,
+ "need_clear_history": False,
+ "usage": {
+ "prompt_tokens": 7,
+ "completion_tokens": 15,
+ "total_tokens": 22
+ }
+}
+
+
+def get_qianfan_response(name: str) -> QfResponse:
+ qf_jsonbody_dict["result"] = resp_cont_tmpl.format(name=name)
+ return QfResponse(
+ code=200,
+ body=qf_jsonbody_dict
+ )
+
+
+# For llm general chat functions call
+async def llm_general_chat_funcs_test(llm: BaseLLM, prompt: str, messages: list[dict], resp_cont: str):
+ resp = await llm.aask(prompt, stream=False)
+ assert resp == resp_cont
+
+ resp = await llm.aask(prompt)
+ assert resp == resp_cont
+
+ resp = await llm.acompletion_text(messages, stream=False)
+ assert resp == resp_cont
+
+ resp = await llm.acompletion_text(messages, stream=True)
+ assert resp == resp_cont
diff --git a/tests/metagpt/provider/test_base_llm.py b/tests/metagpt/provider/test_base_llm.py
index 0babd6d5f..cf44343bc 100644
--- a/tests/metagpt/provider/test_base_llm.py
+++ b/tests/metagpt/provider/test_base_llm.py
@@ -17,7 +17,7 @@ from tests.metagpt.provider.req_resp_const import (
prompt,
)
-llm_name = "GPT"
+name = "GPT"
class MockBaseLLM(BaseLLM):
@@ -25,10 +25,10 @@ class MockBaseLLM(BaseLLM):
pass
def completion(self, messages: list[dict], timeout=3):
- return get_part_chat_completion(llm_name)
+ return get_part_chat_completion(name)
async def acompletion(self, messages: list[dict], timeout=3):
- return get_part_chat_completion(llm_name)
+ return get_part_chat_completion(name)
async def acompletion_text(self, messages: list[dict], stream=False, timeout=3) -> str:
return default_resp_cont
diff --git a/tests/metagpt/provider/test_fireworks_llm.py b/tests/metagpt/provider/test_fireworks_llm.py
index 834f6305f..e28f7500b 100644
--- a/tests/metagpt/provider/test_fireworks_llm.py
+++ b/tests/metagpt/provider/test_fireworks_llm.py
@@ -21,10 +21,10 @@ from tests.metagpt.provider.req_resp_const import (
resp_cont_tmpl,
)
-llm_name = "fireworks"
-resp_cont = resp_cont_tmpl.format(name=llm_name)
-default_resp = get_openai_chat_completion(llm_name)
-default_resp_chunk = get_openai_chat_completion_chunk(llm_name, usage_as_dict=True)
+name = "fireworks"
+resp_cont = resp_cont_tmpl.format(name=name)
+default_resp = get_openai_chat_completion(name)
+default_resp_chunk = get_openai_chat_completion_chunk(name, usage_as_dict=True)
def test_fireworks_costmanager():
@@ -57,27 +57,27 @@ async def mock_openai_acompletions_create(self, stream: bool = False, **kwargs)
async def test_fireworks_acompletion(mocker):
mocker.patch("openai.resources.chat.completions.AsyncCompletions.create", mock_openai_acompletions_create)
- fireworks_gpt = FireworksLLM(mock_llm_config)
- fireworks_gpt.model = "llama-v2-13b-chat"
+ fireworks_llm = FireworksLLM(mock_llm_config)
+ fireworks_llm.model = "llama-v2-13b-chat"
- fireworks_gpt._update_costs(
+ fireworks_llm._update_costs(
usage=CompletionUsage(prompt_tokens=500000, completion_tokens=500000, total_tokens=1000000)
)
- assert fireworks_gpt.get_costs() == Costs(
+ assert fireworks_llm.get_costs() == Costs(
total_prompt_tokens=500000, total_completion_tokens=500000, total_cost=0.5, total_budget=0
)
- resp = await fireworks_gpt.acompletion(messages)
+ resp = await fireworks_llm.acompletion(messages)
assert resp.choices[0].message.content in resp_cont
- resp = await fireworks_gpt.aask(prompt, stream=False)
+ resp = await fireworks_llm.aask(prompt, stream=False)
assert resp == resp_cont
- resp = await fireworks_gpt.acompletion_text(messages, stream=False)
+ resp = await fireworks_llm.acompletion_text(messages, stream=False)
assert resp == resp_cont
- resp = await fireworks_gpt.acompletion_text(messages, stream=True)
+ resp = await fireworks_llm.acompletion_text(messages, stream=True)
assert resp == resp_cont
- resp = await fireworks_gpt.aask(prompt)
+ resp = await fireworks_llm.aask(prompt)
assert resp == resp_cont
diff --git a/tests/metagpt/provider/test_google_gemini_api.py b/tests/metagpt/provider/test_google_gemini_api.py
index ad0c7bbfe..dae9d123b 100644
--- a/tests/metagpt/provider/test_google_gemini_api.py
+++ b/tests/metagpt/provider/test_google_gemini_api.py
@@ -63,28 +63,28 @@ async def test_gemini_acompletion(mocker):
mock_gemini_generate_content_async,
)
- gemini_gpt = GeminiLLM(mock_llm_config)
+ gemini_llm = GeminiLLM(mock_llm_config)
- assert gemini_gpt._user_msg(prompt) == {"role": "user", "parts": [prompt]}
- assert gemini_gpt._assistant_msg(prompt) == {"role": "model", "parts": [prompt]}
+ assert gemini_llm._user_msg(prompt) == {"role": "user", "parts": [prompt]}
+ assert gemini_llm._assistant_msg(prompt) == {"role": "model", "parts": [prompt]}
- usage = gemini_gpt.get_usage(gemini_messages, resp_cont)
+ usage = gemini_llm.get_usage(gemini_messages, resp_cont)
assert usage == {"prompt_tokens": 20, "completion_tokens": 20}
- resp = gemini_gpt.completion(gemini_messages)
+ resp = gemini_llm.completion(gemini_messages)
assert resp == default_resp
- resp = await gemini_gpt.acompletion(gemini_messages)
+ resp = await gemini_llm.acompletion(gemini_messages)
assert resp.text == default_resp.text
- resp = await gemini_gpt.aask(prompt, stream=False)
+ resp = await gemini_llm.aask(prompt, stream=False)
assert resp == resp_cont
- resp = await gemini_gpt.acompletion_text(gemini_messages, stream=False)
+ resp = await gemini_llm.acompletion_text(gemini_messages, stream=False)
assert resp == resp_cont
- resp = await gemini_gpt.acompletion_text(gemini_messages, stream=True)
+ resp = await gemini_llm.acompletion_text(gemini_messages, stream=True)
assert resp == resp_cont
- resp = await gemini_gpt.aask(prompt)
+ resp = await gemini_llm.aask(prompt)
assert resp == resp_cont
diff --git a/tests/metagpt/provider/test_ollama_api.py b/tests/metagpt/provider/test_ollama_api.py
index 8e2625e35..01d53251c 100644
--- a/tests/metagpt/provider/test_ollama_api.py
+++ b/tests/metagpt/provider/test_ollama_api.py
@@ -39,19 +39,19 @@ async def mock_ollama_arequest(self, stream: bool = False, **kwargs) -> Tuple[An
async def test_gemini_acompletion(mocker):
mocker.patch("metagpt.provider.general_api_requestor.GeneralAPIRequestor.arequest", mock_ollama_arequest)
- ollama_gpt = OllamaLLM(mock_llm_config)
+ ollama_llm = OllamaLLM(mock_llm_config)
- resp = await ollama_gpt.acompletion(messages)
+ resp = await ollama_llm.acompletion(messages)
assert resp["message"]["content"] == default_resp["message"]["content"]
- resp = await ollama_gpt.aask(prompt, stream=False)
+ resp = await ollama_llm.aask(prompt, stream=False)
assert resp == resp_cont
- resp = await ollama_gpt.acompletion_text(messages, stream=False)
+ resp = await ollama_llm.acompletion_text(messages, stream=False)
assert resp == resp_cont
- resp = await ollama_gpt.acompletion_text(messages, stream=True)
+ resp = await ollama_llm.acompletion_text(messages, stream=True)
assert resp == resp_cont
- resp = await ollama_gpt.aask(prompt)
+ resp = await ollama_llm.aask(prompt)
assert resp == resp_cont
diff --git a/tests/metagpt/provider/test_open_llm_api.py b/tests/metagpt/provider/test_open_llm_api.py
index 5b8a506e9..b2e759d06 100644
--- a/tests/metagpt/provider/test_open_llm_api.py
+++ b/tests/metagpt/provider/test_open_llm_api.py
@@ -17,11 +17,11 @@ from tests.metagpt.provider.req_resp_const import (
resp_cont_tmpl,
)
-llm_name = "llama2-7b"
-resp_cont = resp_cont_tmpl.format(name=llm_name)
-default_resp = get_openai_chat_completion(llm_name)
+name = "llama2-7b"
+resp_cont = resp_cont_tmpl.format(name=name)
+default_resp = get_openai_chat_completion(name)
-default_resp_chunk = get_openai_chat_completion_chunk(llm_name)
+default_resp_chunk = get_openai_chat_completion_chunk(name)
async def mock_openai_acompletions_create(self, stream: bool = False, **kwargs) -> ChatCompletionChunk:
@@ -40,26 +40,26 @@ async def mock_openai_acompletions_create(self, stream: bool = False, **kwargs)
async def test_openllm_acompletion(mocker):
mocker.patch("openai.resources.chat.completions.AsyncCompletions.create", mock_openai_acompletions_create)
- openllm_gpt = OpenLLM(mock_llm_config)
- openllm_gpt.model = "llama-v2-13b-chat"
+ openllm_llm = OpenLLM(mock_llm_config)
+ openllm_llm.model = "llama-v2-13b-chat"
- openllm_gpt.cost_manager = CostManager()
- openllm_gpt._update_costs(usage=CompletionUsage(prompt_tokens=100, completion_tokens=100, total_tokens=200))
- assert openllm_gpt.get_costs() == Costs(
+ openllm_llm.cost_manager = CostManager()
+ openllm_llm._update_costs(usage=CompletionUsage(prompt_tokens=100, completion_tokens=100, total_tokens=200))
+ assert openllm_llm.get_costs() == Costs(
total_prompt_tokens=100, total_completion_tokens=100, total_cost=0, total_budget=0
)
- resp = await openllm_gpt.acompletion(messages)
+ resp = await openllm_llm.acompletion(messages)
assert resp.choices[0].message.content in resp_cont
- resp = await openllm_gpt.aask(prompt, stream=False)
+ resp = await openllm_llm.aask(prompt, stream=False)
assert resp == resp_cont
- resp = await openllm_gpt.acompletion_text(messages, stream=False)
+ resp = await openllm_llm.acompletion_text(messages, stream=False)
assert resp == resp_cont
- resp = await openllm_gpt.acompletion_text(messages, stream=True)
+ resp = await openllm_llm.acompletion_text(messages, stream=True)
assert resp == resp_cont
- resp = await openllm_gpt.aask(prompt)
+ resp = await openllm_llm.aask(prompt)
assert resp == resp_cont
diff --git a/tests/metagpt/provider/test_qianfan_api.py b/tests/metagpt/provider/test_qianfan_api.py
index 76271b1e8..30ac06911 100644
--- a/tests/metagpt/provider/test_qianfan_api.py
+++ b/tests/metagpt/provider/test_qianfan_api.py
@@ -2,14 +2,45 @@
# -*- coding: utf-8 -*-
# @Desc : the unittest of qianfan api
+from typing import Dict, Union, AsyncIterator
import pytest
+from qianfan.resources.typing import JsonBody, QfResponse
+
from metagpt.provider.qianfan_api import QianFanLLM
-from tests.metagpt.provider.req_resp_const import prompt, messages, resp_cont_tmpl
+from tests.metagpt.provider.mock_llm_config import mock_llm_config_qianfan
+from tests.metagpt.provider.req_resp_const import resp_cont_tmpl, prompt, messages, llm_general_chat_funcs_test, get_qianfan_response
+
+name = "ERNIE-Bot-turbo"
+resp_cont = resp_cont_tmpl.format(name=name)
-resp_cont = resp_cont_tmpl.format(name="ERNIE-Bot-turbo")
+def mock_qianfan_do(self, messages: list[dict], model: str, stream: bool = False, system: str = None) -> QfResponse:
+ return get_qianfan_response(name=name)
-def test_qianfan_acompletion(mocker):
- assert True, True
+async def mock_qianfan_ado(self, messages: list[dict], model: str, stream: bool = True, system: str = None) -> Union[QfResponse, AsyncIterator[QfResponse]]:
+ resps = [get_qianfan_response(name=name)]
+ if stream:
+ async def aresp_iterator(resps: list[JsonBody]):
+ for resp in resps:
+ yield resp
+ return aresp_iterator(resps)
+ else:
+ return resps[0]
+
+
+@pytest.mark.asyncio
+async def test_qianfan_acompletion(mocker):
+ mocker.patch("qianfan.resources.llm.chat_completion.ChatCompletion.do", mock_qianfan_do)
+ mocker.patch("qianfan.resources.llm.chat_completion.ChatCompletion.ado", mock_qianfan_ado)
+
+ qianfan_llm = QianFanLLM(mock_llm_config_qianfan)
+
+ resp = qianfan_llm.completion(messages)
+ assert resp.get("result") == resp_cont
+
+ resp = await qianfan_llm.acompletion(messages)
+ assert resp.get("result") == resp_cont
+
+ await llm_general_chat_funcs_test(qianfan_llm, prompt, messages, resp_cont)
diff --git a/tests/metagpt/provider/test_spark_api.py b/tests/metagpt/provider/test_spark_api.py
index 32a839393..8aa8bc7a8 100644
--- a/tests/metagpt/provider/test_spark_api.py
+++ b/tests/metagpt/provider/test_spark_api.py
@@ -50,19 +50,19 @@ async def test_spark_aask(mocker):
async def test_spark_acompletion(mocker):
mocker.patch("metagpt.provider.spark_api.GetMessageFromWeb.run", mock_spark_get_msg_from_web_run)
- spark_gpt = SparkLLM(mock_llm_config)
+ spark_llm = SparkLLM(mock_llm_config)
- resp = await spark_gpt.acompletion([])
+ resp = await spark_llm.acompletion([])
assert resp == resp_cont
- resp = await spark_gpt.aask(prompt, stream=False)
+ resp = await spark_llm.aask(prompt, stream=False)
assert resp == resp_cont
- resp = await spark_gpt.acompletion_text([], stream=False)
+ resp = await spark_llm.acompletion_text([], stream=False)
assert resp == resp_cont
- resp = await spark_gpt.acompletion_text([], stream=True)
+ resp = await spark_llm.acompletion_text([], stream=True)
assert resp == resp_cont
- resp = await spark_gpt.aask(prompt)
+ resp = await spark_llm.aask(prompt)
assert resp == resp_cont
diff --git a/tests/metagpt/provider/test_zhipuai_api.py b/tests/metagpt/provider/test_zhipuai_api.py
index 064562bff..3dada367c 100644
--- a/tests/metagpt/provider/test_zhipuai_api.py
+++ b/tests/metagpt/provider/test_zhipuai_api.py
@@ -11,11 +11,12 @@ from tests.metagpt.provider.req_resp_const import (
messages,
prompt,
resp_cont_tmpl,
+ llm_general_chat_funcs_test
)
-llm_name = "ChatGLM-4"
-resp_cont = resp_cont_tmpl.format(name=llm_name)
-default_resp = get_part_chat_completion(llm_name)
+name = "ChatGLM-4"
+resp_cont = resp_cont_tmpl.format(name=name)
+default_resp = get_part_chat_completion(name)
async def mock_zhipuai_acreate_stream(**kwargs):
@@ -47,22 +48,12 @@ async def test_zhipuai_acompletion(mocker):
mocker.patch("metagpt.provider.zhipuai.zhipu_model_api.ZhiPuModelAPI.acreate", mock_zhipuai_acreate)
mocker.patch("metagpt.provider.zhipuai.zhipu_model_api.ZhiPuModelAPI.acreate_stream", mock_zhipuai_acreate_stream)
- zhipu_gpt = ZhiPuAILLM(mock_llm_config_zhipu)
+ zhipu_llm = ZhiPuAILLM(mock_llm_config_zhipu)
- resp = await zhipu_gpt.acompletion(messages)
+ resp = await zhipu_llm.acompletion(messages)
assert resp["choices"][0]["message"]["content"] == resp_cont
- resp = await zhipu_gpt.aask(prompt, stream=False)
- assert resp == resp_cont
-
- resp = await zhipu_gpt.acompletion_text(messages, stream=False)
- assert resp == resp_cont
-
- resp = await zhipu_gpt.acompletion_text(messages, stream=True)
- assert resp == resp_cont
-
- resp = await zhipu_gpt.aask(prompt)
- assert resp == resp_cont
+ await llm_general_chat_funcs_test(zhipu_llm, prompt, messages, resp_cont)
def test_zhipuai_proxy():
From 997e25e97d291d83e0fc587abde46bf383308a59 Mon Sep 17 00:00:00 2001
From: better629
Date: Wed, 7 Feb 2024 18:42:22 +0800
Subject: [PATCH 026/104] simplify provider ut code
---
tests/metagpt/provider/mock_llm_config.py | 7 +------
tests/metagpt/provider/req_resp_const.py | 16 +++++-----------
tests/metagpt/provider/test_fireworks_llm.py | 13 ++-----------
.../metagpt/provider/test_google_gemini_api.py | 13 ++-----------
tests/metagpt/provider/test_ollama_api.py | 16 +++++++---------
tests/metagpt/provider/test_open_llm_api.py | 13 ++-----------
tests/metagpt/provider/test_qianfan_api.py | 18 ++++++++++++++----
tests/metagpt/provider/test_spark_api.py | 18 ++++++------------
tests/metagpt/provider/test_zhipuai_api.py | 2 +-
9 files changed, 40 insertions(+), 76 deletions(-)
diff --git a/tests/metagpt/provider/mock_llm_config.py b/tests/metagpt/provider/mock_llm_config.py
index e61e32e8b..e0afaa51e 100644
--- a/tests/metagpt/provider/mock_llm_config.py
+++ b/tests/metagpt/provider/mock_llm_config.py
@@ -53,9 +53,4 @@ mock_llm_config_spark = LLMConfig(
base_url="wss://spark-api.xf-yun.com/v3.1/chat",
)
-mock_llm_config_qianfan = LLMConfig(
- api_type="qianfan",
- access_key="xxx",
- secret_key="xxx",
- model="ERNIE-Bot-turbo"
-)
+mock_llm_config_qianfan = LLMConfig(api_type="qianfan", access_key="xxx", secret_key="xxx", model="ERNIE-Bot-turbo")
diff --git a/tests/metagpt/provider/req_resp_const.py b/tests/metagpt/provider/req_resp_const.py
index 20d8e0914..73939e1c6 100644
--- a/tests/metagpt/provider/req_resp_const.py
+++ b/tests/metagpt/provider/req_resp_const.py
@@ -2,7 +2,7 @@
# -*- coding: utf-8 -*-
# @Desc : default request & response data for provider unittest
-from typing import Dict
+
from openai.types.chat.chat_completion import (
ChatCompletion,
ChatCompletionMessage,
@@ -12,7 +12,7 @@ from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
from openai.types.chat.chat_completion_chunk import Choice as AChoice
from openai.types.chat.chat_completion_chunk import ChoiceDelta
from openai.types.completion_usage import CompletionUsage
-from qianfan.resources.typing import QfResponse, default_field
+from qianfan.resources.typing import QfResponse
from metagpt.provider.base_llm import BaseLLM
@@ -80,6 +80,7 @@ def get_openai_chat_completion_chunk(name: str, usage_as_dict: bool = False) ->
)
return openai_chat_completion_chunk
+
# For gemini
gemini_messages = [{"role": "user", "parts": prompt}]
@@ -92,20 +93,13 @@ qf_jsonbody_dict = {
"result": "",
"is_truncated": False,
"need_clear_history": False,
- "usage": {
- "prompt_tokens": 7,
- "completion_tokens": 15,
- "total_tokens": 22
- }
+ "usage": {"prompt_tokens": 7, "completion_tokens": 15, "total_tokens": 22},
}
def get_qianfan_response(name: str) -> QfResponse:
qf_jsonbody_dict["result"] = resp_cont_tmpl.format(name=name)
- return QfResponse(
- code=200,
- body=qf_jsonbody_dict
- )
+ return QfResponse(code=200, body=qf_jsonbody_dict)
# For llm general chat functions call
diff --git a/tests/metagpt/provider/test_fireworks_llm.py b/tests/metagpt/provider/test_fireworks_llm.py
index e28f7500b..1c1aa9caa 100644
--- a/tests/metagpt/provider/test_fireworks_llm.py
+++ b/tests/metagpt/provider/test_fireworks_llm.py
@@ -16,6 +16,7 @@ from tests.metagpt.provider.mock_llm_config import mock_llm_config
from tests.metagpt.provider.req_resp_const import (
get_openai_chat_completion,
get_openai_chat_completion_chunk,
+ llm_general_chat_funcs_test,
messages,
prompt,
resp_cont_tmpl,
@@ -70,14 +71,4 @@ async def test_fireworks_acompletion(mocker):
resp = await fireworks_llm.acompletion(messages)
assert resp.choices[0].message.content in resp_cont
- resp = await fireworks_llm.aask(prompt, stream=False)
- assert resp == resp_cont
-
- resp = await fireworks_llm.acompletion_text(messages, stream=False)
- assert resp == resp_cont
-
- resp = await fireworks_llm.acompletion_text(messages, stream=True)
- assert resp == resp_cont
-
- resp = await fireworks_llm.aask(prompt)
- assert resp == resp_cont
+ await llm_general_chat_funcs_test(fireworks_llm, prompt, messages, resp_cont)
diff --git a/tests/metagpt/provider/test_google_gemini_api.py b/tests/metagpt/provider/test_google_gemini_api.py
index dae9d123b..50c15ee19 100644
--- a/tests/metagpt/provider/test_google_gemini_api.py
+++ b/tests/metagpt/provider/test_google_gemini_api.py
@@ -13,6 +13,7 @@ from metagpt.provider.google_gemini_api import GeminiLLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config
from tests.metagpt.provider.req_resp_const import (
gemini_messages,
+ llm_general_chat_funcs_test,
prompt,
resp_cont_tmpl,
)
@@ -77,14 +78,4 @@ async def test_gemini_acompletion(mocker):
resp = await gemini_llm.acompletion(gemini_messages)
assert resp.text == default_resp.text
- resp = await gemini_llm.aask(prompt, stream=False)
- assert resp == resp_cont
-
- resp = await gemini_llm.acompletion_text(gemini_messages, stream=False)
- assert resp == resp_cont
-
- resp = await gemini_llm.acompletion_text(gemini_messages, stream=True)
- assert resp == resp_cont
-
- resp = await gemini_llm.aask(prompt)
- assert resp == resp_cont
+ await llm_general_chat_funcs_test(gemini_llm, prompt, gemini_messages, resp_cont)
diff --git a/tests/metagpt/provider/test_ollama_api.py b/tests/metagpt/provider/test_ollama_api.py
index 01d53251c..af2e929e9 100644
--- a/tests/metagpt/provider/test_ollama_api.py
+++ b/tests/metagpt/provider/test_ollama_api.py
@@ -9,7 +9,12 @@ import pytest
from metagpt.provider.ollama_api import OllamaLLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config
-from tests.metagpt.provider.req_resp_const import messages, prompt, resp_cont_tmpl
+from tests.metagpt.provider.req_resp_const import (
+ llm_general_chat_funcs_test,
+ messages,
+ prompt,
+ resp_cont_tmpl,
+)
resp_cont = resp_cont_tmpl.format(name="ollama")
default_resp = {"message": {"role": "assistant", "content": resp_cont}}
@@ -47,11 +52,4 @@ async def test_gemini_acompletion(mocker):
resp = await ollama_llm.aask(prompt, stream=False)
assert resp == resp_cont
- resp = await ollama_llm.acompletion_text(messages, stream=False)
- assert resp == resp_cont
-
- resp = await ollama_llm.acompletion_text(messages, stream=True)
- assert resp == resp_cont
-
- resp = await ollama_llm.aask(prompt)
- assert resp == resp_cont
+ await llm_general_chat_funcs_test(ollama_llm, prompt, messages, resp_cont)
diff --git a/tests/metagpt/provider/test_open_llm_api.py b/tests/metagpt/provider/test_open_llm_api.py
index b2e759d06..aa38b95a6 100644
--- a/tests/metagpt/provider/test_open_llm_api.py
+++ b/tests/metagpt/provider/test_open_llm_api.py
@@ -12,6 +12,7 @@ from tests.metagpt.provider.mock_llm_config import mock_llm_config
from tests.metagpt.provider.req_resp_const import (
get_openai_chat_completion,
get_openai_chat_completion_chunk,
+ llm_general_chat_funcs_test,
messages,
prompt,
resp_cont_tmpl,
@@ -52,14 +53,4 @@ async def test_openllm_acompletion(mocker):
resp = await openllm_llm.acompletion(messages)
assert resp.choices[0].message.content in resp_cont
- resp = await openllm_llm.aask(prompt, stream=False)
- assert resp == resp_cont
-
- resp = await openllm_llm.acompletion_text(messages, stream=False)
- assert resp == resp_cont
-
- resp = await openllm_llm.acompletion_text(messages, stream=True)
- assert resp == resp_cont
-
- resp = await openllm_llm.aask(prompt)
- assert resp == resp_cont
+ await llm_general_chat_funcs_test(openllm_llm, prompt, messages, resp_cont)
diff --git a/tests/metagpt/provider/test_qianfan_api.py b/tests/metagpt/provider/test_qianfan_api.py
index 30ac06911..28341425c 100644
--- a/tests/metagpt/provider/test_qianfan_api.py
+++ b/tests/metagpt/provider/test_qianfan_api.py
@@ -2,14 +2,20 @@
# -*- coding: utf-8 -*-
# @Desc : the unittest of qianfan api
-from typing import Dict, Union, AsyncIterator
-import pytest
+from typing import AsyncIterator, Union
+import pytest
from qianfan.resources.typing import JsonBody, QfResponse
from metagpt.provider.qianfan_api import QianFanLLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config_qianfan
-from tests.metagpt.provider.req_resp_const import resp_cont_tmpl, prompt, messages, llm_general_chat_funcs_test, get_qianfan_response
+from tests.metagpt.provider.req_resp_const import (
+ get_qianfan_response,
+ llm_general_chat_funcs_test,
+ messages,
+ prompt,
+ resp_cont_tmpl,
+)
name = "ERNIE-Bot-turbo"
resp_cont = resp_cont_tmpl.format(name=name)
@@ -19,12 +25,16 @@ def mock_qianfan_do(self, messages: list[dict], model: str, stream: bool = False
return get_qianfan_response(name=name)
-async def mock_qianfan_ado(self, messages: list[dict], model: str, stream: bool = True, system: str = None) -> Union[QfResponse, AsyncIterator[QfResponse]]:
+async def mock_qianfan_ado(
+ self, messages: list[dict], model: str, stream: bool = True, system: str = None
+) -> Union[QfResponse, AsyncIterator[QfResponse]]:
resps = [get_qianfan_response(name=name)]
if stream:
+
async def aresp_iterator(resps: list[JsonBody]):
for resp in resps:
yield resp
+
return aresp_iterator(resps)
else:
return resps[0]
diff --git a/tests/metagpt/provider/test_spark_api.py b/tests/metagpt/provider/test_spark_api.py
index 8aa8bc7a8..9c278267d 100644
--- a/tests/metagpt/provider/test_spark_api.py
+++ b/tests/metagpt/provider/test_spark_api.py
@@ -9,7 +9,11 @@ from tests.metagpt.provider.mock_llm_config import (
mock_llm_config,
mock_llm_config_spark,
)
-from tests.metagpt.provider.req_resp_const import prompt, resp_cont_tmpl
+from tests.metagpt.provider.req_resp_const import (
+ llm_general_chat_funcs_test,
+ prompt,
+ resp_cont_tmpl,
+)
resp_cont = resp_cont_tmpl.format(name="Spark")
@@ -55,14 +59,4 @@ async def test_spark_acompletion(mocker):
resp = await spark_llm.acompletion([])
assert resp == resp_cont
- resp = await spark_llm.aask(prompt, stream=False)
- assert resp == resp_cont
-
- resp = await spark_llm.acompletion_text([], stream=False)
- assert resp == resp_cont
-
- resp = await spark_llm.acompletion_text([], stream=True)
- assert resp == resp_cont
-
- resp = await spark_llm.aask(prompt)
- assert resp == resp_cont
+ await llm_general_chat_funcs_test(spark_llm, prompt, prompt, resp_cont)
diff --git a/tests/metagpt/provider/test_zhipuai_api.py b/tests/metagpt/provider/test_zhipuai_api.py
index 3dada367c..8ec9ab4f9 100644
--- a/tests/metagpt/provider/test_zhipuai_api.py
+++ b/tests/metagpt/provider/test_zhipuai_api.py
@@ -8,10 +8,10 @@ from metagpt.provider.zhipuai_api import ZhiPuAILLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config_zhipu
from tests.metagpt.provider.req_resp_const import (
get_part_chat_completion,
+ llm_general_chat_funcs_test,
messages,
prompt,
resp_cont_tmpl,
- llm_general_chat_funcs_test
)
name = "ChatGLM-4"
From eb1e1b9ef22fe58720dfa71f01b812eecaf32b87 Mon Sep 17 00:00:00 2001
From: yzlin
Date: Wed, 7 Feb 2024 21:54:40 +0800
Subject: [PATCH 027/104] mv examples
---
examples/ci/{data_analysis.py => data_visualization.py} | 0
examples/{ => ci}/email_summary.py | 0
examples/ci/machine_learning.py | 0
examples/{ => ci}/ml_engineer_with_tools.py | 6 +++---
4 files changed, 3 insertions(+), 3 deletions(-)
rename examples/ci/{data_analysis.py => data_visualization.py} (100%)
rename examples/{ => ci}/email_summary.py (100%)
create mode 100644 examples/ci/machine_learning.py
rename examples/{ => ci}/ml_engineer_with_tools.py (74%)
diff --git a/examples/ci/data_analysis.py b/examples/ci/data_visualization.py
similarity index 100%
rename from examples/ci/data_analysis.py
rename to examples/ci/data_visualization.py
diff --git a/examples/email_summary.py b/examples/ci/email_summary.py
similarity index 100%
rename from examples/email_summary.py
rename to examples/ci/email_summary.py
diff --git a/examples/ci/machine_learning.py b/examples/ci/machine_learning.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/ml_engineer_with_tools.py b/examples/ci/ml_engineer_with_tools.py
similarity index 74%
rename from examples/ml_engineer_with_tools.py
rename to examples/ci/ml_engineer_with_tools.py
index 1c90f2946..1c73a1dd0 100644
--- a/examples/ml_engineer_with_tools.py
+++ b/examples/ci/ml_engineer_with_tools.py
@@ -9,8 +9,8 @@ async def main(requirement: str, auto_run: bool = True, use_tools: bool = True):
if __name__ == "__main__":
- data_path = "your_path_to_icr/icr-identify-age-related-conditions" # 替换 'your_path_to_icr' 为实际数据存放的路径
- train_path = f"{data_path}/your_train_data.csv" # 替换 'your_train_data.csv' 为你的训练数据文件名
- eval_path = f"{data_path}/your_eval_data.csv" # 替换 'your_eval_data.csv' 为你的评估数据文件名
+ data_path = "your_path_to_icr/icr-identify-age-related-conditions"
+ train_path = f"{data_path}/your_train_data.csv"
+ eval_path = f"{data_path}/your_eval_data.csv"
requirement = f"This is a medical dataset with over fifty anonymized health characteristics linked to three age-related conditions. Your goal is to predict whether a subject has or has not been diagnosed with one of these conditions.The target column is Class. Perform data analysis, data preprocessing, feature engineering, and modeling to predict the target. Report f1 score on the eval data. Train data path: {train_path}, eval data path:{eval_path}."
asyncio.run(main(requirement))
From c7ee54ace108f24571a5434ded2452a102be7c86 Mon Sep 17 00:00:00 2001
From: better629
Date: Wed, 7 Feb 2024 22:50:30 +0800
Subject: [PATCH 028/104] add dashscope including QWEN and its ut code
---
examples/llm_hello_world.py | 2 +-
metagpt/configs/llm_config.py | 1 +
metagpt/provider/__init__.py | 2 +
metagpt/provider/dashscope_api.py | 246 +++++++++++++++++++
metagpt/utils/token_counter.py | 29 +++
requirements.txt | 1 +
tests/metagpt/provider/mock_llm_config.py | 2 +
tests/metagpt/provider/req_resp_const.py | 28 +++
tests/metagpt/provider/test_dashscope_api.py | 61 +++++
9 files changed, 371 insertions(+), 1 deletion(-)
create mode 100644 metagpt/provider/dashscope_api.py
create mode 100644 tests/metagpt/provider/test_dashscope_api.py
diff --git a/examples/llm_hello_world.py b/examples/llm_hello_world.py
index e22edbdf2..9340f7d5a 100644
--- a/examples/llm_hello_world.py
+++ b/examples/llm_hello_world.py
@@ -21,7 +21,7 @@ async def main():
logger.info(
await llm.aask(
- "who are you", system_msgs=["act as a robot, answer 'I'am robot' if the question is 'who are you'"]
+ "who are you", system_msgs=["act as a robot, just answer 'I'am robot' if the question is 'who are you'"]
)
)
diff --git a/metagpt/configs/llm_config.py b/metagpt/configs/llm_config.py
index 1b05b5270..36f5d7ae7 100644
--- a/metagpt/configs/llm_config.py
+++ b/metagpt/configs/llm_config.py
@@ -25,6 +25,7 @@ class LLMType(Enum):
AZURE = "azure"
OLLAMA = "ollama"
QIANFAN = "qianfan" # Baidu BCE
+ DASHSCOPE = "dashscope" # Aliyun LingJi DashScope
def __missing__(self, key):
return self.OPENAI
diff --git a/metagpt/provider/__init__.py b/metagpt/provider/__init__.py
index 8c0aab836..44e6d3f3b 100644
--- a/metagpt/provider/__init__.py
+++ b/metagpt/provider/__init__.py
@@ -17,6 +17,7 @@ from metagpt.provider.metagpt_api import MetaGPTLLM
from metagpt.provider.human_provider import HumanProvider
from metagpt.provider.spark_api import SparkLLM
from metagpt.provider.qianfan_api import QianFanLLM
+from metagpt.provider.dashscope_api import DashScopeLLM
__all__ = [
"FireworksLLM",
@@ -30,4 +31,5 @@ __all__ = [
"HumanProvider",
"SparkLLM",
"QianFanLLM",
+ "DashScopeLLM",
]
diff --git a/metagpt/provider/dashscope_api.py b/metagpt/provider/dashscope_api.py
new file mode 100644
index 000000000..58031e452
--- /dev/null
+++ b/metagpt/provider/dashscope_api.py
@@ -0,0 +1,246 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Desc :
+
+import json
+from http import HTTPStatus
+from typing import Any, AsyncGenerator, Dict, List, Union
+
+import dashscope
+from dashscope.aigc.generation import Generation
+from dashscope.api_entities.aiohttp_request import AioHttpRequest
+from dashscope.api_entities.api_request_data import ApiRequestData
+from dashscope.api_entities.api_request_factory import _get_protocol_params
+from dashscope.api_entities.dashscope_response import (
+ GenerationOutput,
+ GenerationResponse,
+ Message,
+)
+from dashscope.client.base_api import BaseAioApi
+from dashscope.common.constants import SERVICE_API_PATH, ApiProtocol
+from dashscope.common.error import (
+ InputDataRequired,
+ InputRequired,
+ ModelRequired,
+ UnsupportedApiProtocol,
+)
+from tenacity import (
+ after_log,
+ retry,
+ retry_if_exception_type,
+ stop_after_attempt,
+ wait_random_exponential,
+)
+
+from metagpt.logs import log_llm_stream, logger
+from metagpt.provider.base_llm import BaseLLM, LLMConfig
+from metagpt.provider.llm_provider_registry import LLMType, register_provider
+from metagpt.provider.openai_api import log_and_reraise
+from metagpt.utils.cost_manager import CostManager
+from metagpt.utils.token_counter import DashScore_TOKEN_COSTS
+
+
+def build_api_arequest(
+ model: str, input: object, task_group: str, task: str, function: str, api_key: str, is_service=True, **kwargs
+):
+ (
+ api_protocol,
+ ws_stream_mode,
+ is_binary_input,
+ http_method,
+ stream,
+ async_request,
+ query,
+ headers,
+ request_timeout,
+ form,
+ resources,
+ ) = _get_protocol_params(kwargs)
+ task_id = kwargs.pop("task_id", None)
+ if api_protocol in [ApiProtocol.HTTP, ApiProtocol.HTTPS]:
+ if not dashscope.base_http_api_url.endswith("/"):
+ http_url = dashscope.base_http_api_url + "/"
+ else:
+ http_url = dashscope.base_http_api_url
+
+ if is_service:
+ http_url = http_url + SERVICE_API_PATH + "/"
+
+ if task_group:
+ http_url += "%s/" % task_group
+ if task:
+ http_url += "%s/" % task
+ if function:
+ http_url += function
+ request = AioHttpRequest(
+ url=http_url,
+ api_key=api_key,
+ http_method=http_method,
+ stream=stream,
+ async_request=async_request,
+ query=query,
+ timeout=request_timeout,
+ task_id=task_id,
+ )
+ else:
+ raise UnsupportedApiProtocol("Unsupported protocol: %s, support [http, https, websocket]" % api_protocol)
+
+ if headers is not None:
+ request.add_headers(headers=headers)
+
+ if input is None and form is None:
+ raise InputDataRequired("There is no input data and form data")
+
+ request_data = ApiRequestData(
+ model,
+ task_group=task_group,
+ task=task,
+ function=function,
+ input=input,
+ form=form,
+ is_binary_input=is_binary_input,
+ api_protocol=api_protocol,
+ )
+ request_data.add_resources(resources)
+ request_data.add_parameters(**kwargs)
+ request.data = request_data
+ return request
+
+
+class AGeneration(Generation, BaseAioApi):
+ @classmethod
+ async def acall(
+ cls,
+ model: str,
+ prompt: Any = None,
+ history: list = None,
+ api_key: str = None,
+ messages: List[Message] = None,
+ plugins: Union[str, Dict[str, Any]] = None,
+ **kwargs,
+ ) -> Union[GenerationResponse, AsyncGenerator[GenerationResponse, None]]:
+ if (prompt is None or not prompt) and (messages is None or not messages):
+ raise InputRequired("prompt or messages is required!")
+ if model is None or not model:
+ raise ModelRequired("Model is required!")
+ task_group, function = "aigc", "generation" # fixed value
+ if plugins is not None:
+ headers = kwargs.pop("headers", {})
+ if isinstance(plugins, str):
+ headers["X-DashScope-Plugin"] = plugins
+ else:
+ headers["X-DashScope-Plugin"] = json.dumps(plugins)
+ kwargs["headers"] = headers
+ input, parameters = cls._build_input_parameters(model, prompt, history, messages, **kwargs)
+
+ api_key, model = BaseAioApi._validate_params(api_key, model)
+ request = build_api_arequest(
+ model=model,
+ input=input,
+ task_group=task_group,
+ task=Generation.task,
+ function=function,
+ api_key=api_key,
+ **kwargs,
+ )
+ response = await request.aio_call()
+ is_stream = kwargs.get("stream", False)
+ if is_stream:
+
+ async def aresp_iterator(response):
+ async for resp in response:
+ yield GenerationResponse.from_api_response(resp)
+
+ return aresp_iterator(response)
+ else:
+ return GenerationResponse.from_api_response(response)
+
+
+@register_provider(LLMType.DASHSCOPE)
+class DashScopeLLM(BaseLLM):
+ def __init__(self, llm_config: LLMConfig):
+ self.config = llm_config
+ self.use_system_prompt = False # only some models support system_prompt
+ self.__init_dashscope()
+ self.cost_manager = CostManager(token_costs=self.token_costs)
+
+ def __init_dashscope(self):
+ self.model = self.config.model
+ self.api_key = self.config.api_key
+ self.token_costs = DashScore_TOKEN_COSTS
+ self.aclient: AGeneration = AGeneration
+
+        # check which models support a system prompt / system message
+ support_system_models = [
+ "qwen-", # all support
+ "llama2-", # all support
+ "baichuan2-7b-chat-v1",
+ "chatglm3-6b",
+ ]
+ for support_model in support_system_models:
+ if support_model in self.model:
+ self.use_system_prompt = True
+
+ def _const_kwargs(self, messages: list[dict], stream: bool = False) -> dict:
+ kwargs = {
+ "api_key": self.api_key,
+ "model": self.model,
+ "messages": messages,
+ "stream": stream,
+ "result_format": "message",
+ }
+ if self.config.temperature > 0:
+            # each model has its own default temperature; only override it when explicitly configured
+ kwargs["temperature"] = self.config.temperature
+ return kwargs
+
+ def _check_response(self, resp: GenerationResponse):
+ if resp.status_code != HTTPStatus.OK:
+ raise RuntimeError(f"code: {resp.code}, request_id: {resp.request_id}, message: {resp.message}")
+
+ def get_choice_text(self, output: GenerationOutput) -> str:
+ return output.get("choices", [{}])[0].get("message", {}).get("content", "")
+
+ def completion(self, messages: list[dict]) -> GenerationOutput:
+ resp: GenerationResponse = self.aclient.call(**self._const_kwargs(messages, stream=False))
+ self._check_response(resp)
+
+ self._update_costs(dict(resp.usage))
+ return resp.output
+
+ async def _achat_completion(self, messages: list[dict]) -> GenerationOutput:
+ resp: GenerationResponse = await self.aclient.acall(**self._const_kwargs(messages, stream=False))
+ self._check_response(resp)
+ self._update_costs(dict(resp.usage))
+ return resp.output
+
+ async def acompletion(self, messages: list[dict], timeout=3) -> GenerationOutput:
+ return await self._achat_completion(messages)
+
+ async def _achat_completion_stream(self, messages: list[dict]) -> str:
+ resp = await self.aclient.acall(**self._const_kwargs(messages, stream=True))
+ collected_content = []
+ usage = {}
+ async for chunk in resp:
+ self._check_response(chunk)
+ content = chunk.output.choices[0]["message"]["content"]
+ usage = dict(chunk.usage) # each chunk has usage
+ log_llm_stream(content)
+ collected_content.append(content)
+ log_llm_stream("\n")
+ self._update_costs(usage)
+ full_content = "".join(collected_content)
+ return full_content
+
+ @retry(
+ stop=stop_after_attempt(3),
+ wait=wait_random_exponential(min=1, max=60),
+ after=after_log(logger, logger.level("WARNING").name),
+ retry=retry_if_exception_type(ConnectionError),
+ retry_error_callback=log_and_reraise,
+ )
+ async def acompletion_text(self, messages: list[dict], stream=False, timeout: int = 3) -> str:
+ if stream:
+ return await self._achat_completion_stream(messages)
+ resp = await self._achat_completion(messages)
+ return self.get_choice_text(resp)
diff --git a/metagpt/utils/token_counter.py b/metagpt/utils/token_counter.py
index b69ec73d3..cac706f6c 100644
--- a/metagpt/utils/token_counter.py
+++ b/metagpt/utils/token_counter.py
@@ -90,6 +90,35 @@ QianFan_EndPoint_TOKEN_COSTS = {
"yi_34b_chat": QianFan_MODEL_TOKEN_COSTS["Yi-34B-Chat"],
}
+"""
+DashScope token prices: https://help.aliyun.com/zh/dashscope/developer-reference/tongyi-thousand-questions-metering-and-billing
+Each model has its own pricing detail page. Note that some models are free for a limited time.
+"""
+DashScore_TOKEN_COSTS = {
+ "qwen-turbo": {"prompt": 0.0011, "completion": 0.0011},
+ "qwen-plus": {"prompt": 0.0028, "completion": 0.0028},
+ "qwen-max": {"prompt": 0.0, "completion": 0.0},
+ "qwen-max-1201": {"prompt": 0.0, "completion": 0.0},
+ "qwen-max-longcontext": {"prompt": 0.0, "completion": 0.0},
+ "llama2-7b-chat-v2": {"prompt": 0.0, "completion": 0.0},
+ "llama2-13b-chat-v2": {"prompt": 0.0, "completion": 0.0},
+ "qwen-72b-chat": {"prompt": 0.0, "completion": 0.0},
+ "qwen-14b-chat": {"prompt": 0.0011, "completion": 0.0011},
+ "qwen-7b-chat": {"prompt": 0.00084, "completion": 0.00084},
+ "qwen-1.8b-chat": {"prompt": 0.0, "completion": 0.0},
+ "baichuan2-13b-chat-v1": {"prompt": 0.0011, "completion": 0.0011},
+ "baichuan2-7b-chat-v1": {"prompt": 0.00084, "completion": 0.00084},
+ "baichuan-7b-v1": {"prompt": 0.0, "completion": 0.0},
+ "chatglm-6b-v2": {"prompt": 0.0011, "completion": 0.0011},
+ "chatglm3-6b": {"prompt": 0.0, "completion": 0.0},
+ "ziya-llama-13b-v1": {"prompt": 0.0, "completion": 0.0}, # no price page, judge it as free
+ "dolly-12b-v2": {"prompt": 0.0, "completion": 0.0},
+ "belle-llama-13b-2m-v1": {"prompt": 0.0, "completion": 0.0},
+ "moss-moon-003-sft-v1": {"prompt": 0.0, "completion": 0.0},
+ "chatyuan-large-v2": {"prompt": 0.0, "completion": 0.0},
+ "billa-7b-sft-v1": {"prompt": 0.0, "completion": 0.0},
+}
+
TOKEN_MAX = {
"gpt-3.5-turbo": 4096,
diff --git a/requirements.txt b/requirements.txt
index c893bd713..ff5f97252 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -68,3 +68,4 @@ anytree
ipywidgets==8.1.1
Pillow
qianfan==0.3.1
+dashscope==1.14.1
diff --git a/tests/metagpt/provider/mock_llm_config.py b/tests/metagpt/provider/mock_llm_config.py
index e0afaa51e..bb5508d80 100644
--- a/tests/metagpt/provider/mock_llm_config.py
+++ b/tests/metagpt/provider/mock_llm_config.py
@@ -54,3 +54,5 @@ mock_llm_config_spark = LLMConfig(
)
mock_llm_config_qianfan = LLMConfig(api_type="qianfan", access_key="xxx", secret_key="xxx", model="ERNIE-Bot-turbo")
+
+mock_llm_config_dashscope = LLMConfig(api_type="dashscore", api_key="xxx", model="qwen-max")
diff --git a/tests/metagpt/provider/req_resp_const.py b/tests/metagpt/provider/req_resp_const.py
index 73939e1c6..802962013 100644
--- a/tests/metagpt/provider/req_resp_const.py
+++ b/tests/metagpt/provider/req_resp_const.py
@@ -3,6 +3,12 @@
# @Desc : default request & response data for provider unittest
+from dashscope.api_entities.dashscope_response import (
+ DashScopeAPIResponse,
+ GenerationOutput,
+ GenerationResponse,
+ GenerationUsage,
+)
from openai.types.chat.chat_completion import (
ChatCompletion,
ChatCompletionMessage,
@@ -102,6 +108,28 @@ def get_qianfan_response(name: str) -> QfResponse:
return QfResponse(code=200, body=qf_jsonbody_dict)
+# For DashScope
+def get_dashscope_response(name: str) -> GenerationResponse:
+ return GenerationResponse.from_api_response(
+ DashScopeAPIResponse(
+ status_code=200,
+ output=GenerationOutput(
+ **{
+ "text": "",
+ "finish_reason": "",
+ "choices": [
+ {
+ "finish_reason": "stop",
+ "message": {"role": "assistant", "content": resp_cont_tmpl.format(name=name)},
+ }
+ ],
+ }
+ ),
+ usage=GenerationUsage(**{"input_tokens": 12, "output_tokens": 98, "total_tokens": 110}),
+ )
+ )
+
+
# For llm general chat functions call
async def llm_general_chat_funcs_test(llm: BaseLLM, prompt: str, messages: list[dict], resp_cont: str):
resp = await llm.aask(prompt, stream=False)
diff --git a/tests/metagpt/provider/test_dashscope_api.py b/tests/metagpt/provider/test_dashscope_api.py
new file mode 100644
index 000000000..48a676bc7
--- /dev/null
+++ b/tests/metagpt/provider/test_dashscope_api.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Desc : the unittest of DashScopeLLM
+
+from typing import AsyncGenerator, Union
+
+import pytest
+from dashscope.api_entities.dashscope_response import GenerationResponse
+
+from metagpt.provider.dashscope_api import DashScopeLLM
+from tests.metagpt.provider.mock_llm_config import mock_llm_config_dashscope
+from tests.metagpt.provider.req_resp_const import (
+ get_dashscope_response,
+ llm_general_chat_funcs_test,
+ messages,
+ prompt,
+ resp_cont_tmpl,
+)
+
+name = "qwen-max"
+resp_cont = resp_cont_tmpl.format(name=name)
+
+
+@classmethod
+def mock_dashscope_call(
+ cls, messages: list[dict], model: str, api_key: str, result_format: str, stream: bool = False
+) -> GenerationResponse:
+ return get_dashscope_response(name)
+
+
+@classmethod
+async def mock_dashscope_acall(
+ cls, messages: list[dict], model: str, api_key: str, result_format: str, stream: bool = False
+) -> Union[AsyncGenerator[GenerationResponse, None], GenerationResponse]:
+ resps = [get_dashscope_response(name)]
+
+ if stream:
+
+ async def aresp_iterator(resps: list[GenerationResponse]):
+ for resp in resps:
+ yield resp
+
+ return aresp_iterator(resps)
+ else:
+ return resps[0]
+
+
+@pytest.mark.asyncio
+async def test_dashscope_acompletion(mocker):
+ mocker.patch("dashscope.aigc.generation.Generation.call", mock_dashscope_call)
+ mocker.patch("metagpt.provider.dashscope_api.AGeneration.acall", mock_dashscope_acall)
+
+ dashscore_llm = DashScopeLLM(mock_llm_config_dashscope)
+
+ resp = dashscore_llm.completion(messages)
+ assert resp.choices[0]["message"]["content"] == resp_cont
+
+ resp = await dashscore_llm.acompletion(messages)
+ assert resp.choices[0]["message"]["content"] == resp_cont
+
+ await llm_general_chat_funcs_test(dashscore_llm, prompt, messages, resp_cont)
From e22f5c72ec7936b7a8f156208dc5be628a21de13 Mon Sep 17 00:00:00 2001
From: better629
Date: Wed, 7 Feb 2024 23:01:40 +0800
Subject: [PATCH 029/104] update spell-mistake
---
tests/metagpt/provider/mock_llm_config.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/metagpt/provider/mock_llm_config.py b/tests/metagpt/provider/mock_llm_config.py
index bb5508d80..e75acf68f 100644
--- a/tests/metagpt/provider/mock_llm_config.py
+++ b/tests/metagpt/provider/mock_llm_config.py
@@ -55,4 +55,4 @@ mock_llm_config_spark = LLMConfig(
mock_llm_config_qianfan = LLMConfig(api_type="qianfan", access_key="xxx", secret_key="xxx", model="ERNIE-Bot-turbo")
-mock_llm_config_dashscope = LLMConfig(api_type="dashscore", api_key="xxx", model="qwen-max")
+mock_llm_config_dashscope = LLMConfig(api_type="dashscope", api_key="xxx", model="qwen-max")
From 4dde9b71304bd3260ae0f675567fd0a0bfc2b085 Mon Sep 17 00:00:00 2001
From: better629
Date: Wed, 7 Feb 2024 23:15:56 +0800
Subject: [PATCH 030/104] stream using log_llm_stream
---
metagpt/provider/fireworks_api.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/metagpt/provider/fireworks_api.py b/metagpt/provider/fireworks_api.py
index e62a7066e..d0394296d 100644
--- a/metagpt/provider/fireworks_api.py
+++ b/metagpt/provider/fireworks_api.py
@@ -16,7 +16,7 @@ from tenacity import (
)
from metagpt.configs.llm_config import LLMConfig, LLMType
-from metagpt.logs import logger
+from metagpt.logs import log_llm_stream, logger
from metagpt.provider.llm_provider_registry import register_provider
from metagpt.provider.openai_api import OpenAILLM, log_and_reraise
from metagpt.utils.cost_manager import CostManager
@@ -96,10 +96,11 @@ class FireworksLLM(OpenAILLM):
finish_reason = choice.finish_reason if hasattr(choice, "finish_reason") else None
if choice_delta.content:
collected_content.append(choice_delta.content)
- print(choice_delta.content, end="")
+ log_llm_stream(choice_delta.content)
if finish_reason:
# fireworks api return usage when finish_reason is not None
usage = CompletionUsage(**chunk.usage)
+ log_llm_stream("\n")
full_content = "".join(collected_content)
self._update_costs(usage.model_dump())
From d2f10ea90ca1ffa8e5b571017f2ab548db8d1897 Mon Sep 17 00:00:00 2001
From: yzlin
Date: Thu, 8 Feb 2024 00:05:00 +0800
Subject: [PATCH 031/104] add ocr example, modify email example
---
examples/ci/email_summary.py | 14 ++++--
examples/ci/ocr_receipt.py | 19 ++++++++
metagpt/tools/libs/email_login.py | 14 ++----
requirements.txt | 1 +
tests/metagpt/tools/libs/test_email_login.py | 49 ++------------------
5 files changed, 37 insertions(+), 60 deletions(-)
create mode 100644 examples/ci/ocr_receipt.py
diff --git a/examples/ci/email_summary.py b/examples/ci/email_summary.py
index dd8dd8c8e..d6fa283ca 100644
--- a/examples/ci/email_summary.py
+++ b/examples/ci/email_summary.py
@@ -4,17 +4,23 @@
@Author : Tuo Zhou
@File : email_summary.py
"""
+import os
from metagpt.roles.ci.code_interpreter import CodeInterpreter
async def main():
- # For email response prompt
email_account = "your_email_account"
- # prompt = f"""I will give you your Outlook email account({email_account}) and password(email_password item in the environment variable). You need to find the latest email in my inbox with the sender's suffix @qq.com and reply to him "Thank you! I have received your email~"""""
- prompt = f"""I will give you your Outlook email account({email_account}) and password(email_password item in the environment variable).
+ # your password will stay only on your device and not go to LLM api
+ os.environ["email_password"] = "your_email_password"
+
+ ### Prompt for automatic email reply, uncomment to try this too ###
+ # prompt = f"""I will give you your Outlook email account ({email_account}) and password (email_password item in the environment variable). You need to find the latest email in my inbox with the sender's suffix @gmail.com and reply "Thank you! I have received your email~"""""
+
+ ### Prompt for automatic email summary ###
+ prompt = f"""I will give you your Outlook email account ({email_account}) and password (email_password item in the environment variable).
Firstly, Please help me fetch the latest 5 senders and full letter contents.
- Then, summarize each of the 5 emails into one sentence(you can do this by yourself, no need import other models to do this) and output them in a markdown format."""
+ Then, summarize each of the 5 emails into one sentence (you can do this by yourself, no need to import other models to do this) and output them in a markdown format."""
ci = CodeInterpreter(use_tools=True)
diff --git a/examples/ci/ocr_receipt.py b/examples/ci/ocr_receipt.py
new file mode 100644
index 000000000..dc2db2d01
--- /dev/null
+++ b/examples/ci/ocr_receipt.py
@@ -0,0 +1,19 @@
+from metagpt.roles.ci.code_interpreter import CodeInterpreter
+
+
+async def main():
+ # Notice: pip install metagpt[ocr] before using this example
+ image_path = "image.jpg"
+ language = "English"
+ requirement = f"""This is a {language} invoice image.
+ Your goal is to perform OCR on images using PaddleOCR, then extract the total amount from ocr text results, and finally save as table. Image path: {image_path}.
+ NOTE: The environments for Paddle and PaddleOCR are all ready and has been fully installed."""
+ ci = CodeInterpreter()
+
+ await ci.run(requirement)
+
+
+if __name__ == "__main__":
+ import asyncio
+
+ asyncio.run(main())
diff --git a/metagpt/tools/libs/email_login.py b/metagpt/tools/libs/email_login.py
index 8fd77274c..79734665e 100644
--- a/metagpt/tools/libs/email_login.py
+++ b/metagpt/tools/libs/email_login.py
@@ -1,6 +1,5 @@
from imap_tools import MailBox
-from metagpt.logs import logger
from metagpt.tools.tool_registry import register_tool
from metagpt.tools.tool_type import ToolType
@@ -44,15 +43,8 @@ def email_login_imap(email_address, email_password):
# Determine the correct IMAP server
imap_server = IMAP_SERVERS.get(domain)
- if not imap_server:
- logger.error(f"IMAP server for {domain} not found.")
- return None
+ assert imap_server, f"IMAP server for {domain} not found."
# Attempt to log in to the email account
- try:
- mailbox = MailBox(imap_server).login(email_address, email_password)
- logger.info("Login successful")
- return mailbox
- except Exception as e:
- logger.error(f"Login failed: {e}")
- return None
+ mailbox = MailBox(imap_server).login(email_address, email_password)
+ return mailbox
diff --git a/requirements.txt b/requirements.txt
index 6cb25d52b..1357f2e5d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -67,3 +67,4 @@ google-generativeai==0.3.2
anytree
ipywidgets==8.1.1
Pillow
+imap_tools==1.5.0 # Used by metagpt/tools/libs/email_login.py
diff --git a/tests/metagpt/tools/libs/test_email_login.py b/tests/metagpt/tools/libs/test_email_login.py
index c18d15c7d..e98820f70 100644
--- a/tests/metagpt/tools/libs/test_email_login.py
+++ b/tests/metagpt/tools/libs/test_email_login.py
@@ -1,48 +1,7 @@
-import os
-
-import pytest
-
from metagpt.tools.libs.email_login import email_login_imap
-# Configuration for the test IMAP servers
-TEST_IMAP_SERVERS = {"outlook.com": "imap-mail.outlook.com"}
-# Setup correct and incorrect email information
-correct_email_address = "englishgpt@outlook.com"
-correct_email_password = os.environ.get("outlook_email_password")
-incorrect_email_address = "test@unknown.com"
-incorrect_email_password = "incorrect_password"
-
-
-@pytest.fixture
-def imap_server_setup(mocker):
- # Use the mocker fixture to mock the MailBox class
- mock_mailbox = mocker.patch("metagpt.tools.libs.email_login.MailBox")
- mock_mail_instance = mocker.Mock()
- mock_mail_instance.login.return_value = mock_mail_instance
- mock_mailbox.return_value = mock_mail_instance
- return mock_mail_instance
-
-
-def test_email_login_imap_success(imap_server_setup):
- # Mock successful login
- mailbox = email_login_imap(correct_email_address, correct_email_password)
- assert mailbox is not None
- # Correctly assert that the login method of the MailBox mock was called with the correct arguments
- imap_server_setup.login.assert_called_with(correct_email_address, correct_email_password)
-
-
-def test_email_login_imap_failure_due_to_incorrect_server(imap_server_setup):
- # Attempt to login with an incorrect server
- mailbox = email_login_imap(incorrect_email_address, incorrect_email_password)
- assert mailbox is None
-
-
-def test_email_login_imap_failure_due_to_wrong_credentials(imap_server_setup):
- # Configure mock to throw an exception to simulate login failure due to incorrect credentials
- imap_server_setup.login.side_effect = Exception("Login failed")
- # Attempt to login which should simulate a failure
- mailbox = email_login_imap(correct_email_address, incorrect_email_password)
- assert mailbox is None
- # Verify that the login method was called with the expected arguments
- imap_server_setup.login.assert_called_with(correct_email_address, incorrect_email_password)
+def test_email_login(mocker):
+ mock_mailbox = mocker.patch("metagpt.tools.libs.email_login.MailBox.login")
+ mock_mailbox.login.return_value = mocker.Mock()
+ email_login_imap("test@outlook.com", "test_password")
From 8e6d722b8fe42e8e6504b24c053a55fefbe77c55 Mon Sep 17 00:00:00 2001
From: yzlin
Date: Thu, 8 Feb 2024 00:17:31 +0800
Subject: [PATCH 032/104] format
---
tests/metagpt/provider/test_openai.py | 2 --
1 file changed, 2 deletions(-)
diff --git a/tests/metagpt/provider/test_openai.py b/tests/metagpt/provider/test_openai.py
index b9db4dfb6..96c08a867 100644
--- a/tests/metagpt/provider/test_openai.py
+++ b/tests/metagpt/provider/test_openai.py
@@ -1,5 +1,3 @@
-import json
-
import pytest
from openai.types.chat import (
ChatCompletion,
From 0949f166ae7a2fb7fe145360f187c800bfb86c1c Mon Sep 17 00:00:00 2001
From: yzlin
Date: Thu, 8 Feb 2024 00:26:59 +0800
Subject: [PATCH 033/104] format
---
metagpt/tools/tool_type.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/metagpt/tools/tool_type.py b/metagpt/tools/tool_type.py
index bee9a98eb..e9c9a3482 100644
--- a/metagpt/tools/tool_type.py
+++ b/metagpt/tools/tool_type.py
@@ -1,8 +1,8 @@
from enum import Enum
from metagpt.prompts.tool_types import (
- EDA_PROMPT,
DATA_PREPROCESS_PROMPT,
+ EDA_PROMPT,
FEATURE_ENGINEERING_PROMPT,
IMAGE2WEBPAGE_PROMPT,
MODEL_EVALUATE_PROMPT,
From 88b9bf91513ab4407720dfc5a899d317257f75bd Mon Sep 17 00:00:00 2001
From: shenchucheng
Date: Thu, 8 Feb 2024 00:46:04 +0800
Subject: [PATCH 034/104] add ut mocker for mermaid ink
---
tests/conftest.py | 45 +++++++++++++++++++++++++++--
tests/data/mermaid_rsp_cache.json | 4 +++
tests/metagpt/utils/test_mermaid.py | 2 +-
tests/mock/mock_aiohttp.py | 13 +++++++++
4 files changed, 60 insertions(+), 4 deletions(-)
create mode 100644 tests/data/mermaid_rsp_cache.json
diff --git a/tests/conftest.py b/tests/conftest.py
index 42b460357..06113443a 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -14,6 +14,7 @@ import re
import uuid
from typing import Callable
+import aiohttp.web
import pytest
from metagpt.const import DEFAULT_WORKSPACE_ROOT, TEST_DATA_PATH
@@ -168,9 +169,8 @@ def new_filename(mocker):
yield mocker
-@pytest.fixture(scope="session")
-def search_rsp_cache():
- rsp_cache_file_path = TEST_DATA_PATH / "search_rsp_cache.json" # read repo-provided
+def _rsp_cache(name):
+ rsp_cache_file_path = TEST_DATA_PATH / f"{name}.json" # read repo-provided
if os.path.exists(rsp_cache_file_path):
with open(rsp_cache_file_path, "r") as f1:
rsp_cache_json = json.load(f1)
@@ -181,6 +181,16 @@ def search_rsp_cache():
json.dump(rsp_cache_json, f2, indent=4, ensure_ascii=False)
+@pytest.fixture(scope="session")
+def search_rsp_cache():
+ yield from _rsp_cache("search_rsp_cache")
+
+
+@pytest.fixture(scope="session")
+def mermaid_rsp_cache():
+ yield from _rsp_cache("mermaid_rsp_cache")
+
+
@pytest.fixture
def aiohttp_mocker(mocker):
MockResponse = type("MockResponse", (MockAioResponse,), {})
@@ -228,3 +238,32 @@ def search_engine_mocker(aiohttp_mocker, curl_cffi_mocker, httplib2_mocker, sear
aiohttp_mocker.rsp_cache = httplib2_mocker.rsp_cache = curl_cffi_mocker.rsp_cache = search_rsp_cache
aiohttp_mocker.check_funcs = httplib2_mocker.check_funcs = curl_cffi_mocker.check_funcs = check_funcs
yield check_funcs
+
+
+@pytest.fixture
+def http_server():
+ async def handler(request):
+ return aiohttp.web.Response(
+ text="""
+ MetaGPT
MetaGPT
""",
+ content_type="text/html",
+ )
+
+ async def start():
+ server = aiohttp.web.Server(handler)
+ runner = aiohttp.web.ServerRunner(server)
+ await runner.setup()
+ site = aiohttp.web.TCPSite(runner, "localhost", 0)
+ await site.start()
+ host, port = site._server.sockets[0].getsockname()
+ return site, f"http://{host}:{port}"
+
+ return start
+
+
+@pytest.fixture
+def mermaid_mocker(aiohttp_mocker, mermaid_rsp_cache):
+ check_funcs: dict[tuple[str, str], Callable[[dict], str]] = {}
+ aiohttp_mocker.rsp_cache = mermaid_rsp_cache
+ aiohttp_mocker.check_funcs = check_funcs
+ yield check_funcs
diff --git a/tests/data/mermaid_rsp_cache.json b/tests/data/mermaid_rsp_cache.json
new file mode 100644
index 000000000..14c717e65
--- /dev/null
+++ b/tests/data/mermaid_rsp_cache.json
@@ -0,0 +1,4 @@
+{
+ "aiohttp-get-https://mermaid.ink/svg/CmNsYXNzRGlhZ3JhbQogICAgY2xhc3MgTWFpbiB7CiAgICAgICAgLVNlYXJjaEVuZ2luZSBzZWFyY2hfZW5naW5lCiAgICAgICAgK21haW4oKSBzdHIKICAgIH0KICAgIGNsYXNzIFNlYXJjaEVuZ2luZSB7CiAgICAgICAgLUluZGV4IGluZGV4CiAgICAgICAgLVJhbmtpbmcgcmFua2luZwogICAgICAgIC1TdW1tYXJ5IHN1bW1hcnkKICAgICAgICArc2VhcmNoKHF1ZXJ5OiBzdHIpIHN0cgogICAgfQogICAgY2xhc3MgSW5kZXggewogICAgICAgIC1Lbm93bGVkZ2VCYXNlIGtub3dsZWRnZV9iYXNlCiAgICAgICAgK2NyZWF0ZV9pbmRleChkYXRhOiBkaWN0KQogICAgICAgICtxdWVyeV9pbmRleChxdWVyeTogc3RyKSBsaXN0CiAgICB9CiAgICBjbGFzcyBSYW5raW5nIHsKICAgICAgICArcmFua19yZXN1bHRzKHJlc3VsdHM6IGxpc3QpIGxpc3QKICAgIH0KICAgIGNsYXNzIFN1bW1hcnkgewogICAgICAgICtzdW1tYXJpemVfcmVzdWx0cyhyZXN1bHRzOiBsaXN0KSBzdHIKICAgIH0KICAgIGNsYXNzIEtub3dsZWRnZUJhc2UgewogICAgICAgICt1cGRhdGUoZGF0YTogZGljdCkKICAgICAgICArZmV0Y2hfZGF0YShxdWVyeTogc3RyKSBkaWN0CiAgICB9CiAgICBNYWluIC0tPiBTZWFyY2hFbmdpbmUKICAgIFNlYXJjaEVuZ2luZSAtLT4gSW5kZXgKICAgIFNlYXJjaEVuZ2luZSAtLT4gUmFua2luZwogICAgU2VhcmNoRW5naW5lIC0tPiBTdW1tYXJ5CiAgICBJbmRleCAtLT4gS25vd2xlZGdlQmFzZQo=-{}": "b''",
+ "aiohttp-get-https://mermaid.ink/img/CmNsYXNzRGlhZ3JhbQogICAgY2xhc3MgTWFpbiB7CiAgICAgICAgLVNlYXJjaEVuZ2luZSBzZWFyY2hfZW5naW5lCiAgICAgICAgK21haW4oKSBzdHIKICAgIH0KICAgIGNsYXNzIFNlYXJjaEVuZ2luZSB7CiAgICAgICAgLUluZGV4IGluZGV4CiAgICAgICAgLVJhbmtpbmcgcmFua2luZwogICAgICAgIC1TdW1tYXJ5IHN1bW1hcnkKICAgICAgICArc2VhcmNoKHF1ZXJ5OiBzdHIpIHN0cgogICAgfQogICAgY2xhc3MgSW5kZXggewogICAgICAgIC1Lbm93bGVkZ2VCYXNlIGtub3dsZWRnZV9iYXNlCiAgICAgICAgK2NyZWF0ZV9pbmRleChkYXRhOiBkaWN0KQogICAgICAgICtxdWVyeV9pbmRleChxdWVyeTogc3RyKSBsaXN0CiAgICB9CiAgICBjbGFzcyBSYW5raW5nIHsKICAgICAgICArcmFua19yZXN1bHRzKHJlc3VsdHM6IGxpc3QpIGxpc3QKICAgIH0KICAgIGNsYXNzIFN1bW1hcnkgewogICAgICAgICtzdW1tYXJpemVfcmVzdWx0cyhyZXN1bHRzOiBsaXN0KSBzdHIKICAgIH0KICAgIGNsYXNzIEtub3dsZWRnZUJhc2UgewogICAgICAgICt1cGRhdGUoZGF0YTogZGljdCkKICAgICAgICArZmV0Y2hfZGF0YShxdWVyeTogc3RyKSBkaWN0CiAgICB9CiAgICBNYWluIC0tPiBTZWFyY2hFbmdpbmUKICAgIFNlYXJjaEVuZ2luZSAtLT4gSW5kZXgKICAgIFNlYXJjaEVuZ2luZSAtLT4gUmFua2luZwogICAgU2VhcmNoRW5naW5lIC0tPiBTdW1tYXJ5CiAgICBJbmRleCAtLT4gS25vd2xlZGdlQmFzZQo=-{}": "b'\\xff\\xd8\\xff\\xe0\\x00\\x10JFIF\\x00\\x01\\x01\\x00\\x00\\x01\\x00\\x01\\x00\\x00\\xff\\xe2\\x01\\xd8ICC_PROFILE\\x00\\x01\\x01\\x00\\x00\\x01\\xc8\\x00\\x00\\x00\\x00\\x040\\x00\\x00mntrRGB XYZ 
\\x07\\xe0\\x00\\x01\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00acsp\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xf6\\xd6\\x00\\x01\\x00\\x00\\x00\\x00\\xd3-\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\tdesc\\x00\\x00\\x00\\xf0\\x00\\x00\\x00$rXYZ\\x00\\x00\\x01\\x14\\x00\\x00\\x00\\x14gXYZ\\x00\\x00\\x01(\\x00\\x00\\x00\\x14bXYZ\\x00\\x00\\x01<\\x00\\x00\\x00\\x14wtpt\\x00\\x00\\x01P\\x00\\x00\\x00\\x14rTRC\\x00\\x00\\x01d\\x00\\x00\\x00(gTRC\\x00\\x00\\x01d\\x00\\x00\\x00(bTRC\\x00\\x00\\x01d\\x00\\x00\\x00(cprt\\x00\\x00\\x01\\x8c\\x00\\x00\\x00q\\xf5X\\xe3\\x0f+_\\xc9#\\xfa\\x88}\\x0f\\x96\\xbf\\x92G\\xf5\\x10\\xfa\\x1fA\\xc4\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x1e+\\xcbx\\xf8\\xfd-\\x85\\xa4\\xb5nE\\x83\\x1d\\xc9/+\\xf1!\\t5(\\xff\\x00\\xf8#\\x1e\\xd1\\x92v\\xb5v\\xed\\x1d\\x9bv\\x84\\xc6;W>\\xea\\xe6m[\\x95\\xf1\\xe1VG\\\\\\x89\\x0e\\x1b\\xe6L\\xa8\\xd0\\x84\\x11\\xa8\\xf4K\\x8aQ\\x99\\x17\"I\\x9f\\xc43T\\xe5\\xa6d\\x86&\\xf7os\\x87\\xd9\\x07\\x1a\\xda\\xc4\\x88u\\xa9\\xc8,n\\x1a\\xa7\\x93^\\x94\\xac\\xd8i\\xd2}]\\xfe\\x89\\xdf\\xde\\xfen\\xda\\x96\\x9dTz\\x1a\\x91\\xae\\xbe\\xe3\\xdf6\\x9f\\xdaGg;\\x19\\xb3\\x81\\x031\\xc9\\x13M&s%!\\x8da\\xc8y\\x06\\xd9\\xabt\\x96\\xa5\\xb6\\xda\\x92\\x82\\xd4\\xb4\\xd5FC\\x8co\\xfb\\x0c\\xde\\xb3\\x92d\\xb8\\x9b0\\xde{\\x01\\xae\\xc3\\xdf\\xba\\xa9a\\xb6\\x8dM/ 
v\\x02!n\\xeb\\xeeR\\xc9Q\\xcd\\xed\\x0b\\x99\\x1a\\xd3\\xff\\x001\\x0fNm\\xebg>\\xc7\\xe0S\\xe4x\\xfe\\xd3\\xd1W;\\x03\\x89\\x12\\xa6\\xa7\\x17aQ\\x18v\\xd5L\\x1brSh\\xa5n\\xa9\\t\\xef\\x0b\\xef\\\\2I\\xa3\\xe2=y\\xf8#\\x17\\x16\\x98\\x9c\\xd1\\xaf\\xe5\\xfd[\\xb4:\\xba\\xe7oqKo\\xd8\\x16\\x03OiM&5\\xedl\\x9b)(u\\x99J\\x90\\xebD\\xd1\\xae;\\x91]B\\x0e:\\x92{\\xab5o/]\\xdd4\\xe6e\\xaf\\xc6%\\xdb\\x17c\\xb9\\xcd\\xe5ME.m\\x1eT\\xebW\\t\\x88ir$\\x96P\\xeb\\xa7\\xeek\\xbcq\\xb4\\xa0\\x9c?\\x89\\x06d\\xa3\\xd4\\xb9s!\\x89\\xec\\xbf\\r\\xca\\x13\\x9avF\\x97/\\x1b\\xba\\x8a\\xd5\\x0e+g\\x06\\xd5\\xc9U\\xef6P\\x1d(m\\xb4\\x84>jO\\xdc\\xcdF\\x83$\\x92\\xb4\\xde\\xf8\\xb5\\x15,wf\\x19ln\\xc2[ \\xa4V%t\\xd6G]\\x99\\xb1:Er\\xab^)q\\x90V\\xb2\\x14o-\\xbd\\xdd\\xf4$\\x9bQ+x\\xc8\\x8buDz\\xe8c{\\\\Mf\\xde}\\xa3\\xdd-\\x0e\\xfe\\x00\\x01\\xeed\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x109\\xbf\\xfb\\x84\\xbf<\\x87\\xff\\x00\\x92\\xd0\\x9e\\x109\\xbf\\xfb\\x84\\xbf<\\x87\\xff\\x00\\x92\\xd0\\xeb\\x83\\xfc\\x94\\xfc\\xe1\\xaa\\x7ft4\\xc0\\x00\\x1f\\t\\xb0\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00Vv\\x9f\\xfd\\x1ae\\xbf\\xa2%\\xfe\\xe5b\\xcc+;O\\xfe\\x8d2\\xdf\\xd1\\x12\\xff\\x00r\\xb1\\xe8\\xf0\\xff\\x00\\xcdG\\xce>\\xab\\x1caSohX\\xf96\\x92\\xf8C\\xe2/\\xfd\\x97>\\xa8\\xfa\\xf5\\x85\\x8f\\xf8\\x87\\xec\\\\\\xfa\\xa2\\xc0\\xd7\\xf2H\\xfe\\xa2\\x1fC\\xeb\\xdf\\x0b\\xa6}c\\xd9\\xcfEw\\xd6\\x16?\\xe2\\x1f\\xb1s\\xea\\x87\\xac,\\x7f\\xc4?b\\xe7\\xd5\\x16 
\\x0b\\xe1t\\xcf\\xac{&\\x8a\\xef\\xac,\\x7f\\xc4?b\\xe7\\xd5\\x0fXX\\xff\\x00\\x88~\\xc5\\xcf\\xaa,@\\x17\\xc2\\xe9\\x9fX\\xf64W}ac\\xfe!\\xfb\\x17>\\xa8z\\xc2\\xc7\\xfcC\\xf6.}Qb\\x00\\xbe\\x17L\\xfa\\xc7\\xb1\\xa2\\xbb\\xeb\\x0b\\x1f\\xf1\\x0f\\xd8\\xb9\\xf5C\\xd6\\x16?\\xe2\\x1f\\xb1s\\xea\\x8b\\x10\\x05\\xf0\\xbag\\xd6=\\x8d\\x15\\xdfXX\\xff\\x00\\x88~\\xc5\\xcf\\xaa\\x1e\\xb0\\xb1\\xff\\x00\\x10\\xfd\\x8b\\x9fTX\\x80/\\x85\\xd3>\\xb1\\xech\\xae\\xfa\\xc2\\xc7\\xfcC\\xf6.}P\\xf5\\x85\\x8f\\xf8\\x87\\xec\\\\\\xfa\\xa2\\xc4\\x01|.\\x99\\xf5\\x8fcEw\\xd6\\x16?\\xe2\\x1f\\xb1s\\xea\\x87\\xac,\\x7f\\xc4?b\\xe7\\xd5\\x16 \\x0b\\xe1t\\xcf\\xac{\\x1a+\\xbe\\xb0\\xb1\\xff\\x00\\x10\\xfd\\x8b\\x9fT=ac\\xfe!\\xfb\\x17>\\xa8\\xb1\\x00_\\x0b\\xa6}c\\xd8\\xd1]\\xf5\\x85\\x8f\\xf8\\x87\\xec\\\\\\xfa\\xa1\\xeb\\x0b\\x1f\\xf1\\x0f\\xd8\\xb9\\xf5E\\x88\\x02\\xf8]3\\xeb\\x1e\\xc6\\x8a\\xef\\xac,\\x7f\\xc4?b\\xe7\\xd5\\x0fXX\\xff\\x00\\x88~\\xc5\\xcf\\xaa,@\\x17\\xc2\\xe9\\x9fX\\xf64W}ac\\xfe!\\xfb\\x17>\\xa8z\\xc2\\xc7\\xfcC\\xf6.}Qb\\x00\\xbe\\x17L\\xfa\\xc7\\xb1\\xa2\\xbb\\xeb\\x0b\\x1f\\xf1\\x0f\\xd8\\xb9\\xf5C\\xd6\\x16?\\xe2\\x1f\\xb1s\\xea\\x8b\\x10\\x05\\xf0\\xbag\\xd6=\\x8d\\x15\\xdfXX\\xff\\x00\\x88~\\xc5\\xcf\\xaa\"r\\x8c\\xce\\x9a\\xce\\xb1\\xa8\\xb1\\xa6w\\xaf\\xb96!%\\x1d\\xd2\\xcb_\\xf6\\x86\\xcf\\xdee\\xa7\\xc4/\\x02\\x077\\xff\\x00p\\x97\\xe7\\x90\\xff\\x00\\xf2Z\\x1d0\\xa7\\x0biM\\xa9\\x9e1\\xf1\\xff\\x00MSk\\xc3L\\x00\\x01\\xf9\\xc6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01Y\\xda\\x7f\\xf4i\\x96\\xfe\\x88\\x
97\\xfb\\x95\\x8b0\\xac\\xed?\\xfa4\\xcb\\x7fDK\\xfd\\xca\\xc7\\xa3\\xc3\\xff\\x005\\x1f8\\xfa\\xacq\\x87\\x95\\xaf\\xe4\\x91\\xfdD\"s\\x0c\\xc6\\x97\\x00\\xc6\\xe7d\\x19\\x15\\x93\\x154\\xd0Q\\xdeH\\x97 \\xf4J\\x08\\xcc\\x88\\xbd\\xdc\\xcc\\xcc\\xcc\\x88\\x88\\xb533\"\"31,\\xd7\\xf2H\\xfe\\xa2\\x18\\xd6YM7\\xb4\\x96\\xcf.\\xa8l(\\xaf\\xf6o2$\\xe8\\xb2k\\xa7\\xdb5\\x19\\xd39\\x0c<\\x97\\x9au-\\xb6\\xea\\xd2\\xb4\\x12\\xdbI\\x1aVi3%|G\\xcc\\xbd\\xd5L\\xc4i\\xc5\\xc5z\\xd9\\xc6\\xd5\\xf1\\x8d\\xac\\xd7L\\x9b\\x8cNzk\\x10\\xdf\\xf4y\\x1e\\x91\\x06DE\\xb6\\xe6\\xe9+t\\xd0\\xf2\\x10\\xafr\\x88\\xf5\\xd3Nb\\xda8\\xf3i\\x9d\\xa2\\xb6\\x87W\\x8a\\xe4XD\\xf6++3\\xda\\xdb\\xeaJiwt\\xd2\\xd4\\xc4%\\xc4\\xb1Z\\xb7$!n6\\xe2\\xa2\\xac\\xd2\\xda\\xdb=\\xe4\\xb9\\xdd\\x9a\\xc9e\\xbd\\xc8{\\x1e\\x85\\xb4\\r\\x8ccy\\x95\\xa6ecs\\x17\\x08\\x91V\\xd4X\\xf0\\xeb\\xb2\\xe7n\\xee\\x13f\\xec\\x96\\xd9d\\xe3H\\x91\\x19\\xa3h\\x9c\\xefwL\\x94\\xa5%&D\\xa2\\xd0\\xb5\\x1c#\\x1b\\xfd\\xad\\x9dn\\x03\\x88\\x1a\\xc8\\xf6\\x83\\xb3\\xea\\r\\xbfc6\\xb6W5\\xeb\\x81\\x80\\xf1\\x05Zfd\\xce[\\xcc\\xaeyM\\xcbA\\xa9\\x13\\r\\xb6\\xd6\\x933i\\n\\xdd\\xd5D\\x85\\'T\\xabC\\x17,\\x96\\x05\\xe6+\\x8fl\\x9b\\x1ec:\\xc9\\x99\\x93\\xb4;(\\xb1\\xaf29Vn8\\xf3d\\x88N\\xbemE%\\x99\\xa2*\\x9eZI\\x04m\\xa4\\xb9~3\\xe6,c_\\xe1\\xf9{-\\x9d%\\x93\\xe6\\xb4\\xb8s\\xd4\\x8d\\\\L\\xf47.\\xacQU\\x00\\xbb\\xa5\\xaf\\xbe\\x94\\xb4-io\\xd9I\\xee\\xea\\x96\\xd6{\\xca\\xd1<\\xbd\\xfc\\xc8y\\xb6\\x89\\xb4\\\\\\x7fe8\\x94\\xcc\\x9b(\\x9c\\xaa\\xdaH\\x8ai\\x0fHDw_4\\xa9\\xc7\\x12\\xda\\x08\\x90\\xd2T\\xb5\\x19\\xadi.D~\\xf1\\xcf\\xbb|\\xd9jq\\xdam\\x93\\xe3\\xd1r\\xcc\\xa6Csv\\x8b\\rI\\xb1\\xb0\\xb5T\\xb9\\xd1R\\xa8R\\xc8\\xd2\\xd3\\xee\\x92\\x94\\x92\\xe4fFz\\x99\\x1a\\x8c\\xc8\\xcb\\x96\\x99\\xe6\\xd9]\\xbb\\xa4\\xd9\\xde\\xdb04d\\x167p\\xf1\\xfc\\x8b\\x16v\\x9e^C!sd1\\xe9Rb8m\\xb8\\xea\\x8c\\x96\\xe2\\x12\\xe1\\x19\\x96\\xa7\\xae\\x8a2\\xd4f\\xacZ\\xa9\\xbe\\x9f\\x96\\xb9\\x11wTl\\xe7oX^\\xd5\\xedd\\xd7cSld\\xcb\\x8c\\xcf\\xa48\\
x99\\x94\\xb3\\xa0\\xa4\\x91\\xbcI\\xd4\\x97!\\x94$\\xcfU\\x17\"3?\\x8fM\\x08\\xc6\\x829\\xd7n\\xf6[J\\xc2\\xbb2g6W\\xb9=rrF\\x15\\x1dpl\\xb1x\\x8f@8\\xe89\\x0c\\xa7\\x998\\xeb\\x86j\\xe6\\xady\\xe8dz\\x19\\x1f=ky\\xa3\\xf9\\xa6\\xc9v\\x81\\x9ac\\x98U\\xfd\\xfeG.^\\xce\\xa6\\xdfB\\x8dy9v\\x0e\"\\xc9\\x89\\x08i.3\\xdek\\xbaj\\'\\xb5\\xee\\x93\\xa2\\rIN\\x89/p\\xd6\\xd2i\\xd2\\xa8K:\\xad\\xe7\\x91\\x1d\\x97\\x1dp\\xf7[BMJ=5\\xd0\\x8b\\x99\\x88\\xccK+\\xab\\xceq\\xaa\\xdc\\x82\\x92I\\xcd\\xa8\\xb1a2b\\xc86\\x96\\xdfx\\xda\\x8bR=\\xd5\\x91(\\xbf\\xa8\\xc8\\x8cr>\\xc9XVq\\x98L\\x7f\\x1b\\xcd\\xf3\\xdc\\x8f\\ns\\x04t\\xe7\\xdaY\\xda\\xceh\\xe3Z\\xb8\\xebF]\\xc2\\xd4i\\xddt\\xd0\\xda\\xcdIG&\\xf4-\\xdd\\xdd\\xf3#\\xfel\\x8aE\\xfe\\xd4\\xec\\xb6+Iq\\x99\\xe5,\\xc0\\xb1\\xd9\\x9b\\x96\\xd6\\x07\\x02\\xe1\\xe8\\xefM\\x92\\x99\\x11P\\x97\\x1cy*\\xdf\\xde\\xfb\\xaa\\x8fy*%\\x1f\\xb8\\xcc\\xd2fG#\\x1a\\xf6\\xd1l\\xec\\xf0\\x1c-\\x0fi[D\\xcc(\\xf6Y\\x80D\\xb5\\x99>M\\x8d\\x86I\\x16]\\x83\\x97\\xab\\xa8\\x97d\\x8a\\xd9Ji\\x86\\xbd1\\xb6\\x1dZW\\xdd\\x9e\\xfa\\xf7\\x12J_w\\xf7\\xc5\\xedk\\xd2\\x9d\\x9fq\\xbc\\xff\\x00\\x15\\xa5\\xbb\\x83\\x9c\\xccn[>\\x9f\\xdeT%v\\xcb\\xb4\\x93\\x1e1\\xb6\\x9d\\xe6\\x9e\\x92\\xb6\\x19S\\xa6N\\x12\\xcd&\\xa4\\x9a\\xb7TDfzj7F.y\\xb4G\\xe7\\x14\\x98\\xb3U\\x01\\xcc2%d\\x1b9\\xed\\x10\\xed\\x86wg\\x95H\\xa9\\xbe\\xb8S\\x18\\xac\\x9a\\xbbS:e\\x12\\xa2\\x997]&\\x11}\\xe3\\xbb\\xc8qIwt\\xf7\\xd4I\\xd5E\\xa1\\x91\\xd0v0\\xde\\xdb6\\xb3\\x8eb\\x9bJ\\xac\\xb4$J\\xb4\\x9c\\xdc\\xd9.\\xc8\\xcc\\x9f]y\\xc6)\\x06\\x97\\xe2|\\x16P\\xbb\\xa4n\\xa0\\x96\\xd9h\\xe6\\xf9)$\\xa3Y\\x9e\\xba\\xe7k\\xad\\xad\\xa9gn\\x00\\xe2\\\\\\xaf:\\xc9\\x8b>\\xad\\xcf\\xf1\\x19\\xd9Jqe\\xe7\\x91\\xb1\\xf9\\x12n2ST9\\x88T\\xd2\\x89!\\xa6+\\t\\xb3A4J5\\x92\\\\5\\xa5\\xc24k\\xa1\\x8b\\xd6*W3s]\\xba\\xe5\\x932<\\x8e\\xcd\\x18}\\xdb\\xaa\\xa7\\xc7\\xda\\xb5}\\xb8d\\xa6\\xebXx\\xd0\\xa6\\xd0\\xa2\\xef\\x10\\xb5(\\xbe\\xe6\\xadPG\\xa9\\x92H\\xd4\\xa34cD\\xcd\\xacY\\xd3\\x166\\x11\\xeak\\xe5
N\\x94\\xe7u\\x163Jy\\xd743\\xddBH\\xcdG\\xa1s=\\x08\\x8f\\xdc<\\x98\\xbeK[\\x99\\xe3UY\\x054\\x9fL\\xa8\\xb5\\x8a\\xd4\\xd8r;\\xb5#\\xbde\\xc4\\x12\\xd0\\xad\\xd5\\x11(\\xb5J\\x88\\xf42#/\\x8c\\x88s\\xce\\xccp\\xa9\\x97\\x9b\\x0e\\x85\\xb4\\x8bl\\xfb\\'\\xc8n\\xee\\xf1\\x87l\\xa6Gr\\xd1GV\\xa5\\xbf\\x11J6\\x91\\x10\\x8b\\xbbB[5\\xe8\\x9d\\xd2%j\\x8ef|\\xc8R\\xf6\\'_m\\xb3Jn\\xccVP\\xb2\\xdc\\x82\\xc2&]\\\\\\xcdm\\x95E\\x8c\\xe3z\\t4uK\\x90\\xd7r\\xce\\x84\\x96M\\xb54\\x84\\x91\\xa0\\x88\\xd4\\x9dw\\x8dFfa\\xb5\\x9b\\xc6\\x9aO\\xfa,\\xec\\xd0\\x01\\xc3\\xd8B\\xb6\\xdd\\xb6\\x9aiY\\xee;a\\xe8W\\x0e\\\\\\xcan1I\\xcc\\x9fb\\x0c41-m\\xfa#\\xb5I\\x84\\xa6\\xcc\\xb7\\x11\\xbaf\\xa7\\rj\\xde\\xdf\\xdf-H\\x8bu\\xd7\\x92b-r\"\\xee\\xe1\\x01\\xc5\\x99\\xc4\\xbc\\x95\\xdc;\\xb4Nl\\xc6q\\x94B\\xb4\\xc2\\xb2\\x19\\'I\\x16=\\xa3\\x88\\x87\\x1d,\\xc4\\x88\\xfe\\xe2\\x98#\\xddu\\n5\\xa8\\x8d\\x0eo$\\x88\\xfd\\x92I\\x99\\x99\\xf86\\xfb\\x95\\xe47\\xf9^\\xd3\\xaa\\\\\\xc9\\xf3*<\\xa15u\\xea\\xc0\\xe91\\xa7\\xa44\\xcc\\xf2y\\x82\\xef\\x14\\xa2h\\xb7]3\\x90n6\\xa3p\\xfd\\x84\\xa4\\x8c\\xb4\\xd3xs\\x9ck|?5\\xf6[;%\\x19\\x85B\\xf3\\x17qT\\xcb3\\xben\\x02l\\xd7\\x17\\xba^\\x85\\x1dN)\\xb4\\xaf\\x7fM\\xcf\\xbfJ\\x8bw]yk\\xa6\\x9c\\xc4\\xc8\\xe3\\x9d\\xb3\\xdcdxFQ\\xb66\\xe1d\\x97\\xb1\\x16\\x8d\\x947p\\xdcr\\xb7\\x90\\xe3P\\xa7\\x13\\x92\\x197c\\x92\\x96}\\xd2\\xb4a\\x1e\\xd2t3=TfffbK\"\\xc8\\xb2-\\x86et\\xb6\\x15\\xd9\\r\\xf6S\\xf0\\xd6\\tws.\\xbe\\xf2z\\xe56\\xec\\xe8L\\xc7y\\xa7\\x1aA\\xf2gx\\xddZ\\r\\r\\x12Q\\xa1\\x96\\x89-\\x08]\\xb5\\xafx\\xfc\\xbd\\x92\\xce\\xb4\\x109\\xbf\\xfb\\x84\\xbf<\\x87\\xff\\x00\\x92\\xd0\\xe6\\x1d\\x87c{c\\xb9{g\\xb9\\xb2.NMm\\xa2X\\x9du\"vf\\xf5\\x8c{\\x08\\xaf5\\xbc\\xb2j\\x01\\xc2Cq\\xd6JRT\\x82idI\\xdd4\\x99\\xa8\\x8c\\xcct\\xf6o\\xfe\\xe1/\\xcf!\\xff\\x00\\xe4\\xb4=>\\x1a\\xbc\\xf5\\xd36\\xb6\\xb0\\xb1\\x16\\xaa\\x1a`\\x00\\x0f\\x8c\\xd8\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\
x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00+;O\\xfe\\x8d2\\xdf\\xd1\\x12\\xff\\x00r\\xb1f\\x15\\x9d\\xa7\\xff\\x00F\\x99o\\xe8\\x89\\x7f\\xb9X\\xf4x\\x7f\\xe6\\xa3\\xe7\\x1fU\\x8e0\\xf2\\xb5\\xfc\\x92?\\xa8\\x84\\x16w\\x81\\xd1m/\\x18\\x97\\x8e\\xe4\\x90~\\x12\\xa7\\x94hS\\xb1\\xfb\\xd5\\xb5\\xa9\\xa1d\\xb4\\x19)\\nJ\\x88\\xc9II\\x91\\x91\\x91\\xeaD?&\\xf3-\\x1bO\\xfe\\x87u\\xee/\\xfe\\xcf\\xf8\\x8f\\xae3\\xfa\\x0e\\xeb\\xa3\\xfe#\\xebN\\x05s\\xa4\\xc3\\x9eYWi;<\\xec\\xeb\\x1f\\xc2\\xae1(\\x98\\xa45P\\xdc\\x9e\\xf5\\x94yJ\\\\\\x95\\xccW-\\x14\\xeb\\xae\\xa9N-E\\xa1n\\x99\\xa8\\xcd:\\x16\\x9a\\x0f=Gf\\xdd\\x9c\\xd2c7\\xb8\\xfb\\x18\\xe9\\xbfUx\\xd3L\\xd85>t\\x99k}\\r\\x99\\x9bI\\xef\\x1eqKI 
\\xd4f\\x9d\\xd5\\x16\\xe9\\xf3-\\x0cZ\\xb8\\xcf\\xe8;\\xae\\x8f\\xf8\\x87\\x19\\xfd\\x07u\\xd1\\xff\\x00\\x11\\x9d\\xdaz~\\x85\\xaaT+\\xfb1\\xec\\xd6\\xb1\\x9b\\x96\\xd8\\xc7W\\xbduV\\xf5-\\x93\\xce\\xd9Ku\\xe9\\xb1\\x1d\\xd3}\\x0e\\xba\\xa7Mk=\\x08\\x89+Q\\x9a\\xd0Z\\x92T\\x923!h\\xcbv_\\x8bgXsx\\xad\\xf536tM\\xa5\\xa2j+\\xcaV\\xad\\x1bzwjC\\x84d\\xb4-:rZTJ/\\xc6?~3\\xfa\\x0e\\xeb\\xa3\\xfe!\\xc6\\x7fA\\xddt\\x7f\\xc4#\\xc3U\\x11h\\xa7\\xe8ZU\\xdaN\\xcf\\xb8\\x16?\\x06\\xb6$*WI\\xba\\xfbt_G[\\xf6\\x12\\x9ft\\xa7!\\xa3i/)\\xc7\\x1cR\\xdc2mF\\x9d\\x16f\\x9d4\\xe5\\xc8\\xb4\\xfdr\\xdd\\x82\\xe0\\xb9\\xcc|\\xa9\\x9b\\xaaEKo(\\\\7-\\xf7&\\xc8h\\xe4*.\\xe9\\xc7248\\x93l\\xd1\\xb8\\x9f\\xbc\\xdd\\xd7Nz\\x89\\xde3\\xfa\\x0e\\xeb\\xa3\\xfe!\\xc6\\x7fA\\xddt\\x7f\\xc47j\\xadl\\xbfB\\xd2\\xa3V\\xf6R\\xd9\\x9d]U\\xc5cT\\xd6/@\\xb7a\\x11\\xe6\\xb12\\xfe\\xc6J\\\\B\\\\K\\x89\"\\xef$+p\\xc9hI\\xea\\x9d\\x0f\\x96\\x9a\\xe8fB\\xc9\\x9el\\xae\\x9f/;\\xab?\\x83bI\\xc8\\xe6c\\xf2\\xf1\\xe6\\xde\\x9e\\xe3\\xc7\\x1dQ_\\xd1Ji\\xc6\\xd0\\xb4\\xea\\x83ZRfi\\xd1z\\x11\\x91(\\xb5\\x12\\xbcg\\xf4\\x1d\\xd7G\\xfcC\\x8c\\xfe\\x83\\xba\\xe8\\xff\\x00\\x88\\x9b\\xb4\\xc4Z)\\xfa\\x16\\x96\\x1d\\xb1\\x0e\\xce\\xf9\\x96\\x11\\x9c\\x15\\xa5\\xcc\\xe8\\x14\\xf4\\t\\xae~\\x14\\x8a:|\\x82\\xda\\xd1\\x9b\\x15\\xac\\xd1\\xba\\xe2\\xfd9fL\\xee\\x12U\\xbaM\\x91\\x9f\\xb6dj\\xd0l\\x18\\xae\\xc7q\\x0c*m\\x0c\\xbaZ\\x8fC\\x91EP\\xaa\\x1a\\xe5\\xfaK\\xcew\\x10T\\xb6\\xd6mh\\xa5\\x99+\\xdai\\xb3\\xdeV\\xaa\\xf6}\\xfc\\xcfY\\x1e3\\xfa\\x0e\\xeb\\xa3\\xfe!\\xc6\\x7fA\\xddt\\x7f\\xc4J|-T\\xc5\\xa2\\x9f\\xa1iV\\xed;;\\xec\\xf2\\xeb\\x12o\\x1a\\x99\\x8e!\\xda\\x96l^\\xb6e\\x05)\\xf4\\xbc\\xc4\\xb7][\\xae<\\xd3\\xe4\\xb2u\\xb5\\x1a\\xdcY\\xfb\\n-\\tZ\\x16\\x85\\xc8Y\\xb0l\\x06\\x8bf\\xd4\\x08\\xa5\\xc7`\\x9c\\n\\xf4\\xb8\\xb7\\xb7\\x14\\xfb\\x8f-n,\\xf5R\\xd4\\xe3\\x8aR\\xd4\\xa3?y\\xa8\\xcc\\xc7\\xcf\\x19\\xfd\\x07u\\xd1\\xff\\x00\\x10\\xe3?\\xa0\\xee\\xba?\\xe25\\x1e\\x1e\\xa8\\x9b\\xc5?B\\xd2\\xaf\\xb7\\xb0\\x0c\\
t\\xad\\xa0q\\xaf\\xc0=\\xe6G\\xe9*\\x9a\\x99\\x0e\\xcc}\\xc6\\x91 \\xd1\\xb8o%\\x858m%\\xcd\\xd3\\xd3|\\x90J\\xfc\\xa3\\xcf[\\xd9\\xbfg4\\xd9\\xaf\\x15\\xc1\\xc6\\xd1\\x12\\xe7\\xd2\\xd5<\\x8d\\x99r\\x13\\x18\\xa4\\xa8\\x8c\\x94\\xf1F\\';\\x92p\\xf5=TH\\xd7\\x99\\xf3\\x16\\x8e3\\xfa\\x0e\\xeb\\xa3\\xfe!\\xc6\\x7fA\\xddt\\x7f\\xc47i\\xe9\\xfa\\x16\\xa9K\\xb4\\xec\\xb3\\xb2\\xfb\\xabi\\xf62\\xf1t\\xb9*l\\xcf\\x84\\x1c\\xdd\\x9d%\\r\\xb7+|\\x96r\\x1am.\\x12\\x19t\\xd4Df\\xe3d\\x95\\x1e\\xa7\\xa9\\x9e\\xa7\\xad\\xef\\x1e\\xc2\\xa9qY\\xf7\\xd3j\\xe1z,\\x9b\\xd9\\xbf\\x08X\\xaf\\xbdZ\\xfb\\xf7\\xfb\\xb45\\xbf\\xa2\\x8c\\xc9>\\xc3h-\\x13\\xa1r\\xd7ML\\xcc~\\x1cg\\xf4\\x1d\\xd7G\\xfcC\\x8c\\xfe\\x83\\xba\\xe8\\xff\\x00\\x88G\\x86\\xaa5\\x8a~\\x85\\xa5O\\xa4\\xec\\xc3\\xb3-O\\x91\\x16\\xa7\\xcdG\\xee-O\\xe2\\x1a\\x9a\\xa2/\\x7f\\x82-\\xc09\\x9fk\\xfbj\\xda\\\\-\\x85E\\xc9\\xea\\xf1\\xba\\xba+I7\\xf5q\\x1b\\\\\\\\\\x81\\xa9\\xcc?\\r\\xe9\\x0c\\xa7}\\xb7\\x93\\x1c\\xc8\\xc9\\xc3_r~\\xc9\\x1aIJZL\\xf7RJ\\xb6\\xe4\\xbbq\\xcch3\\xfc\\x9b\\xd4\\x95\\xba\\x9d\\xc3#5\\x166\\xb4\\xfeG5\\xb3k\\x01\\xf9C[\\xeeDarZC\\x12T\\x84\\x9b\\xad6\\xbd\\xf4\\xa1zsI+B\\xde\"=K]\\x0b_\\xc4C\\xf5\\x1dP\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x109\\xbf\\xfb\\x84\\xbf<\\x87\\xff\\x00\\x92\\xd0\\x9e\\x109\\xbf\\xfb\\x84\\xbf<\\x87\\xff\\x00\\x92\\xd0\\xeb\\x83\\xfc\\x94\\xfc\\xe1\\xaa\\x7ft4\\xc0\\x00\\x1f\\t\\xb0\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00Vv\\x9f\\xfd\\x1ae\\xbf\\xa2%\\xfe\\xe5b\\xcc+;O\\xfe\\x8d2\\xd
f\\xd1\\x12\\xff\\x00r\\xb1\\xe8\\xf0\\xff\\x00\\xcdG\\xce>\\xab\\x1ca\\xe5k\\xf9$\\x7fQ\\n\\xe5N\\xce\\xe9is\\x0b\\x9c\\x924}\\xdb\\x0bFc0\\xf2M)\\xee\\xd0Lw\\x9b\\x86\\x82$\\xeaF}\\xea\\xb5=O]\\x0b\\xdd\\xa0\\x92k#\\xa9\\xee\\xd1\\xff\\x00\\xaaB\\xf7\\x17\\xff\\x00p\\x8f1\\xf5\\xc4u>)\\x0b\\xa8G\\x98\\xfas\\x87T\\xcf\\x07+K\\x06\\xaa\\xec\\xe9\\x92A\\xd9F\\r\\x8c96\\xa8\\xe7\\xd1gE\\x93\\xc9q.\\xb9\\xdd./\\xc2o\\xcb\\xdcA\\xf7z\\x9b\\x9d\\xdb\\xa9-\\x0c\\x89;\\xc4e\\xbd\\xa73\\xfcv\\x9d\\xd9\\xaf#\\xcdo\\xb6\\x8fe\\x16m:\\xd9\\xbd\\xb3\\xa0\\xb3\\x85]`\\xa7U\\x1eYW\\xa4\\xbb\\xc8\\xd3\\x12\\x94ri\\xc3/\\xf8w\\xfd\\xc4f_\\x10\\xdf\\xf8\\x8e\\xa7\\xc5!u\\x08\\xf3\\x0e#\\xa9\\xf1H]B<\\xc7\\x1d\\xda\\xf1kO\\xe4Yus\\xe4]\\x80gsa\\xed\\xb5vnb\\xb0\\xe4\\xed\\x07\\x19E\\\\X\\xb5*}\\x0cW\\xc8n<\\x86\\x10\\x95)M\\xea\\xe3fO!F\\xe1%&FFD\\xde\\x9a\\x18\\xba\\xec\\xebc\\xf78\\x8e\\xd2fd3$\\xc1r\\x13\\xd8\\x85U\\x02[a\\xc5\\x9b\\x85\"*\\x9f7\\x14dh\"\\xdc>\\xf5;\\xa7\\xae\\xa7\\xa1\\xeaE\\xf1\\xe9\\xdcGS\\xe2\\x90\\xba\\x84y\\x87\\x11\\xd4\\xf8\\xa4.\\xa1\\x1eb\\xc7\\x87\\x98\\x98\\x9bJj\\xe6Z\\x9d\\x8e\\xdblr\\x07f\\x89W\\x0eF\\x96\\xbc%\\xd94V\\xb2 \\xa9Ke\\t\\x9d\\x19L\\xa1\\xd2R\\x92\\x93\\xdc\\xef\\xd2\\xc2L\\xcc\\x8b\\xef\\xc6\\x85M@\\xeeC\\xda\\xe6\\xff\\x00+i\\'\\xf0m\\x06(\\xc68n\\x9f\\xb9r\\xdf\\x92r\\x9dA~T4\\x98\\xe6\\x7f\\xdf\\x10\\xd6\\x0f\"\\xa8Q\\x19\\x1d\\x9c##\\xf8\\x8eB<\\xc0\\xb2*\\x84\\xfb\\xac\\xe1\\x17\\xc7\\xcaB<\\xc4\\x8f\\x0f1\\xa4D\\xfeE\\x97T>m\\x1b2\\x91;\\x1a)\\x0b\\xa8G\\x98q\\x1dO\\x8aB\\xea\\x11\\xe6\\x19*\\xe4ZR 
#\\xb8\\x8e\\xa7\\xc5!u\\x08\\xf3\\x0e#\\xa9\\xf1H]B<\\xc3%\\\\\\x8bJD\\x04w\\x11\\xd4\\xf8\\xa4.\\xa1\\x1ea\\xc4u>)\\x0b\\xa8G\\x98d\\xab\\x91iH\\x80\\x8e\\xe2:\\x9f\\x14\\x85\\xd4#\\xcc8\\x8e\\xa7\\xc5!u\\x08\\xf3\\x0c\\x95r-)\\x11\\x03\\x9b\\xff\\x00\\xb8K\\xf3\\xc8\\x7f\\xf9-\\x0fo\\x11\\xd4\\xf8\\xa4.\\xa1\\x1eb\\x17/\\xbc\\xae\\x95L\\x86\\x99\\xb0\\x8a\\xf3\\xaa\\x99\\x13u\\r\\xbc\\x95(\\xff\\x00\\xda[\\xf7\\x11\\x18\\xeb\\x85EQ\\x89N\\x9f\\x18j\\x98\\x9b\\xc3Y\\x00\\x01\\xf9\\xf6\\xc0\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\xf0\\xebH}\\xa5\\xb6\\xe2\\x12\\xe3k#J\\x90\\xa2\\xd4\\x94G\\xef#/\\x8c\\x87\\xd8\\x00\\x8b\\xe1Z_\\x07\\x81\\xd3#\\xc88V\\x97\\xc1\\xe0t\\xc8\\xf2\\x12\\x80:m+\\xea\\x95\\xbc\\xa2\\xf8V\\x97\\xc1\\xe0t\\xc8\\xf2\\x0e\\x15\\xa5\\xf0x\\x1d2<\\x84\\xa0\\x06\\xd2\\xbe\\xa9/(\\xbe\\x15\\xa5\\xf0x\\x1d2<\\x83\\x85i|\\x1e\\x07L\\x8f!(\\x01\\xb4\\xaf\\xaaK\\xca/\\x85i|\\x1e\\x07L\\x8f \\xe1Z_\\x07\\x81\\xd3#\\xc8J\\x00m+\\xea\\x92\\xf2\\x8b\\xe1Z_\\x07\\x81\\xd3#\\xc88V\\x97\\xc1\\xe0t\\xc8\\xf2\\x12\\x80\\x1bJ\\xfa\\xa4\\xbc\\xa2\\xf8V\\x97\\xc1\\xe0t\\xc8\\xf2\\x0e\\x15\\xa5\\xf0x\\x1d2<\\x84\\xa0\\x06\\xd2\\xbe\\xa9/(\\xbe\\x15\\xa5\\xf0x\\x1d2<\\x85\\x03o\\x98\\xedT]\\x8e\\xe5n\\xb3[\\x0e;\\xa8\\x86f\\x97Z\\x8e\\x92R}\\xa2\\xe6FE\\xa8\\xd4\\xc6{\\xda\\x08\\xb7\\xb61\\x96\\x16\\xee\\xfe\\xb0\\xcf\\xd9\"3\\xd7\\xdaO\\xe2\\r\\xa5}R^V\\xee\\x15\\xa5\\xf0x\\x1d2<\\x83\\x85i|\\x1e\\x07L\\x8f!(\\x01\\xb4\\xaf\\xaaK\\xca/\\x85i|\\x1e\\x07L\\x8f 
\\xe1Z_\\x07\\x81\\xd3#\\xc8J\\x00m+\\xea\\x92\\xf2\\x8b\\xe1Z_\\x07\\x81\\xd3#\\xc88V\\x97\\xc1\\xe0t\\xc8\\xf2\\x12\\x80\\x1bJ\\xfa\\xa4\\xbc\\xa2\\xf8V\\x97\\xc1\\xe0t\\xc8\\xf2\\x0e\\x15\\xa5\\xf0x\\x1d2<\\x84\\xa0\\x06\\xd2\\xbe\\xa9/(\\xbe\\x15\\xa5\\xf0x\\x1d2<\\x83\\x85i|\\x1e\\x07L\\x8f!(\\x01\\xb4\\xaf\\xaaK\\xca/\\x85i|\\x1e\\x07L\\x8f \\xe1Z_\\x07\\x81\\xd3#\\xc8J\\x00m+\\xea\\x92\\xf2\\x8b\\xe1Z_\\x07\\x81\\xd3#\\xc8}7\\x8c\\xd3\\xb4\\xe2V\\x8a\\xa8(ZL\\x94\\x95&2\\x08\\xc8\\xcb\\xdcdz\\t \\r\\xa5}R^@\\x00\\x1c\\xd0\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x9ev\\x83\\xd0\\xb6/\\x96\\xeb\\xa1\\x97\\xa1\\x9e\\xbb\\xda\\xe9\\xf7\\xc9\\xfc\\\\\\xc6\\x863\\xde\\xd0)\\xdf\\xd8\\xceXZ\\x12\\xb5\\x86|\\x8f^~\\xd2\\x7f\\x170\\x1a\\x10\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x0c\\xef\\xb4\\
x06\\x8f\\xec[-&\\xcc\\x97\\xacE\\'\\xd9\\xe7\\xcc\\x96De\\xff\\x00\\xc8\\xd1\\x07\\x11\\xf6\\xc3^W\\xd9\\xd8\\xaer,q\\xb5\\xd9l\\xd70R\\x99\\xbc\\xa7>e[`\\xb35z[\\x06_\\xc9\\x93\\xa6Z\\xa8\\xb44\\xa9{\\xdb\\xc6Jq\\x06\\x90\\xed\\xb3Q$\\xc8\\x8c\\xc8\\x8dG\\xa1\\x11\\x9f\\xbc\\xc7\\xf4s\\xdfe\\xea,\\xaf:c\\xd6\\xee\\xd1\\xcc\\x8b#\\xbb`\\xd1KR\\x9dI\\x8aj\\xe5\\x19(\\x89\\xb4|K{u\\x0bR\\x8c\\xcdF\\x94\\xb6Fe\\xed$t \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xcb\"\\xc6\\x9dwgx\\xeb\\xb7\\xb6\\xac\\x13V/0\\xdbQ\\xe4n!\\x08I\\x91\\x11\\x11h=|;\\'\\xe7\\x1d\\xe7Y\\xf6C\\x1b\\xfeu\\x90\\xfe\\x97\\x93\\xfed&\\x87\\xdf\\xae\\xb9\\xa6m\\x1fHJ\\xaa\\x98\\x94/\\x0e\\xc9\\xf9\\xc7y\\xd6}\\x90\\xe1\\xd9?8\\xef:\\xcf\\xb2&\\x80ciW\\xe4C9\\xaa\\xe6\\x85\\xe1\\xd9?8\\xef:\\xcf\\xb2\\x1c;\\'\\xe7\\x1d\\xe7Y\\xf6D\\xd0\\xaa\\xedGi\\xd4\\x1b\\x1d\\xc1\\xac\\xf2\\xdc\\x9aJ\\xe2\\xd4W\\xa5&\\xe1\\xb4\\xd9\\xb8\\xe2\\xd4\\xa5\\x12\\x10\\x84$\\xb9\\xa9JR\\x88\\x88\\xbf/=\\x0bS\\x12qf\"\\xf3\\xf4\\x835I\\x0e\\x1d\\x93\\xf3\\x8e\\xf3\\xac\\xfb!\\xc3\\xb2~q\\xdeu\\x9fdU\\xb6A\\xb6v6\\xbe\\xd5\\xa9\\xb7\\x88\\xe5\\xb8\\x83\\xd5\\xcah\\x97\\x1f,\\xaa8.:N\\x12\\x8d*l\\xb7\\x94J/d\\xf5\\xe7\\xcb\\x96\\xa3C\\x08\\xc5\\x9a\\xa2\\xf1\\xf4\\x835H^\\x1d\\x93\\xf3\\x8e\\xf3\\xac\\xfb!\\xc3\\xb2~q\\xdeu\\x9fdM\\x00\\xbbJ\\xbf\"\\x0c\\xd5sB\\xf0\\xec\\x9f\\x9cw\\x9dg\\xd9\\x1e\\x1b\\xdd\\x9fE\\xca*dU\\xdc\\xd8\\xd8\\xdb\\xd6I\"K\\xd0\\xa7:
\\x87\\x99t\\x88\\xc8\\xc8\\x94\\x85 \\xc9\\\\\\xc8\\x8f\\x99{\\xc8\\x87\\xb3;\\xcbc`8FC\\x93\\xcce\\xd9\\x11)k\\xa4Y<\\xcb\\x1aw\\x8bC-)\\xc5%:\\x99\\x16\\xa6I2-L\\x8bQ\\xf8\\xec\\xe73gh\\xdb?\\xc6\\xb2\\xb8\\xd1\\x9c\\x87\\x1e\\xf2\\xb65\\x93q\\xdd25\\xb4\\x97\\x9aK\\x84\\x95\\x19r3\"V\\x9c\\x84\\xda\\xcd\\xed\\xf6\\x835OG\\x0e\\xc9\\xf9\\xc7y\\xd6}\\x90\\xe1\\xd9?8\\xef:\\xcf\\xb20J.\\xdc\\xd59\\\\\\x87\\x8a\\x8bd\\xdbT\\xbc\\x82\\xd4\\xd7 \\xaa\\xca\\xb7\\x1fi\\xf8\\x9d\\xe2\\x17\\xb8\\xbf\\xba%\\xf3-\\x08\\xcb\\x9f\\xc6_\\x88t\\xa8\\xcd8\\xf9\\xff\\x00l\\xf6\\\\\\xd5B\\x17\\x87d\\xfc\\xe3\\xbc\\xeb>\\xc8p\\xec\\x9f\\x9cw\\x9dg\\xd9\\x13B\\xaf;h1ag\\x94x\\xb1VZ\\xcav\\xda\\x1b\\xf3Z\\xb4\\x8d\\x10\\xd7\\x01\\x94\\xb7\\xbb\\xaa]x\\x8fD\\xa9[\\xc5\\xbaZ\\x1e\\xbf\\x8c\\xb5-w8\\xb3\\x1c~\\x90\\x99\\xaa{\\xb8vO\\xce;\\xce\\xb3\\xec\\x87\\x0e\\xc9\\xf9\\xc7y\\xd6}\\x914\\x01\\xb4\\xab\\xf2 \\xcdW4/\\x0e\\xc9\\xf9\\xc7y\\xd6}\\x90\\xe1\\xd9?8\\xef:\\xcf\\xb2&\\x806\\x95~D\\x19\\xaa\\xe6\\x85\\xe1\\xd9?8\\xef:\\xcf\\xb2\\x1c;\\'\\xe7\\x1d\\xe7Y\\xf6D\\xd0\\x06\\xd2\\xaf\\xc8\\x835\\\\\\xd0\\xbc;\\'\\xe7\\x1d\\xe7Y\\xf6G\\xeb\\x8c\\xa6en}\\x1a\\x12\\xad\\xa7\\xce\\x8a\\xfdd\\x97\\x94\\xd4\\xc7\\xbb\\xc2%\\xa1\\xd6\\t&\\\\\\x8bNKW\\xff\\x00\"TG\\xd6\\xff\\x00I\\xb5\\xdf\\xa1\\xe6~\\xfa(MSU5D\\xf2\\x9f\\x87\\x93T\\xd536\\x95\\xf8\\x00\\x07\\xc3P\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00f\\xf8\\xdf\\xf3\\xac\\x87\\xf4\\xbc\\x9f\\xf3!4!q\\xbf\\xe7Y\\x0f\\xe9y?\\xe6Bh}\\xccO\\xdc\\xc5\\\\@\\
x00\\x1c\\xd9\\x06i\\xda?\\x1c\\xc1\\xf2\\xcd\\x8f^U\\xed\\x16\\xd5\\x14x\\xac\\x83e/Y\\xaaA0q]\\xef\\x91\\xdc\\xb8\\x97\\x0c\\x8c\\x92\\xa2ssC22\\xfc|\\xb5\\x1aX\\xf1]QV\\xe4\\xb5\\x8f\\xd6\\xdb\\xd7\\xc5\\xb5\\xae|\\xb7]\\x895\\x84\\xbc\\xd3\\x85\\xf8\\x94\\x85\\x11\\x91\\xff\\x00\\xdc\\x86j\\x8c\\xd10?\\xcf\\x9c\\xcf\\xb4.\\xd0\\xa1lcn\\x18}&m\\x1bh\\x9c+\\x06\\xbd\\xc8\\x1b@\\xa3\\xdd\\xef}\\x12C\\xc9C\\xc8um\\x1a\\x92o!\\xbd\\xff\\x00\\xba \\xf5\\xd1+Q\\x9e\\xa5\\xcb\\xf9\\xb1\\xeag\\xb6I};=\\xc5\\xed\\xf6}+\\x17\\xa7\\xc5\\xa7\\xd8d\\x18\\xd6\\x19\\x95\\xce\\xb3\\x93p\\xd2X52\\xf3\\x8d\\xbe\\xde\\x8d\\xb8N\\x92H\\xdc=\\xd3\"R\\xb9|G\\xdfX\\xf6\\x11\\x8eb4\\xeeT\\xd1PU\\xd2\\xd5\\xb9\\xa9\\xae\\rt6\\xe3\\xb0\\xadKC\\xd5\\x08I$\\xf5.G\\xc8x\\xf1\\x8d\\x97\\xe1\\xb8K\\x93\\x17\\x8e\\xe2TT+\\x98\\x93L\\x95VV\\xb3\\x18\\xdf#\\xf7\\x92\\xcd\\t-\\xe2\\xfe\\xb1\\xe4\\xd8Ux\\x99\\x9e\\r]\\xfezvt\\x9a\\xc6-\\xda\\x13\\t*)8\\x9dMvg\\x8a\\xd8J\\x99A\\x89\\xda\\xcb\\x9b\\xdd\\x11G7\\x99)\\x86\\xfb\\xab#\\x90[\\xa6Z\\xa4\\x92~\\xca\\xcb\\x9f\\xbc\\xec\\x9b\\x13\\xd9\\xe4]\\x9d\\x7f\\xf4\\xfb\\x99\\xb5,6\\xb9\\xdfY\\x8fc\\x93\\x90WIu\\xc7$2\\xc1\\xcaQ/\\xba#3J\\t\\r\\xb7\\xbc[\\xa4FF\\x93?\\x8c\\xf5\\xee\\n\\x9d\\x90`t\\x0f0\\xf5f\\x13\\x8e\\xd7;\\x1d\\xd5\\xbe\\xcb\\x91*XiM\\xb8\\xb4\\xee-i4\\xa0\\xb4R\\x92f\\x932\\xe6dz\\x1f!9C\\x8eT\\xe2\\xb4\\xd1\\xea)j\\xe1S\\xd4\\xc7I\\xa5\\x98\\x10#\\xa1\\x86\\x1a#33$\\xb6\\x82$\\x91\\x19\\x99\\x9f\"\\xf7\\x99\\x85\\x1e\\x1e\\xdcg\\x9f\\xfd^\\xde\\xc6g\\x17\\xd0\\xe2\\x1b\\x0c\\xa4\\xec\\xf1\\x9c\\xda\\xe0\\x17\\xb0m\\xb3[-\\x9cY\\xb9`\\xeao\\x1c\\x952RN&\\xaf:\\xfb\\nu[\\xab\\'\\r$fi#I\\xa8\\xd3\\xc8\\xb9\\n\\xd6\\xcc\\xf6qK\\xb1\\xf9\\xfd\\x92\\xf2\\xacQ3+.2\\xf8\\xadE\\xbes\\xd3\\x9eq\\x16\\r\\xb9^\\x977\\\\B\\xd4i\\xd1*?d\\x92DI\\xd0\\xb4/d\\xb4\\xed\\x8a\\xcd\\x8f`T\\xaa\\xb3:\\xfc#\\x1c\\x80v\\x8c\\xae<\\xf3\\x8dS\\x1d\\xbfKie\\xa2\\xdbwu\\x05\\xbe\\x95\\x11\\xf3%jG\\xf1\\x89.\\x05\\xc6\\xfb\\xaa6\\xf8z\\xab\\xbb\\xa1\\xd3\\xe0\
\x94z\\x13ZWh\\x8d\\xc2\\xf4r\\xdd\\xfb\\x96\\x89-\\xd2\\xdc\\xd3\\x97/p\\xbb\\x0e\\x13\\xa6\\x9e\\xe5\\xdc5\\xd8\\xaf%\\xe1\\xe6\\\\~\\xc7oTx\\xdd#y%\\x99;\\x80\\xcef\\xbd\\xb7_5<\\xb4\\x92\\xbb\\xf7\\x14O\\xa7yF\\x95\\x16\\x85\\xf1h\\\\\\x8cx\\xf2\\xbd\\x95c\\xb9\\x9b\\xbd\\xaf2{H\\xd2\\x1d\\xbd\\xc6e\\xbf>\\x9a[S\\x1eh\\xe0\\xbe\\xdc\\x02t\\x9cm(Q$\\x94jm\\x1a\\xabML\\x92E\\xee\\x1d\\x96}\\x9dvP\\xa9\\xc74\\xf6c\\x86\\x9c\\xc3s\\xbe9\\x07A\\x13\\xbc\\xdf\\xd7]\\xfd\\xee\\xef]\\xedy\\xeb\\xef\\xd4Y\\xb8\\x17\\x1b\\xee\\xaf\\x1a\\xe1\\xea\\xae\\xee\\xf7_\\x85\\x91\\xe8Mic\\xaa7\\x0f\\xd2\\x0bw\\xee\\xba\\xa7\\xd9=\\xfdyr\\xf7\\t\\x18\\x13\\x96)\\xab\\xe1\\xecf\\xd6\\xef\\xf3\\x7fk\\xb2g\\xed//\\xc6\\x91{\\t\\x9c\\xf6\\xca\\xd7e\\xf5\\xb60Z\\x9d\\x92\\x95+t\\x13^A\\x11\\xcd58\\xb46\\xea\\x94\\xe1\\xefh\\x935\\x9e\\x9a\\x19hD4\\xdc[e\\xb1vY\\xb7\\xfd\\x87W\\x9aa=r\\x9d\\x9cM\\x8ba:\\x01\\xea\\xd4\\x952\\xc2R\\x95$\\xfd\\xc6E\\xaa\\xb4Q\\x11ok\\xa9\\xfb\\xc6\\xc7\\xb7^\\xc8\\x8b\\xda\\xdd\\xdd\\\\\\x9a\\x8c\\x8a\\x8f\\x1a\\xab\\x81Z\\x8a\\xa6\\xaa\\xe6\\xe1U\\xb6\\xcd\\xb0\\xcaM[\\xbe\\x8e\\xa7\\x92JgB=\\x08\\x92{\\xa5\\xa1hD4\\x9d\\x97l+\\x15\\xd9n\\x1f\\x8bRF\\x82\\xcd\\xb4\\x9cv\\x02\\xeb\\xe2[\\xd90\\xdb\\x93\\x12\\xd2\\xcc\\xcd\\xd4\\x93\\x9b\\xba\\xa5*3=R\\x9d\\x0bM\\x0b\\xe2\\x18\\xa7\\x06\\xac\\xf33\\x1f\\xf6\\xb7p\\x86\\xccpj\\x9c\\x0f\\xb3\\xe7f\\xcd\\xa6\\xd1\\xa2T\\x1c\\xd6\\xc72\\xaf\\xab\\x9df\\x99\\x8f)Ra\\xbb%\\xf6\\x97\\x19I5\\x1a{\\xbd\\xc6\\xd0\\x92I\\x11i\\xa7/y\\xeb\\x0f\\x95S\\xdcmci{^\\x93\\x90\\xe4XE\\x1ee[\\x92H\\xaf\\xa8\\x9d\\x96eS\\xaa\\xeci\\xd8B\\x88\\xa2*\\x13- 
\\xdbSfFFJ-Mfg\\xa9s#?\\xf4\\xa1\\xbd\\x9cbMQ\\xd5\\xd2\\xa3\\x17\\xa5E=T\\x84K\\xaf\\xafM{%\\x1e\\x1b\\xc9Q\\xa9\\x0e\\xb2\\xde\\xee\\xebk%)FJI\\x11\\x91\\x99\\x9e\\xbc\\xc7\\xe5{\\xb2\\xdc/(\\xbcb\\xea\\xe7\\x11\\xa1\\xb7\\xb8cBf\\xc2uc/HoOv\\xeb\\x8aI\\xa8\\xb4\\xf8\\xb41wi\\xb4E\\xff\\x00,fHa\\xd1m\\xe0\\xe24q\\xb2\\t,\\xcd\\xbef\\x0b\\r\\xd8I\\x8f\\xafv\\xec\\x92m$\\xea\\xd1\\xa9\\x11\\xee\\x9a\\xc9FZ\\x91r?q\\tp\\x01\\xef\\x8d\\x18\\x00\\x00\\x00G\\xd6\\xff\\x00I\\xb5\\xdf\\xa1\\xe6~\\xfa(\\x90\\x11\\xf5\\xbf\\xd2mw\\xe8y\\x9f\\xbe\\x8a5\\x1c*\\xf9O\\xd1\\xba8\\xaf\\xc0\\x00>+@\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xc7\\xeb\\xb1\\x1a\\x9b\\x8bL\\x8aL\\xb8\\x9d\\xf3\\xc7m \\x8d]\\xea\\xd3\\xc8\\x8c\\xb4\\xe4FD$=^\\xe3\\xff\\x00 \\xfd\\xb3\\x9fXz1\\xbf\\xe7Y\\x0f\\xe9y?\\xe6Bh~\\x8a\\xbclH\\x9bES\\xf0\\xf8\\xf9%UM\\xf8\\xab\\xbe\\xafq\\xff\\x00\\x90~\\xd9\\xcf\\xac\\x1e\\xafq\\xff\\x00\\x90~\\xd9\\xcf\\xac,@1\\xb7\\xc5\\xeb\\x9fYc4\\xf3W}^\\xe3\\xff\\x00 \\xfd\\xb3\\x9fX=^\\xe3\\xff\\x00 
\\xfd\\xb3\\x9fXX\\x806\\xf8\\xbds\\xeb&i\\xe6\\xae\\xfa\\xbd\\xc7\\xfeA\\xfbg>\\xb0z\\xbd\\xc7\\xfeA\\xfbg>\\xb0\\xb1\\x00m\\xf1z\\xe7\\xd6L\\xd3\\xcd]\\xf5{\\x8f\\xfc\\x83\\xf6\\xce}`\\xf5{\\x8f\\xfc\\x83\\xf6\\xce}ab\\x00\\xdb\\xe2\\xf5\\xcf\\xac\\x99\\xa7\\x9a\\xbb\\xea\\xf7\\x1f\\xf9\\x07\\xed\\x9c\\xfa\\xc1\\xea\\xf7\\x1f\\xf9\\x07\\xed\\x9c\\xfa\\xc2\\xc4\\x01\\xb7\\xc5\\xeb\\x9fY3O5w\\xd5\\xee?\\xf2\\x0f\\xdb9\\xf5\\x83\\xd5\\xee?\\xf2\\x0f\\xdb9\\xf5\\x85\\x88\\x03o\\x8b\\xd7>\\xb2f\\x9ej\\xef\\xab\\xdc\\x7f\\xe4\\x1f\\xb6s\\xeb\\x07\\xab\\xdc\\x7f\\xe4\\x1f\\xb6s\\xeb\\x0b\\x10\\x06\\xdf\\x17\\xae}d\\xcd<\\xd5\\xdfW\\xb8\\xff\\x00\\xc8?l\\xe7\\xd6\\x0fW\\xb8\\xff\\x00\\xc8?l\\xe7\\xd6\\x16 \\r\\xbe/\\\\\\xfa\\xc9\\x9ay\\xab\\xbe\\xafq\\xff\\x00\\x90~\\xd9\\xcf\\xac\\x1e\\xafq\\xff\\x00\\x90~\\xd9\\xcf\\xac,@\\x1b|^\\xb9\\xf5\\x934\\xf3W}^\\xe3\\xff\\x00 \\xfd\\xb3\\x9fX=^\\xe3\\xff\\x00 \\xfd\\xb3\\x9fXX\\x806\\xf8\\xbds\\xeb&i\\xe6\\xae\\xfa\\xbd\\xc7\\xfeA\\xfbg>\\xb0\\xfe\\xe2\\xd8\\xfd}\\x0e\\xd3a\\x94\\x18\\xfd\\xc7{Q/\\x7f\\xdbR\\xb5\\xd1\\xe8\\xda{\\xcc\\xff\\x00\\x19\\x8b\\x08\\x8f\\xad\\xfe\\x93k\\xbfC\\xcc\\xfd\\xf4P\\x9c\\\\J\\xa9\\xaa*\\xaaf-?\\x1f&\\xe9\\x99\\x99\\xe2\\xbf\\x00\\x00\\xf8\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x0c\\xdf\\x1b\\xfeu\\x90\\xfe\\x97\\x93\\xfed<\\xf2\\xf6\\x8f\\x8a@\\xcc\\xa2b2r:\\xb62\\x99m\\xf7\\xb1\\xe9\\\\\\x96\\x84\\xcby\\x1a)[\\xc9h\\xcfx\\xcbD,\\xf5\"\\xff\\x00\\x84\\xff\\x00\\x10\\xf4c\\x7f\\xce\\xb2\\x1f\\xd2\\xf2\\x7
f\\xcc\\x84\\x8a\\xea\\xe199\\x13W\\x11\\x85LAn\\xa6B\\x9aI\\xb8\\x92\\xe7\\xc8\\x95\\xa6\\xa5\\xef?\\xfeG\\xdb\\xc5\\xbem<\\x98\\xab\\x8c\\xb9\\x03e;G\\xc9\\xb0m\\x9db\\xb5x\\xb4j\\xa7\\xe7d\\xfbF\\xbf\\xa9Z\\xed\\xc9\\xd3i\\x94\\xf7\\xf3\\x9d\\'\\x0b\\xbb234\\xa9\\xa4\\x9e\\xef\\xfcE\\xaauN\\xbb\\xe9\\xba\\xca\\xed)\\x96ct\\xf9E\\r\\xa5E=\\xa6\\xd1k\\xf2\\x98X\\xa5r`\\x9b\\xb1\\xeb\\xa6\\xbd1\\x86\\xdf\\x8e\\xf2\\xc9jZ\\xdaJ[Z\\xd4\\xb4\\xef(\\xfe\\xe6dG\\xedr\\xb1c\\xbd\\x99~\\x00\\x8d\\x875\\xc4\\x9d\\xff\\x00\\x0fe\\xd6\\x19V\\xbe\\x83\\xbb\\xe9\\x1e\\x95\\xe9_p\\xfeP\\xf7w}+\\xef\\xf9\\xeb\\xb9\\xf7\\xa5\\xaf/\\xe6_\\xd9\\x81\\xac\\xb2\\xc39\\xb1NM\"\\xb2\\xd6\\xf6\\xf2\\xb7\"\\xab\\x9b\\x16*Mu\\x13!Fi\\x96\\xd5\\xa2\\x94ix\\x8f\\xbb322In\\xb8i\\xff\\x00\\xa8x\"\\x9cH\\xa7O\\xcd=\\xcd\\x19<\\xed\\xa7d\\xfb\\x13\\xda\\xf6\\xd5\\xf3\\r\\xa05OomW\\x85S\\x9b,\\xe3\\x8d>\\xcb\\x12M\\xc9\\xd2[e\\x06\\x97\\r\\xc5\\xa4\\xcd\\xd77L\\xc8\\xd5\\xcbC\"\\xd7\\xd9\\x17=\\x9d\\xed\\xe7h\\xf9fVx\\xd3\\xf5\\x95\\xf2\\xa4X\\xd6\\xcaz\\x15\\xcb8\\xb5\\xd5|:\\xd9m\\xa4\\x8d\\xb6\\xa5\\x14\\xc4#\\xbdmz\\x9e\\x8amhV\\xa82\\xdd-\\xe2\\x12\\xaf\\xf6U\\x9b\\x99I\\xcd_\\xda\\x16k\\xc5\\x0ee\\x14Qi\\x1d:\\xfa\\xa4\\xd6\\x9c_G}\\xc7\\x9by\\xad\\x1ds\\xda%\\xb8J\"V\\xba):\\xeadd\\x92\\xb6\\xe3{<\\xdauUE\\xb4kM\\xab3s1\\xda\\xe5C\\xae\\x92x\\xdbL\\x94W\\x8fM\\xd9N\\xa5.\\x99\\xbc\\xe1i\\xee%!\\x07\\xa9\\xfb!M8\\x91>_\\xef\\xe6h\\xaf\\xec/nyN\\xdar7\\xa3\\x9e>\\xc5\\x05n8\\xc2\\xeb\\xb2s\\x96\\xda\\xd4\\xef\\xc3iV\\x8b\\x8b\\x14\\xc9{\\xbd\\xd3d[\\xe6\\xe2\\x89[\\xc4\\xebdZ\\x1e\\xf1\\x8fV\\xdclfA\\xda\\xd6\\xc5Yr\\x15T\\xea\\x89y\\x03\\xac\\xff\\x00\\xb56\\xff\\x00\\xa5\\xc6\\x92P\\xa4\\xad/4\\xb4:\\x94i\\xb8\\x95\\xa4\\xd2\\xb4,\\x8f\\x7f^FD?]\\x96\\xf6k\\x87\\xb1\\x9c\\x9e\\xb6\\xcf\\x15\\xbbz47+}\\x0f!\\x85)\\x93x\\xee\\xe4\\x91\\x9a\\xd19K\\xdf.\\xeeF\\xfa\\xdc\\xdeV\\x8a%\\xa5{\\xba\\x16\\xeaL\\xae\\x1b@\\xd9\\xa7\\x1dd\\xd8\\x1d\\xbf\\xc2>\\x83\\xc2\\xd6\\xea\\xb5\\xee{\\x8e\\xf3\\xd
2\\xb5\\x8c\\xf3\\x1d\\xde\\xbb\\xc5\\xb9\\xfc\\xb6\\xf6\\xf6\\x8a\\xfb\\xdd4\\xe7\\xa9t\\x8ak\\x9a-W\\x14\\xd2\\xecG\\x1f\\xed\\x19\\xb4g\\xf1lo8\\xb4\\x81\\x8cp\\x95\\x86Vx\\xcc\\x88Q\\x1a\\x90SR\\x85X9\\t\\x12R\\xe2\\x9c4\\x16\\x8bJL\\xda\\xddV\\xa4Fd\\xb2\\xd7u5N\\xd2\\x9bL\\xcf6\\xa3\\xb1\\x1d\\xb5\\xc9\\xc7\\xe2c\\xd0\\xb6}G\\xe9\\xb4n\\xaa\\xc0\\x9f]\\x8c\\xf7\\x182D\\x87ZR\\x14M\\xb4\\x84\\xafy)%%F\\xad\\xc334\\x91\\x90\\xd8c\\xf6i\\xf4}\\x90S\\xe0\\xbcG\\xbd\\xf0~L\\x9c\\x8b\\xd3\\xfd\\x07N\\xf3KEO\\xee{\\xbe\\xf3\\x97\\xdfw{\\xfb\\xc7\\xee\\xde\\xdd\\xff\\x00\\x84Ws\\xae\\xc9\\xb7\\xb7\\xf5[A\\xc7\\xb1\\xed\\xa4+\\x1d\\xc3\\xb3G\\xe4N\\x9bN\\xfd#sW\\x1eS\\xfa\\x1b\\xebe\\xe3u\\x06\\x94-E\\xbch2=\\x0c\\xd5\\xba\\xa4\\xeb\\xa8\\xe5U8\\x93M\\xbc\\xbb\\xd9tFg\\xdd\\xa7\\xf28\\x19\\xed\\xf6-\\x88E\\x86\\x84cL\\xc7j[\\xf3\\xf1\\xeb{OL\\x92\\xe3\\t{\\xbaB\\xa0\\xb6\\xa4\\xb0IJ\\xd0F\\xb5\\x9a\\x8c\\xcdG\\xa24-O\\xdd\\x94\\xf6\\x9f\\xcb\\xb1\\n\\x9cB\\xf2n\\x0e\\xf1\\xc2\\xcc\\xea\\xda\\x8dMPl\\xb8\\xd4\\xe8\\xb9\\x02\\xfe\\xf2\\x0c\\xa3Y\\x91\\x13N\\x11\\x99\\x93\\x9b\\xa94\\x93+\\xd4\\x8fR\\x16\\xcb\\xcd\\x82dPs\\xdb\\xbc\\xa7\\x01\\xcf\\xd5\\x86H\\xc8\\x18\\x8e\\xdd\\xccY\\x15\\r\\xd8\\xb2\\xfb\\xac\\xb7\\xdd\\xb7!\\xa2R\\xd3\\xdd;\\xb8D\\x93\\xfb\\xe4\\xabu&i3!\\xfd\\xda/f\\x98\\x9b^\\xbf\\\\\\xcc\\xc6\\xfaE\\xa4(\\x95\\x05\\x02\\x9e+\\x0c\\x14u\\xd6\\xccQ\\x92\\x9c\\xb1\\'\\x12\\xa3%H5!\\xbd\\xc3$\\xa4\\x90I2\"=\\xe31\\xa9\\x8c]m\\xf9\\xf9\\x1eF\\x8d[\\x1aM\\xba1\\xfa\\xe2\\xbfr\\x13\\xd7}\\xc2=5u\\xcd\\xad\\xb8\\xc6\\xf6\\x85\\xbf\\xdd\\xa5jR\\x89:\\xeb\\xa6\\xa6g\\xa0\\x92\\x11\\x18\\x85m\\xb5>/W\\x06\\xf6\\xd9\\xbb\\xebx\\xd1\\xd2\\xd4\\xab6\\xa2\\xfa1JY\\x16\\x86\\xe7u\\xbe\\xbd\\xc3?y\\x91(\\xcb]t\\xd0\\xb9\\x14\\xb8\\xf5G\\x06@\\x00\\x14\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00G\\xd6\\xff\\x00I\\xb5\\xdf\\xa1\\xe6~\\xfa(\\x90\\x11\\xf5\\xbf\\xd2mw\\xe8y\\x9f\\xbe\\x8a5\\x1c*\\xf9O\\xd1\\xba8\\xaf\\xc0\\x00>+@\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\
\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xcd\\xf1\\xbf\\xe7Y\\x0f\\xe9y?\\xe6BhB\\xe3\\x7f\\xce\\xb2\\x1f\\xd2\\xf2\\x7f\\xcc\\x84l\\xed\\xa7\\xd2Wm\\x02\\x0e\\x18\\xf2,\\xce\\xeak]\\xf3Jn\\xaeJ\\xe2\\xee\\xee\\xad^\\xd4\\x92l\\xdaA\\xe8\\x85rR\\xc8\\xfd\\xc5\\xf1\\x96\\xbfo\\x16b*\\xd7\\xc9\\x8a\\xb8\\xbe(\\xf6\\xc7\\x80\\xe4\\xf7\\xaa\\xa4\\xa6\\xce1\\xbbk\\x94\\xa9HUt\\x1bh\\xefH%\\']\\xe26\\xd2\\xb3V\\xa5\\xa1\\xeb\\xcb\\x96\\x82\\xde?\\xcd\\xec\\x0c\\xa3\\xe6\\x98\\xbe\\xcd\\xf0X\\xf8J\\xa9\\xb2\\t\\xb9\\xc4\\xf9\\xf0s\\xe9\\xa8\\x8e\\xd3F\\x98\\x96\\xaf\\xc8y1\\xddJ\\x8d\\xd5=\\xdd\\xa4\\xdb&\\xd4I\\xd7EhfI\\xd4l}\\xa0v\\xf3\\x98`\\xf9.Wy\\x86\\xe4w7u8\\xac\\x98\\xad\\xd9\\xd4G\\xc7\\xe2*\\xa2)\\x9fu\\xde\\xc7~c\\x8bK\\xeat\\xd2\\xbd\\xef\\xb8k\\xb9\\xbe\\x82R}\\xe6<4\\xe3\\xff\\x00\\x8ej\\xa0\\x9a]|\\xd1.\\xc4]\\xf9\\xdb4\\xde\\xcd\\xac\\'\\xc7r]l\"}\\x99,\\xbf\\x15\\x06\\xe2^C)^\\x8b35\\xa9\\xbd{\\xbd\\xed4IhZB\\xec\\xdfi\\x1bE\\x85\\xea\\x1a\\xfb 
\\xcb\\xcb$\\x83\\xb4H\\x84\\x89\\xf5GY\\x1e3q]Ur\\xa5\\xb4\\xe3\\x0bBIz\\xea\\xde\\xea\\xc9jROx\\xcd$\\x8eDWk\\x17\\xb4\\xc1gT\\x00\\xe3\\x1d\\x97m\\xbb;\\xce2\\x9d\\x91Y;\\xb4\\x06g\\xab+\\xb4\\x9c\\x9b\\xac\\x1e\\x04\\x18\\xa8]C,\\xb2\\xf9\\xee){\\x86\\xf2R\\xda\\xd2\\xda\\\\7\\x0fU)I\\xdd4\\xeb\\xcfk\\xed1\\x97e\\x98\\xbdf\\x03\\x1b\\x0f\\xb7j\\x92\\xc6\\xf3.\\x83N\\xfc\\xa7\\xe2\\xa2BJ;\\xa8x\\xd7\\xec(\\xb9\\xe9\\xb8\\x95\\x16\\x86G\\xaaH\\xb5\"3\\x161b\\xaaf\\xa8\\x82\\xcd\\x8c\\x074m\\x83#\\xcdqI\\xb4\\xd8\\x869\\xb4\\\\\\x9e\\xe70f\\xb9\\xfb\\x17\\xe3\\xd5\\xe3U\\xb2\\xa4>\\xd1\\xba\\xa2i\\xf9*p\\x9aa\\xa6\\x88\\xc8\\xdb$\\xa3uk\\xdc3#3#\\x11\\xb8\\xde\\xd7s\\xcd\\xaf\\xce\\xd8\\xa4j\\xdc\\x90\\xb0\\xf6\\xf2\\xfcBm\\xbd\\xb2\\xe1W\\xb1!d\\xfbJ\\x88D\\xa6;\\xe4\\xa8\\x91\\xcd\\xc5\\x97\\xb4K-\\xd5\\x19\\x1aM[\\xaaL\\xda\\xc4M\\xacY\\xd3\\x93okk\\'\\xd7\\xc1\\x99a\\x16$\\xdb\\x15\\xad\\xa8Q\\x9f}(rJ\\xd2\\x83Z\\x92\\xdaL\\xf5Y\\x92\\x12\\xa5\\x19\\']\\x08\\x8c\\xfd\\xc4\\x16\\x97u\\xd4\\x85\\x14\\xecg\\xc5\\x80R\\xa4\"$s\\x94\\xf2[\\xef\\x9eY\\xe8\\x86\\x91\\xbce\\xbc\\xb5\\x1f\\xb9%\\xcc\\xfe!\\xc4yn\\xdc/+\\xd3\\xb2L\\x8f!\\x87\\'-\\xc8q\\x8c\\xcb%\\xa2Su\\x117^\\xb5~\\xcf\\xb3\\xbc\\xbdx\\xc5\\x06A\\xf0\\xa5\\xb2M\\xd2Os\\x0eG\\xa3\\xb8m\\x1e\\x8e\\x13r\\r\\xb2e\\xcd\\xd3#\\xd7uf4a\\xe6\\x8ap\\xea\\x8d5K\\xcb;N\\xc3\\xa9M\\x1bD\\xef\\xacm$;\\x9dEn-\\xab\\xce8\\xd6\\xf2I\\x10\\xca&\\xfbZ6D\\x95\\x1a\\x0bx\\xf5%\\x16\\xf1\\x99\\x91\\x11{#\\xc9_\\xd9\\xdb\\x13\\x86\\xe5\\x99H)\\x96\\x91l\\xb1\\x98\\x98\\x9c\\xa8sV\\x854\\xe4(\\xe4\\xe1 
\\xf4J\\x08\\xfb\\xc5\\x13\\xaa\\xdeV\\xbar-\\t:\\r<\\x06\\xb2S\\xc9.\\xcc*\\xfb?\\xd3W\\xcc\\xab\\x98\\xfd\\xd5\\xe5\\xa4\\xca\\xfcY\\xfcI\\x12g>\\xd2\\xdcv#\\xabmf\\xe3\\x86M\\x16\\xf3\\xc5\\xdc\\xa0\\x89\\\\\\x8bMuI\\x99\\xea=06\\x13A_U\\xb3\\x1a\\xf6\\xe5\\xd9)\\x9d\\x9e\\x93eT\\xa5:\\xde\\xf3\\xdb\\x91\\x17\\x14\\xbb\\xff\\x00c\\xda\\xfb\\x9a\\xcc\\xfd\\x8d\\xcfkC\\xf7r\\x1a0\\x06Jy\\x17q\\xf6\\xcb\\xf6G\\xb5,;j\\xd0\\xa6TT\\xdec\\xd5\\xafZ\\xad\\xeb\\xd9w\\xb756\\x11\\'C5-KJ\\x14\\xccd\\xcb[\\xaa3I\\xa5N\\xa8\\xb4?\\xbe5|}1\\x9d\\xec\\xea\\xb7hNcK\\xb1~S\\'Ar\\xc5\\xe4_EZS\\xbe\\xfbIZR\\x95\\xef$\\xf5A\\x93\\x8a\\xd4\\x8bC\\xe4\\\\\\xc8Z@f\\x9c8\\xa6-\\xc5n\\xcd\\xb3\\xbd\\x84\\xd5g9\\x8by2or\\x0cv\\xd1U\\xe5U1tSS\\x1c\\xa7\\xc4%\\xa9ie\\xddP\\xa3-\\x14\\xb5\\x99-\\xb3B\\xcb|\\xf4P\\xf3`}\\x9d\\xb1\\xcd\\x9eM\\xc3$V\\xce\\xb6\\x7f\\x84\\xebf\\xd4\\xd77-\\xe6\\xd6\\x9fG\\x92\\xebn\\x1aW\\xa3dj\\xdc\\xee\\x90\\x84\\x1e\\xa5\\xec\\x97\\xb5\\xbc|\\xc6\\xa4\\x03Y)\\xbd\\xec\\x97e\\xb4\\xbd\\x9d1\\xba+\\xea[h\\xf3mW&\\xab!\\xb4\\xc9XK\\x8e\\xb6hT\\x99\\xe9u/!DM\\x91\\x9bi\\'\\x97\\xbaDde\\xa1j\\xa5s\\xd7\\xe1\\x9e\\xcd\\xf8\\xd4h\\xf1\\xe2G\\x9dk\\x1e\\xb6&T\\x8c\\xbe%{o5\\xdcE\\x94FjSM\\x91\\xb7\\xaaXR\\xd4\\xb5\\x9a5\\xd4\\x94\\xb5n\\x9aH\\xf4\\x1a\\xa8\\t\\xb3\\xa7\\x91v[?\\xb3\\xae7c\\xb3\\xfc\\xb7\\x0frm\\xaak2[\\xb7\\xaf\\xa6:\\x97[\\xef\\x9b}\\xd9I\\x92\\xa4\\xb6}\\xde\\x84\\x8d\\xf4\\x11\\x11\\x1aL\\xf7u\\xe6g\\xccb{_\\xd9&\\xd3d\\xed\\x8b \\xc9pJ{\\xe8\\x97r\\x94\\xcf\\xc1\\xb9\\x02\\xee\\xaa]\\xadgF\\x90\\x83\\xefY\\x91\\x19R\\x9bl\\x8c\\x95\\xabM)Dz\\xa9E\\xbak1\\xd7\\xc03V\\x155E\\xb8-\\xdf\\xc6\\xc9D\\x84\\xef\\x99\\x1a\\xf4-\\xe3/v\\xa3\\xfa\\x00; 
\\x00\\x00\\x00\\x00\\x00#\\xeb\\x7f\\xa4\\xda\\xef\\xd0\\xf3?}\\x14H\\x08\\xfa\\xdf\\xe96\\xbb\\xf4<\\xcf\\xdfE\\x1a\\x8e\\x15|\\xa7\\xe8\\xdd\\x1cW\\xe0\\x00\\x1f\\x15\\xa0\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x07\\xe1:|j\\xc8\\x8e\\xcb\\x99!\\xa8\\x91ZN\\xf3\\x8f\\xbe\\xb2B\\x10_\\x8c\\xd4|\\x88\\xbf\\xacP\\x95\\xb6\\xda\\x8bs6\\xf1\\x1a\\xebL\\xe9\\xdfq;F\\xc1\\x1c?\\xeb\\xf4\\xc7T\\x88\\xe6_\\x8c\\x92\\xe2\\x95\\xff\\x00I\\xf2\\xd4?\\xec\\x93\\xa9\\x1e\\x9e\\xe1\\xf8E\\xec\\xbb\\xb3\\x187Pm\\x18\\xc6{\\xb9P,J\\xda\\x12\\n|\\xae\\xe2\\x1c\\xa2Y\\xaf\\xbce\\x9e\\xf7\\xbbkU\\x19\\x99\\xa5\\t$\\xab\\xe3#\\x1e-\\x9dv\\xb4}~E\\xe1\\xcdr\\xcf&\\x9b\\xb3\\x84\\xe5\\xc8\\xda\\x1ec\\x16\\xe1\\xcd\\xa6=\\x8f#\\xb8\\xb8s\\xb8j\\x03\\xb7\\x0b\\x8am\\x13\\'\\xabj\\xddB\\xcc\\xd2\\xa5\\xa5JI\\x92H\\x8c\\x92\\x94\\xa4\\xa56\\x99}\\x93l\\xea\\xabnX\\xb56c\\x91\\x14jW\\xf19\\x95S\\xe6Y9&l#\\x9b8\\x9b\\x90\\x84\\xbe\\xe1\\xa9jA\\x93_x\\xb3Qh\\xb5\\x16\\x86\\x932\\x1d:[\\x15\\xc3\\x0b\\x1fM\\x19S\\x7f\\xe9i\\xba\\xe2\\x12c\\xd2\\x9e\\xe5?\\xd2}\\'\\xbe\\xde\\xdf\\xde\\xfe[\\xda\\xdd\\xd7w\\xe2\\xd3NA\\x90\\xecW\\x0c\\xca\\xe4\\xe4R-i\\xbd)\\xec\\x84\\xa0\\x15\\x9a\\xbd)\\xe4zABt\\xdd\\x8b\\xc9+-\\xdd\\xc5\\x99\\x9f\\xb3\\xa6\\xf7\\xb9[\\xc5\\xc8M\\x8dV\\xd2\\x7f-?\\xe8\\xbc0)\\xdb;\\xb0-\\xb5\\xe78k[C\\xcf\\x1a\\xa3\\x87\\x88\\xc5\\xbd\\x8a\\x8e#\\x90n\\xb35\\xc7d\\xb4n\\x13\\xa6{\\xfb\\xa4L\\xa4\\xfb\\xad\\xee\\xefS=Re\\xa1\\x14\\x03\\x99\\xa6I\\xb6\\xcc\\'f\\xf0k\\'e23\\x870\\x98\\xd7\\xf6o\\xd4d\\x
a7C\\x01\\x94\\xba[\\x89\\x90\\xf2\\x90\\xda\\xcd\\xd7T\\xe3n\\x1a[\\xdd4hJ\\xde\\xd0\\xb4\\x1df\\xbc\\x06\\x85\\xcc\\xae\\xc7$T\\x1dn\\xac+\\x9b\\xa9\\x93+\\xbes\\xee\\x91P\\xb7\\x16\\x86\\xf7w\\xb7KE:\\xe1\\xef\\x11\\x12\\xbd\\xaf\\x7f\"\\xd2\\x9f7\\xb3\\x1e\\xcd\\'\\xc2\\xc7\\xe2=\\x8d\\x11\\xc7\\xa2\\xafML$\"t\\x94\\x7f\\xb1\\xa4\\xf5(\\xee\\x9a\\\\#}\\xady\\xee;\\xbe\\\\\\xcf\\x973\\x16p\\xaa\\xf8\\x17a\\x18\\xbeK\\x93\\xed\\xa6_f\\xc6\\xac\\xf2\\xcb\\xba\\xa6r\\x0cJ\\xceu\\xe2i\\'.\\x12\\xac\\x1ci\\x10\\xc9&\\xa56e\\xba\\xad\\xe5\\x1a\\xb7\\x93\\xa2\\x8byD\\x93I(\\xc7\\xa7h\\xd9\\x8eI\\xb2\\xab|\\xefe02\\x0bg\\xed\\xf3\\x0f\\x83\\x8f\\n\\x9f:s\\xb2%G)JL9\\x84\\x97\\x96\\xa3Yz9\\xa1R\\x0b\\x9f.\\xf3^^\\xf1\\xd0\\xf8\\xd6\\xc50\\xcc=\\xfcm\\xeazb\\x84\\xbcq\\x89qj\\x88\\xa5<\\xa4\\xc5jJ\\xd2\\xb7\\xd0\\x94\\xa9fF\\x93R\\x13\\xa1\\x19\\x1e\\xe1$\\x89;\\xa5\\xc8W\\xed6Ok\\x97m\\xfa\\x8f6\\xbf:\\x82\\xa4\\xc4\\xa3\\xc9o\\x1eb*\\\\\\\\\\xc7\\x1e\\x94\\xd3H}\\xd9\\nV\\x89I$\\x90\\xb4\\xa1\\x08#\\xd7{x\\xd4G\\xec\\x898uDy\\xe9\\xf4\\xb4\\xfb\\x97\\x84\\xee\\xd4\\xb1\\x9c\\x9a~\\xc7\\xaf(\\xb0\\x8b\\x95\\xd6e\\n\\xad8\\xb5\\xb6s\\x1e5-.\\x92H\\x89Jp\\xc8\\xcfyDF[\\xfc\\xcc\\x8d[\\xdc\\xcc\\x878\\xc2\\xce\\xe4U\\xd5bT0\\xec\\xf3jl\\x8e\\x06\\xd2*+\\xf2\\x1a\\xac\\x9a\\xe9sd0\\x87\\xd8R\\x89\\x94\\xc8J\\x8c\\x9e\\x8c\\xe9\\x11,\\xb53#3=H\\xb9$\\xba\\xcf+\\xc5j\\xf3|v}\\x15\\xdcB\\x9dU9\\xb3jDsZ\\x91\\xbe\\x9f~\\x9b\\xc92Q{\\x8b\\x99\\x19\\x18\\xa4\\xc3\\xec\\xdd\\xb3\\x888}\\x962\\xde6\\x95\\xd5X\\xc9nl\\xb3~d\\x87d\\xba\\xfb{\\xbd\\xdb\\xa7%n\\x1b\\xc4\\xb4n\\xa7uD\\xbdS\\xa7-\\x07J\\xe8\\xaa\\xa9\\xbd)\\x12\\xc1\\xb6\\xfb\\x9dd\\xd5y\\x06\\xdd\\xd8\\xaa\\xc9\\xedj\\xce\\xb6>\\x19\\xe8\\'\\x12Z\\x8b\\xd0V\\xfd\\x8a\\xd0\\xf2\\x9aI\\x99\\xa5&\\xb4\\xe8J-4Y\\x11\\x12\\x88\\xcb\\x90\\x90\\x9d\\xb3\\xbb\\x02\\xdb^s\\x86\\xb5\\xb4<\\xf1\\xaa8x\\x8c[\\xd8\\xa8\\xe29\\x06\\xeb3\\\\vKF\\xe1:g\\xbf\\xbaD\\xcaO\\xba\\xde\\xee\\xf53\\xd5&Z\\x11lp\\xfb1\\xec\\xd6\\x0c\\x0b\\xf8\\x8
d\\xe3\\xabSw\\xc7\\tV\\x8bz\\xc6S\\x8e\\xccTGM\\xd8\\xcaq\\xc5:k5%g\\xae\\xf6\\xba\\xab\\x91(\\xd4DD.+\\xc0h\\\\\\xca\\xecrEA\\xd6\\xea\\xc2\\xb9\\xba\\x992\\xbb\\xe7>\\xe9\\x15\\x0bqhow{t\\xb4S\\xae\\x1e\\xf1\\x11+\\xda\\xf7\\xf2-1\\xb2\\xaaf\\xf5~q\\xf7\\x85\\xbc9of\\xd6\\xb9\\x06\\xde2\\xcd\\x9c\\xc3\\xbe\\xcb\\xf2:\\xe8\\x96{/\\x8du5\\xaa\\x1b7 w\\xf3}!-\\xf7\\xe6\\xa6\\x8c\\x8d\\'\\xed\\xa8\\xf4I\\x91\\x1f\"=RZ\\x08\\xdd\\x93\\xdde\\x15\\x98_g\\xec\\xeaVm\\x92\\\\\\xdbe9\\x07\\xc0W\\x11\\xec\\xacV\\xec91\\xd4\\xcc\\xcd\\xd3\\xee>\\xf1\\x0bA\\xc6l\\xc9i\"Q\\x9e\\xf1\\xa8\\xd5\\xa9\\x8e\\xa4\\xc4\\xb6=\\x88`\\xb3\\xaafQ\\xd4z\\x0c\\x9a\\xaad\\xe3\\xf0\\xd7\\xe9/9\\xddAJ\\xc9igE\\xac\\xc9^\\xd2H\\xf7\\x8fU~]\\x07\\xe5_\\xb1\\\\2\\xaf\\x1e\\xc5(\\xe2\\xd3wUx\\xb4\\xe2\\xb2\\xa7c\\xd2\\x9e?E\\x90Iu$\\xbd\\xe3^\\xf2\\xf9>\\xef%\\x9a\\x8b\\xda\\xf7r-$aU\\xa4\\xcc\\xeb\\xff\\x00\\x9f\\xec\\xbb\\x97*\\xf3\\xbc\\x98\\xf6\\x8d\\x81g\\xb8\\xe4\\xdc\\xa4\\xb0|\\xa70U19\\x90\\xe4\\xa7%\\xa9\\xec9\\xe9\\x05\\xf7*\\xde\\xefr;iSZ\\xb6\\xb2Y/u\\x05\\xbc\\x93\\xde\\xd4Z6s\\xb3\\xdc\\x93j\\xf8>\\xd4\\xec\\x8fh\\x19k9\\x12rL\\x8a\\xb6\\x89L\\xdeHf=y7%\\xd40\\x9e\\xed\\x0b\"Y%D_\\x7f\\xae\\x89\\xd1)\\xd0\\x8bA\\xae\\xb3\\xd9ke\\xf1\\xed\\xdb\\xb3k\\x17Kr\\xd9\\x9cVq\\x8d3\\xa4\\x92!\\xc9\\'I\\xde\\xf2:;\\xcd\\xd6\\x0c\\xd6Z\\xa8\\x9a$\\x92\\xb9\\x91\\x91\\x91\\x99\\x0bev\\x19\\x1f\\x03\\xc6oc\\xe1u\\xf1cO\\x98\\xfc\\xcbF\\xd9\\x9d!\\xd3a\\xd9\\xef\\xa9N)N+\\xdbRP\\xa7\\x15\\xa9\\xee\\x91\\xe8F{\\xa9\\xf7\\x10S\\x85U\\xff\\x00\\xcb\\x81v-\\xb0\\xcd\\xac\\xd9\\xf6\\x83\\xdaE=\\xd3\\x12\\xe5\\xc0\\xa7\\xc6\\xb1\\x86\\xd3qZ\\xcb\\xcam\\xa5^J^\\xeb\\xac\\xba\\x82=\\x17\\xe8\\xe9\\x8c\\xbd\\tE\\xec\\x9b\\xe4e\\xa0\\xe9!\\x9bl\\x1fd\\xcb\\xd9>3n\\x89\\xce\\xc2\\x91\\x90\\xe4\\x17\\x12\\xef\\xee\\x1f\\xaeeMGT\\xa9\\x0b\\xdeRZJ\\x8c\\xd5\\xb8\\x94\\x92R[\\xc7\\xa9\\xee\\xeazk\\xa0\\xd2Gl8\\x98\\xa7\\xfc\\xb8\\xa4\\x80\\x00:\\xa0#\\xeb\\x7f\\xa4\\xda\\xef\\xd0\\xf3?}\\x14H\\n\\xad\\xcc\\\\\\
x82V{S\\xc3v0+\\xa7\\xb7Y-j;8k\\x92\\xcb\\xc8\\xefc\\x11\\xb6d\\x87\\x1bRL\\xcc\\xc8\\xc9dg\\xa6\\x9fz\\xafp\\xd4p\\xab\\xe5?F\\xe8\\xe2\\xd6\\x00g^\\xb0\\xf2\\x9cs\\xd9\\xcapija?}e\\x8b=\\xf0\\xa3\\x04_\\x8c\\xd9\\xddnA\\x19\\xff\\x00\\xca\\x86\\x9c\\xd3\\xdd\\xbc|\\x8c\\xe7\\xf1]\\xa5\\xe2\\xd9\\xb3\\xceG\\xa5\\xbc\\x892kE\\xab\\xd0w\\xfb\\xb9L\\xff\\x00x\\xc2\\xf4q\\x1f\\xd4\\xa4\\x90\\xf8\\xad,\\xc0\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x03\\xf3}\\xf6\\xa2\\xb0\\xe3\\xcf8\\x86Ym&\\xa5\\xb8\\xe2\\x89)I\\x17\\xbc\\xcc\\xcf\\xdcB\\x84\\xfe\\xdbh\\xa6<\\xb8\\xf8\\xc4k\\x1c\\xe2ZOM\\xdcv9=\\x1c\\x8f\\xf1*Z\\xcd1\\xd2\\x7f\\x91N\\x91\\xfeN@4\\x11\\xf8\\xcb\\x98\\xc5|gd\\xca}\\xb8\\xd1\\xdaI\\xa9\\xc7\\x9eY!\\x08\"\\xf7\\x99\\x99\\xf2\"\\x14\\x13ciYI\\xfbr*0H*\\xff\\x00\\x86:N\\xcey\\x97\\xf6\\xd4He\\xa5\\x7f\\xfb\\x1e/\\xca?h\\xbb\\x12\\xc6\\x9d\\x94\\xd4\\xdb\\xe4K\\xcc\\xac[2Rd\\xe4\\x8f\\x9c\\xb4\\xa1E\\xff\\x00\\x13l\\x19\\x13\\r+\\xf2\\xb6\\xda@~K\\xdbu-\\x9a\\xd4\\xce\\'\\n\\xcb:|\\x8ft\\x95@\\xc1..\\xbf\\x96[\\x86\\x88\\xfc\\xbe2\\'\\r_\\x90\\xc7\\xcf\\xa2m/*\\xfeq:\\xa7\\x04\\x84\\xaf\\xfd\\xba\\xf4|\\'?O\\xc7\\xde\\xb8\\x942\\xda\\xbf\\'t\\xe9~Q\\xa1\\xa1\\tm\\tB\\x12IJKBI\\x16\\x84D>\\x80P`\\xecK\\x17L\\xb6\\xa7\\\\\\xb3\\'/\\xb3i[\\xe8\\x99\\x92HT\\xd3m_\\xf36\\xd2\\xbe\\xe4\\xc9\\xff\\x00t\\x84\\x0b\\xe9$\\x92DDDD\\\\\\x88\\x8b\\xe2\\x1f\\xd0\\x01\\x8f\\xd7d\\x7f\\x06\\xdadL|\\x17e+Ki\\x07\\xdeF\\x8f\\xbe\\x83\\xe6_\\x1e\\xa2C\\x8c\\xfe\\x83\\xba\\xe8\\xff\\x00\\x88\\xf4c\\x7f\\xce\\xb2\\x1f\\xd2\\xf2\\x7f\\xcc\\x84\
\xd0\\xfd\\x15uQ\\x13\\xad<\\xbe>IT\\xc5\\xf8+\\xbcg\\xf4\\x1d\\xd7G\\xfcC\\x8c\\xfe\\x83\\xba\\xe8\\xff\\x00\\x88\\xb1\\x00\\xc6|>\\x9e\\xec^9+\\xbcg\\xf4\\x1d\\xd7G\\xfcC\\x8c\\xfe\\x83\\xba\\xe8\\xff\\x00\\x88\\xb1\\x00g\\xc3\\xe9\\xee^9+\\xbcg\\xf4\\x1d\\xd7G\\xfcC\\x8c\\xfe\\x83\\xba\\xe8\\xff\\x00\\x88\\xb1\\x00g\\xc3\\xe9\\xee^9+\\xbcg\\xf4\\x1d\\xd7G\\xfcC\\x8c\\xfe\\x83\\xba\\xe8\\xff\\x00\\x88\\xb1\\x00g\\xc3\\xe9\\xee^9+\\xbcg\\xf4\\x1d\\xd7G\\xfcC\\x8c\\xfe\\x83\\xba\\xe8\\xff\\x00\\x88\\xb1\\x00g\\xc3\\xe9\\xee^9+\\xbcg\\xf4\\x1d\\xd7G\\xfcC\\x8c\\xfe\\x83\\xba\\xe8\\xff\\x00\\x88\\xb1\\x00g\\xc3\\xe9\\xee^9+\\xbcg\\xf4\\x1d\\xd7G\\xfcC\\x8c\\xfe\\x83\\xba\\xe8\\xff\\x00\\x88\\xb1\\x00g\\xc3\\xe9\\xee^9+\\xbcg\\xf4\\x1d\\xd7G\\xfcC\\x8c\\xfe\\x83\\xba\\xe8\\xff\\x00\\x88\\xb1\\x00g\\xc3\\xe9\\xee^9+\\xbcg\\xf4\\x1d\\xd7G\\xfcC\\x8c\\xfe\\x83\\xba\\xe8\\xff\\x00\\x88\\xb1\\x00g\\xc3\\xe9\\xee^9+\\xbcg\\xf4\\x1d\\xd7G\\xfcC\\x8c\\xfe\\x83\\xba\\xe8\\xff\\x00\\x88\\xb1\\x00g\\xc3\\xe9\\xee^9+\\xbcg\\xf4\\x1d\\xd7G\\xfcG\\xf7\\x16\\xb9\\xf8_i\\xb0\\xff\\x00\\xd8f\\xc2\\xee\\xea%\\xff\\x00z\\']5\\x90\\xa4\\xdb\\x16%wh\\xddQ\\xda|\\x15v\\xb3\\xd157L\\xae\\x04\\xc5\\x9f\\xc7\\xb8\\xd3\\xc9J\\x9c/\\xfa\\x90JI\\xeaZ\\x19\\x91\\x90\\x0b\\xa0\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\nM\\xce\\xd8\\xf1z\\xab\\x07\\xab\"\\xccw 
\\xbah\\xf7WUB\\xc2\\xe7Hm_\\x118M\\x11\\x93?\\xd6\\xe9\\xa0\\xbf(\\x0b\\xb0\\xf9Z\\xd2\\xda\\x14\\xa5(\\x92\\x94\\x96\\xa6\\xa3=\\x08\\x88g\\xa5g\\xb4|\\xac\\x8f\\xd0\\xaakphJ/fE\\xcb\\x85a;\\xf2\\xeb\\x1d\\x85\\xa5\\xa4\\x1e\\x9e\\xe5w\\xees>i=4?\\xeb{\\x14\\xa8\\xb3q/e\\xb6\\x16y\\xcc\\x822V\\xe5\\xe3\\xe4q\\x08\\xff\\x00$6\\x92\\x88\\xfc\\xbe#SjW\\xfdG\\xcc\\xcc?I{m\\xc6\\xdc\\x94\\xec:\\x03\\x99\\x9aX\\xb6\\xa3B\\xa2\\xe3LzZP\\xa2\\xf7\\xa5\\xc7\\xf5&\\x1a?\\xc8\\xe3\\x89\\x1f\\x9a\\xcfiYJ\\x8c\\x90T\\xf8,\\x03=\\tJ\\xd6\\xd2z\\x93\\xfdE\\xb8\\xcb*\\xff\\x00\\xbb\\xe5\\xf99\\xf2\\xbe\\xc4\\x86\\xc5|V\\xa3Ea\\xb8\\xd1\\xdaI!\\xb6YA!\\x08\"\\xf7\\x11\\x11r\"\\x1f\\xb0\\x0c\\xfd\\xad\\x88\\xe3\\x93\\x1fnVJ\\xa9\\xd9\\xc4\\xd4\\x19(\\x9d\\xc9_\\xf4\\x96\\x92\\xa2\\xf7-\\x11H\\x93\\x19\\xb5k\\xcfV\\xdaI\\xfb\\xbf\\x11i}i\\xa40\\xd2\\x1bm\\tm\\xb4\\x11%(AhI\"\\xf7\\x11\\x17\\xc4C\\xec\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x06o\\x8d\\xff\\x00:\\xc8\\x7fK\\xc9\\xff\\x002\\x14\\x8d\\xa8\\xf6\\x8c\\xc2\\xb6S\\x91c\\xf4\\xb6\\xf9\\x15\\x0ck\\x0b\\x1b\\x04E\\x97\\x1em\\xc3\\x11\\x9d\\x80\\xc2\\x99u\\xc2\\x92\\xe2\\x14z\\xeej\\xdaQ\\xcft\\x8c\\xdc/k\\xdcGw\\xc6\\xff\\x00\\x9dd?\\xa5\\xe4\\xff\\x00\\x99\\x0f.W\\xb3\\xea|\\xca}\\x0c\\xcb\\x16L\\xdf\\xa6\\xb0M\\x94sA\\'\\xdbp\\x9au\\xa2J\\xf5#\\xd5;\\xaf+\\x97.d\\\\\\xf9\\x0f\\xb5\\x8d\\x9a\\xff\\x00\\xe3\\xe4\\xc5_\\xb9\\x9d/\\xb4<\\xab\\xbd\\xaf\\'\\x0b\\xc41\\xc8y,F\\xa0\\xc0\\xb3\\x93n\\xab\\xc6c\\x93\\x91%\\x19\\xee\\xbf\\x11\\xbd\\xd5zSiAo)IRK\\x99\\x11jfZ\\xd1\\xf3\\x9e\\xdd\\xb8\\xfe%\\x90d\\x8c\\xc6\\x85M>\\x97\\x1c\\x96\\xe4+\\x07de0\\xe2Y\\xba\\xe3G\\xa3\\xfe\\x8b\\x01~\\xdb\\xc4\\x83\\xde\"\\xd5H5\\x9aL\\x90J\\xe4gc\\xdb\\xbe\\xc5\\xb3-\\xab\\xe4\\xb5\\x0c\\xd73\\x89VTWJ\\x87*\\x0eH\\xa2\\x90\\x8b\\xca\\xa3m\\xd4\\xad\\xe2\\x8f\\xba\\x9d\\xc5\\x12\\xd2\\x9d\\xcd\\rh-\\x14z\\x92\\xb9i\\xf9\\xe3{\\x1e\\xda.\\xcc\\xb2l\\x82\\x1e(\\xe6\\x1dg\\x86\\xdd^=t\\x97/\\xd1 
\\xa7W\\x1c\\x87\\tr\\x19B[I\\xa1\\xe4\\xefo\\x9a\\rKA\\x96\\xf7=\\xe2!\\xe2\\x99\\xc5\\xbd\\xa0\\xd0\\xdb?j\\xf3\\xd8\\xcc\\xd6f\\xce\\xc7\\xabd\\xe2Ka\\x99Eb\\xe6K\\x1a<\\xf9\\x0c\\xac\\x88\\xd4\\xb8\\xd0\\x16\\x9d\\xf7\\xb7\\x08\\xcfR\\xdeI\\x9e\\xe9\\xe8F,v{m\\xc8_\\xdb5\\xa6\\xcf\\xf1\\xac)\\xab\\xa7+\\xab\\xe1Y\\xbfm*\\xdc\\xa2\\xc7C/\\xad\\xc4\\x99\\x19\\x13+V\\xf9wz\\xa5%\\xa9+\\xda\\xd4\\xd1\\xba[\\xd9\\xc6\\xd5{0\\xe6\\x99u\\x86\\xd6b\\xd3?\\x89\\xaa\\xbf=i\\xbdn\\xae\\x10\\xfa\\xeck\\xc9\\x11\\xd0\\xd1EBR\\x9d\\xd3ky\\xbd\\xe4\\xab|\\xb7;\\xc5\\x1e\\xe2\\xcc\\x8b]W\\x01\\xd9\\xb5\\xed\\x16\\xd6\\xf2L\\xc2\\xd5\\xca\\xe2f\\xde\\x82\\xa2\\xbb\\xd1\\xe1\\xbc\\xe3\\x8anDc\\x90o}\\xf2\\x13\\xaa\\x0c\\xdeN\\xe9\\xfb\\xcfC\\xd5)\\x16\\'\\x12j\\xb4\\xf0\\xff\\x00\\xdf\\xf4h\\x8c\\xc36\\xe7\\x92m&\\xec\\xe4\\xe2\\xd8\\x01\\xd8`I\\xb3r\\xb7\\x89d\\xdc7\\x1d\\xc7\\xbb\\xa7M\\xa7\\x9ff1\\xa0\\xcdm%iYjkJ\\x95\\xbaz$e\\x9b\\x0f\\xed\\x07\\x96cx>>\\xf6Q\\x8d\\xce\\xb6\\xc6,2\\xb9\\xd4J\\xcbe\\\\\\xa5\\xe9\\x08u\\xdbI\\x0c\\xc7\\xfb\\x82\\x88\\xd4l\\xa5F\\xdb;\\xc6\\xb24\\xee\\xf2F\\xe9\\x11\\x9e\\x81\\xb3\\r\\x99m?c\\xae\\xb5\\x89\\xd1\\xcb\\xc5,vx\\xcd\\xa3\\xd2\\xa3I\\xb09(\\xb3\\x8d\\x11\\xe9\\ny\\xd8\\xfd\\xda\\x13\\xdd\\xadDn-)p\\xd6_\\x16\\xa9=4\\x11\\xb1{:\\xe4\\x8cl\\x1a\\x8f\\tT\\xda\\xa3\\xb5\\x83\\x97\\xa6\\xfd\\xc7\\x89\\xd7;\\x83\\x8eW*\\x9d\\xba\\x93\\xee\\xf7\\xb7\\xfb\\xa5\\x12t4\\x91or\\xd7OhO\\xd4\\x9bN\\xb7\\xb4\\xfd\\x8d\\x13\\xf1\\xb6\\xb2\\x83\\xda\\xe6\\xd5fY\\xd8;\\x0f\\x13\\xd9\\xd5\\xe7\\x96\\xa5\\xa0\\xdd\\xeb\\x89\\x88\\xbf\\x1f\\xcf\\xa2h\\xdevu\\x94^e\\xb4rfd\\x18\\xab\\xf8\\x84\\xd6\\xe6\\xc8\\x8c\\xdc\\x19\\x12\\x91!N2\\x85\\x9aP\\xf1)\\x1c\\x88\\x96E\\xae\\x9f\\x17\\xe32\\xd0\\xce\\x90\\xf6\\xdd-\\xa7m\\xca\\xcfg\\x94X\\x8bVH\\xa6(NZXI\\xb8n+\\xcd\\xb5 
\\xb7\\xbb\\xd6#\\xa9\\x06o\\xb6\\xda~\\xf9[\\xc9\\xe6F\\x92#1w\\xd9\\xccl\\xca-\\x1c\\x94g\\x12\\xea&\\xdb\\x1c\\xe9\\ne\\xcaf\\xdcC%\\x14\\xd6f\\xcaTK\\xe7\\xbeI\\xd3_\\x8b\\xf2\\x9f\\xbc\\xf2\\xcd\\xb3\\xecg4\\xda\\x96\\xd0\\xa8\\xa5Fo\\x13\\xad\\xa9\\xa8\\xb2\\x87:\\x16H\\x9fHM\\xf4$6\\xb4\\xad\\xf6Q\\xa2w\\x14\\x97tRy\\xac\\x93\\xba\\xb3\\xd5*2#\\x1b\\xaaj\\xcb\\x13\\x06\\x88\\xcd\\x97\\xed\\x8fhro6\\xd3\\'!\\xa3\\xaf\\x97\\x8f\\xe3\\x16\\xf3Q\\x1d\\xd6\\xed\\xbe\\xea\\xc13\\t\\x87[\\x8c\\x96\\xca1\\x12\\x92\\xa2Q\\xac\\xddR\\xb5%8e\\xba\\xa2I\\x19\\xcaE\\xdafm\\xb5}\\x80\\\\\\xe5\\xd1\\xf1\\xd2\\xc1\\xbb\\xfc}\\xbb\\xda\\x19\\x05k\\xe92\\x1cy-\\x9b\\xe8K\\xad\\xa5\\xa4\\x92Z5!\\x05\\xcdg\\xde6\\xe1\\x92\\x92\\x8dM%\\xf6\\xde\\xc83\\x9a\\\\\\x9fj1*\\xe4c\\xf2p\\xec\\xe1r\\'\\xa9\\xc9o>\\xdc\\xf8r\\x9c\\x82\\x98\\xfb\\x84\\x94\\xb6\\xa4-\\xb3[M\\xab{x\\x8c\\x88\\xd5\\xec\\x99\\xe8&m\\xabe\\xec\\xa3\\xb2yR\\xcc\\xee\\xe6\\xdb\\xd3\\xe2MS\\xa5\\xb8F\\xa5\\xa6T\\xc2\\x8a\\x98\\xed\\xb6\\xd6\\xa4F\\xae\\xf1\\xd3JS\\xa9\\x11\\x9e\\xf1r!\\xce3Zo3mWE\\xeff\\xb9\\xac}\\xa4l\\xf3\\x19\\xca\\xe2\\xa3\\xba\\x8fu[\\x1e\\xc1\\r\\x19\\xeam\\xf7\\xad\\xa5{\\xa7\\xf9K]?\\xec?,\\x0f(\\xbc\\xc9\\x8f 
\\xf8o\\x15\\x91\\x8b\\xfc\\x1fl\\xfc\\x18]\\xfc\\xa4?\\xf0\\x84dn\\xeeKN\\xef\\xde%z\\x9f\\xb0|\\xcbOx\\xf2\\xec[\\x07wf\\x9b\"\\xc31G\\xd4\\x95\\xc9\\xa7\\xa8\\x8b\\t\\xf5\\xa4\\xf5%:\\x86\\x92\\x95\\x99~CQ\\x18\\xf5`q\\xb3(\\xe7\\x90q\\x84\\xca\\x89d\\xbbg\\xd7Q\\xf0Kn#\\xbb\\xaf=\\xde\\xe5\\x0fo\\xfb\\xdd/kx\\xd3\\xcb\\xdd\\xa0\\xed\\x17\\xb4]\\x95\"\\xdbn\\x96\\xde\\xbc$l\\xea\\x83\\x11j\\xd9\\xca\\xf8\\xf0\\xe5\\xd8\\xcc\\x97p\\xdc\\'\\x12\\xcc\\x85\\xa9=\\xe4v\\x14\\x85\\x1c\\x84\\xb6I3Y\\x92\\x93\\xa1\\xfb%\\xa9\\x99\\x11\\xc1\\xec\\x8bi[C\\xc8\\xb6\\xd1\\xb5z\\x8b\\x9a\\x9a\\xf71\\x8aKt\\xc6\\x8f!6\\x9a\\xbb\\r\\xbfCi\\xc6\\xd0\\x86\\x8a:{\\xce\\xf3\\x7f\\xbcR\\x94\\xb24\\x1b\\x86\\x92\\xde$\\x91\\x9bo[\\x19\\xcd6\\xb9\\x94U\"\\x0bx\\x9du\\\\\\tp\\xe5\\xc2\\xc9\\x97\\xe9\\t\\xbd\\xabSn\\xa5o\\x13\\x1b\\xa9\\xdcQ,\\x92i\\xd0\\xd6\\x92\\xd1G\\xa9+\\x96\\x93x\\xfe\\xcd\\xf3C@\\x00\\x00\\x00\\x00\\x00\\x8f\\xbd\\xc7\\xaa\\xb2\\x8a\\xd7k\\xaek!\\xdb\\xd7\\xbb\\xfc\\xa4I\\xec!\\xf6\\x97\\xfdhQ\\x19\\x1f\\xff\\x00\\x02@\\x00g~\\xa7\\x1b\\xa3\\xf6\\xf0\\xfc\\x92\\xe7\\x122\\xfb\\xd8m?\\xe9\\x90\\x0f\\xfe\\x9fG\\x91\\xbe\\x96\\xd3\\xf9\\x196\\xbf\\xaf\\xde?\\xa7}\\xb4,Y\\xb2\\xf8W\\x1e\\x83\\x98GI\\x9e\\xf4\\xbcm\\xd2\\x89#N\\\\\\xfd\\x16J\\xf7\\x7f\\x1e\\xbb\\xaf\\x99\\xf2\\xe4\\x93\\xf7\\r\\x0c\\x00R\\xe8v\\xc1\\x8a\\xdfY\"\\xac\\xec\\x15St\\xbf\\xbd\\xa9\\xbaa\\xc8\\x12\\xd5\\xcc\\x8b\\xd8i\\xe2I\\xb8Z\\x99\\x16\\xf27\\x93\\xcc\\xb43\\xd4\\x85\\xd0G\\xde\\xe3\\xd5yEc\\xb5\\xd75\\xb0\\xed\\xeb\\xdd\\xe4\\xe4I\\xcc!\\xe6\\x97\\xfdhQ\\x19\\x1f\\xfd\\xc8S\\x11\\xb1\\xf4\\xd0(\\x97\\x87\\xe4\\xb78\\xaaRz\\xfa\\x02_\\xf4\\xda\\xf5\\x17\\xfc\\xbe\\x8f#\\x7f\\xbaO\\xe4aM\\x7f_3\\xd440\\x19\\xd7\\x14g\\xd8\\xaf+\\xdc^6Q\\x11>\\xfb\\x1cU\\xee\\xed\\xed>5*\\x1c\\x85\\x11\\xa4\\xbf\"\\x1euG\\xf1\\x17\\xe3\\x99\\xc6v\\xa9\\x8b\\xe5\\x96\\x07[\\n\\xd5\\x0c\\xdc%;\\xeb\\xa8\\x9e\\xda\\xe2NA~3\\x8e\\xe9%\\xcd?.\\xee\\x9f\\x94\\x05\\xb0\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x0
0\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x04u\\xf6EU\\x8bV;cse\\x12\\xa6\\x03_\\x7f*k\\xe9e\\xb4\\xf2\\xd7\\x9a\\x94d_\\x11\\x80\\x91\\x01\\x9d\\x96\\xd4\\xecrS\\xdc\\xc2\\xf1;\\x0b\\xa6\\xd5\\xee\\xb5\\xb6\\xd6\\xae\\xbc\\xbf))\\xc4\\x9b\\xce\\x17\\xc6Ji\\x95\\xa4\\xf4\\xfb\\xe2\\xe5\\xab\\xd5\\xeeM\\x94{Ync)1\\x95\\xef\\xa8\\xc5\\xd2\\xaa\\xd8\\xfa~%\\xbeJT\\x95\\x19{\\xb5C\\x8d\\x91\\xf3\\xd5\\x1e\\xed\\x02s*\\xdav1\\x85\\xcan\\x1d\\xad\\xbbH\\xb2u;\\xcc\\xd5\\xc6B\\xe4\\xcex\\xbf\\x1bq\\x9a%:\\xbf\\xff\\x00jO\\xde \\xd5\\x96g9I\\xee\\xe3\\xd8\\xb38\\xf45\\x17+<\\xa9\\xcd\\x17\\xa7\\xfc\\xc8\\x86\\xca\\x8dj\\xfe\\xa7\\x1cd\\xff\\x00 \\xb3\\xe2\\xb86?\\x83\\xc5q\\x8a\\nhu(u[\\xcf*3$\\x95\\xbc\\xaf\\xf9\\x9c_\\xdf-_\\xf5(\\xcc\\xff\\x00(\\x9d\\x01\\x9e+d)\\xc8R\\x85fy\\r\\xa6Tz{P{\\xd3\\x85\\\\|\\xcf\\x91\\xc6d\\xd3\\xde\\'\\x9f\\xde\\xbc\\xa7E\\xd6\\x96\\x8a\\xb7\\x1b\\xaef\\xbe\\xa6\\xbe-\\\\\\x06KF\\xe2\\xc2e,\\xb4\\x82\\xfcIJH\\x88\\xbf\\xecC\\xdc\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x002\\xba[\\x98\\x10\\xa7\\xe4-H\\x9d\\x19\\x87\\n\\xdaA\\x9a\\x1dy)?y|FbW\\x88\\xea|R\\x17P\\x8f1n\\x7f\\x1d\\xaa\\x94\\xf2\\xddz\\xb2\\x1b\\xce\\xac\\xf5R\\xdc\\x8e\\x85)G\\xf9L\\xc8|p\\xad/\\x83\\xc0\\xe9\\x91\\xe4>\\xa4\\xf8\\x9c*\\xb5\\x98\\x9e\\xc4\\xc4L\\xddT\\xe2:\\x9f\\x14\\x85\\xd4#\\xcc8\\x8e\\xa7\\xc5!u\\x08\\xf3\\x16\\xbe\\x15\\xa5\\xf0x\\x1d2<\\x83\\x85i|\\x1e\\x07L\\x8f!7\\x8c\\x1eS\\xd92\\xc2\\xa9\\xc4u>)\\x0b\\xa8G\\x98q\\x1dO\\x8aB\\xea\\x11\\xe6-|+K\\xe0\\xf0:dy\\x07\\n\\xd2\\xf8<\\x0e\\x99\\x1eA\\xbc`\\xf2\\x9e\\xc6XU8\\x8e\\xa7\\xc5!u\\x08\\xf3\\x0e#\\xa9\\xf1H]B<\\xc5\\xaf\\x85i|\\x1e\\x07L\\x8f 
\\xe1Z_\\x07\\x81\\xd3#\\xc87\\x8c\\x1eS\\xd8\\xcb\\n\\xa7\\x11T\\x19\\x91\\xfc\\'\\x0bR\\xf7\\x1f\\xa4#\\xcc8\\x8e\\xa7\\xc5!u\\x08\\xf3\\x16\\xbe\\x15\\xa5\\xf0x\\x1d2<\\x83\\x85i|\\x1e\\x07L\\x8f \\xde0yOc,*\\x9cGS\\xe2\\x90\\xba\\x84y\\x87\\x11\\xd4\\xf8\\xa4.\\xa1\\x1eb\\xd7\\xc2\\xb4\\xbe\\x0f\\x03\\xa6G\\x90\\xa0m\\xf3\\x1e\\xa9\\x89\\xb1\\xcc\\xad\\xe6ka\\xc7u\\x10\\xcc\\xd2\\xebQ\\xd0JO\\xb4\\\\\\xc8\\xc8\\x83x\\xc1\\xe5=\\x8c\\xb0\\x94\\xe2:\\x9f\\x14\\x85\\xd4#\\xcc8\\x8e\\xa7\\xc5!u\\x08\\xf3\\x16\\xbe\\x15\\xa5\\xf0x\\x1d2<\\x83\\x85i|\\x1e\\x07L\\x8f \\xde0yOc,*\\x9cGS\\xe2\\x90\\xba\\x84y\\x87\\x11\\xd4\\xf8\\xa4.\\xa1\\x1eb\\xd7\\xc2\\xb4\\xbe\\x0f\\x03\\xa6G\\x90p\\xad/\\x83\\xc0\\xe9\\x91\\xe4\\x1b\\xc6\\x0f)\\xece\\x85S\\x88\\xea|R\\x17P\\x8f0<\\x8a\\xa1E\\xa1\\xd9\\xc2?\\x8f\\x9c\\x84y\\x8b_\\n\\xd2\\xf8<\\x0e\\x99\\x1eA\\xc2\\xb4\\xbe\\x0f\\x03\\xa6G\\x90o\\x18<\\xa7\\xb1\\x96\\x15N#\\xa9\\xf1H]B<\\xc3\\x88\\xea|R\\x17P\\x8f1k\\xe1Z_\\x07\\x81\\xd3#\\xc88V\\x97\\xc1\\xe0t\\xc8\\xf2\\r\\xe3\\x07\\x94\\xf62\\xc2\\xa9\\xc4u>)\\x0b\\xa8G\\x98q\\x1dO\\x8aB\\xea\\x11\\xe6-|+K\\xe0\\xf0:dy\\x07\\n\\xd2\\xf8<\\x0e\\x99\\x1eA\\xbc`\\xf2\\x9e\\xc6XU8\\x8e\\xa7\\xc5!u\\x08\\xf3\\x1ez+\\x18\\x96\\x1bM\\x81\\xe8\\xb2\\x99\\x93\\xb9Q/{\\xb9p\\x97\\xbb\\xab\\xd1t\\xd7As\\xe1Z_\\x07\\x81\\xd3#\\xc8~\\xf0\\xa9\\xab\\xeb\\\\7\"A\\x8d\\x15\\xc5\\x16\\xe9\\xad\\x96R\\x832\\xfcZ\\x91\\x04\\xf8\\x9c<\\xb3\\x14\\xc4\\xdebc\\xd5b\"\\x1e\\xd0\\x00\\x1f0\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00C\\xe4\\xf8u\\x16k\\x00\\xa1_\\xd3\\xc1\\xb9\\x88J\\xdfKS\\xa3\\xa5\\xd2B\\xbe%\\'x\\x8fuE\\xf1\\x19he\\xa7!0\\x003\\xbfV7\\x18\\xd1\\x1a\\xb0\\xdc\\xc2\\xc2\\xb9\\xb2\\xe6\\x9a\\xcb\\xd3U\\xb4/\\xea#qd\\xfa\\x0b\\xf1\\x12^$\\x97\\xc4\\x9d\\x08\\x88\\x13\\xb4,\\x97\\x19=\\xcc\\xbb\\r\\x94Q\\xd2Fj\\xb6\\xc6VvQ\\x88\\x8b\\xe3S$\\x94\\xc9I\\x9f\\xe2KN\\x11s\\xf6\\xfd\\xda\\xe8\\x80\\x02\\x13\\x16\\xcdh3xK\\x95Aq\\x0e\\xdd\\x96\\xd5\\xb8\\xe9\\xc5x\\x96\\xa6\\x9
7\\xff\\x00\"\\xd2\\\\\\xd0\\xaf~\\xa9Q\\x11\\x96\\x9e\\xe16*\\xb9V\\xcb\\xb1\\x8c\\xc6j\\'\\xd8\\xd5!6\\xed\\'q\\xabxK\\\\Y\\xed\\x17\\xe2D\\x96\\x8d.$\\xb9\\x17\"V\\x87\\xa1jF!UC\\x9f\\xe2&j\\xa6\\xbd\\x8b\\x98W\\x972\\xaf\\xc8\\xd2Q\\xe5$\\xbf\\x12&2\\x8d\\xd3\"\\xf8\\x89\\xc6T\\xa3\\xe5\\xab\\x9e\\xf3\\x01\\xa2\\x00\\xa0\\xc7\\xdb-<\\x19MB\\xca\\xa2\\xcc\\xc2\\'\\xb8iB\\x13x\\x84\\xb7\\x19\\xd5\\x9f\\xb9-\\xcbI\\xa9\\x85\\x99\\x9f\"N\\xf9/\\xfe\\x92\\x17\\xd4\\xa8\\x94\\x92RL\\x8c\\x8c\\xb5#/\\x8c\\x07\\xf4\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00P\\xa6\\xedj5\\x84\\xc7\\xeb\\xb0\\xfa\\xe7\\xb3;&Tm\\xba\\xb8K&\\xe0FY\\x1e\\x86\\x97\\xa5\\xab\\xd8##\\xf7\\xa1\\xbe\\xf1\\xc2\\xf8\\xd0\\x02\\xfa)\\x97\\xfbZ\\xc7\\xe9-\\x1c\\xa7\\x8c\\xe4\\x8c\\x83 oM\\xeaz6N\\\\\\x96\\xf5\\xf7w\\xbb\\xbe\\xcb\\x04\\x7f\\x12\\x9eR\\x13\\xf9Dz\\xb6{{\\x98\\x12W\\x99\\xe4oz*\\x8b\\xda\\xa1\\xc7]\\\\8\\x9f\\xd4\\xe3\\xe4d\\xfb\\xdf\\x88\\xfd\\xa6\\xd0\\xa2\\xd7y\\xb3#\\xd0\\\\\\xe8\\xb1\\xea\\xbc^\\xb9\\x10)\\xeb\\xa2\\xd5\\xc1A\\x9a\\x93\\x1e\\x1b)i\\xb23\\xf7\\x9e\\x89\"-O\\xe3?\\x8c\\x05--\\xed\\x170=]r\\x16\\xcf\\xab\\x14G\\xa2c\\x9a,m\\x0c\\x8f\\xe3\\xdeR}\\x1d\\x95\\x17\\xe2\\xdd\\x90\\\\\\xbd\\xfc\\xf4)\\x1c\\x7fdx\\xdd\\r\\xa3v\\xeeEv\\xee\\xfd\\xbdwnn\\x9eT\\xc9h\\xd7\\xdf\\xdd\\xad\\xcd{\\xa2?\\xf9Z$\\'\\xf2\\x0b\\x98\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x0c\\xf7\\xb4\\x11\\x91lc,\\xd7wOC?\\xbe\\xd7O\\xbeO\\xe2\\xe64!\\x9ev\\x835\\x96\\xc5\\xf2\\xd3ox\\x97\\xe8g\\xa6\\xef\\xbf]\\xe2\\x01\\xa1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00
\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x0f\\xcaTV\\'Fv<\\x96[\\x91\\x1d\\xd4\\x9a\\x1ci\\xd4\\x92\\x90\\xb4\\x9f\\xbc\\x8c\\x8f\\x91\\x97\\xe4\\x14\\x05ly\\x9c|\\xcd\\xec\\x1a\\xe2V\\x14\\xe1s*\\xf8\\xc9)\\x15K\\xfc\\x8a\\x86\\xbfe\\x05\\xfd\\xc2\\x99Q\\xf2\\xd5Zr\\x1a \\x00\\xce\\xfd`d8\\x82I9\\xa68\\xe2\\xa2\\x11\\xe8w\\x98\\xdaW6)\\x17\\xfc\\xce\\xb0E\\xdf\\xb3\\xaf?\\xbdK\\xa8I\\x11\\x9a\\x9c!s\\xa0\\xc8\\xaa\\xb2\\xaa\\xb6\\xac\\xa9\\xac\\xa2[W\\xbb\\xfc\\x9c\\xa8O%\\xd6\\xd5\\xf8\\xf4RL\\xc8H\\x8afA\\xb2\\xba\\x9b[\\x17-\\xab^\\x95\\x8cd\\x0b=\\xe5[R\\xac\\x99q\\xd5\\x7f\\xfa\\xc826\\xdf.E\\xc9\\xd4/O\\x8bOx\\x0b\\x98\\x0c\\xec\\xf2\\xec\\xa7\\x04\\xd19ma^U\\'\\xff\\x00\\xee\\x0cz2\\xd4\\xa6\\xcb\\xf1\\xc8\\x87\\xaa\\x9cIi\\xa7\\xb6\\xd1\\xb8^\\xf34\\xb6D.\\xd4\\xb7u\\xf9%\\\\k*\\xa9\\xd1\\xec\\xab\\xe4\\xa7}\\x99Q\\\\\\'\\x1bp\\xbf\\x19(\\xb9\\x18\\x0fp\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\n\\xf6W\\x9a\\xc3\\xc5\\xbd\\x1e?q\"\\xd2\\xe2^\\xf7\\xa1\\xd4@$\\xaaL\\x93/y\\x91(\\xc9(Ar\\xd5\\xc5\\xa9(N\\xa4F\\xa23\"?\\xeei\\x95p\\xc6)\"\\xd6+)\\xb0\\x90\\xae\\xe9\\x98L%z&C\\xef--\\xb0\\x8d\\xeez%KZ\\x08\\xd5\\xcfB3>z\\x0f\\xe6+\\x8b\\xb1\\x8e\\x1b\\xef\\xc8}3\\xef\\xec\\x08\\x9c\\x9d`\\xb4\\x12\\\\\\x90i\\xd7D\\xa4\\xbd\\xe9i\\xbd\\xf3$#S$\\x92\\xb9\\x99\\xa9JR\\x82\\xbcx\\r\\xaev\\x93w:\\x98\\xda\\xab\\xd7\\xa1\\xa7\\x18\\xabqh\\x88\\x92\\xd3\\x9ad\\xbb\\xa9*Q\\xfe22CFZ\\x11\\xb6\\xad7\\x8e\\xf9\\x0e\\x1cz\\xf8\\xadF\\x8a\\xc3q\\xa34\\x92Cl\\xb2\\x82B\\x10\\x92\\xf7\\x11\\x11r\"\\xfc\\x83\\xf6\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x9d\\xf6\\x86\\xd3\\xd4\\xae]\\
xa9j^\\x84~\\xef\\xed\\x10\\xd1\\x06y\\xda\\x0c\\xc8\\xb6/\\x96\\x99\\xa8\\xd2^\\x86|\\xd2Z\\x99{I\\x01\\xa1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xa4\\xdd\\xec\\xcd\\xa3\\xb6\\x91{\\x8b\\xce\\x99\\x16\\xaa\\xfac\\xb6\\x94r]&+\\xef\\x1f/\\xbaGQ\\xe8H\\x8f1Z\\xfbJQ\\x9e\\xebo\\xe8D\\xb3\\xddB\\xfe\\xe8iS\\xc1z\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x1d\\x91>\\xe4\\\\~\\xcd\\xe6\\x96m\\xba\\xdcWV\\x85\\x97\\xbd&H3#\\x19\\xed=4\\xc9\\xb5\\x10d;\\x91\\xdd\\x9b\\x8f0\\x87\\x15\\xa4\\xbd\\x0bSI\\x19\\xff\\x00\\xc2=xX\\x1bZf\\xa9\\xaa\\xcb\\xa5\\xaf-L\\x06o\\xc3\\xb2~q\\xdeu\\x9fd8vO\\xce;\\xce\\xb3\\xec\\x8e\\xbb\\xad=}\\xa5/O6\\x90\\x037\\xe1\\xd9?8\\xef:\\xcf\\xb2\\x1c;\\'\\xe7\\x1d\\xe7Y\\xf6Cu\\xa7\\xaf\\xb4\\x97\\xa7\\x9bH\\x01\\x9b\\xf0\\xec\\x9f\\x9cw\\x9dg\\xd9\\x0e\\x1d\\x93\\xf3\\x8e\\xf3\\xac\\xfb!\\xba\\xd3\\xd7\\xdaK\\xd3\\xcd\\xa4\\x00\\xcd\\xf8vO\\xce;\\xce\\xb3\\xec\\x87\\x0e\\xc9\\xf9\\xc7y\\xd6}\\x90\\xddi\\xeb\\xed%\\xe9\\xe6\\xd2\\x00f\\xfc;\\'\\xe7\\x1d\\xe7Y\\xf6C\\x87d\\xfc\\xe3\\xbc\\xeb>\\xc8n\\xb4\\xf5\\xf6\\x92\\xf4\\xf3i\\x003~\\x1d\\x93\\xf3\\x8e\\xf3\\xac\\xfb!\\xc3\\xb2~q\\xdeu\\x9fd7Zz\\xfbIzy\\xb4\\x80\\x19\\xbf\\x0e\\xc9\\xf9\\xc7y\\xd6}\\x90\\xe1\\xd9?8\\xef:\\xcf\\xb2\\x1b\\xad=}\\xa4\\xbd<\\xdc\\x9f\\xda[#\\xcb\\xbb\"[7\\x1a\\xa69\\xdbl\\xa6\\xe6\\xce5\\xb5T%$\\xff\\x00\\xf4k\\x06$\\xa2J\\xa2!De\\xba\\xc3\\xa6\\xd9\\xe8\\x8eDD\\xa5n\\x17\\xb0\\xb2_Hvn\\xc02X\\x152s\\xad\\xa2>s6\\x8b\\x92\\xb6\\x85\\xcbJ\\xc8\\xc9\\x15q\\x08\\xcdL\\xc1e\\'\\xfc\\x9aS\\xbcjY\\x11\\x16\\xabQ\\xef\\x1a\\x8c\\xb7\\x8eJ\\xdfg\\xf1r\\x06\\x19f\\xd2\\xca\\xc6\\xc9\\x96^D\\x96\\x9b\\x98\\xea\\x1dKn\\xa0\\xf7\\x90\\xe2II=\\x14\\x93\\xe6J.d~\\xe1\\xee\\xe1\\xd9?8\\xef:\\xcf\\xb2\\x1b\\xad=}\\xa4\\x
bd<\\xda@\\x0c\\xdf\\x87d\\xfc\\xe3\\xbc\\xeb>\\xc8p\\xec\\x9f\\x9cw\\x9dg\\xd9\\r\\xd6\\x9e\\xbe\\xd2^\\x9em \\x06o\\xc3\\xb2~q\\xdeu\\x9fd8vO\\xce;\\xce\\xb3\\xec\\x86\\xebO_i/O6\\x90\\x037\\xe1\\xd9?8\\xef:\\xcf\\xb2\\x1c;\\'\\xe7\\x1d\\xe7Y\\xf6Cu\\xa7\\xaf\\xb4\\x97\\xa7\\x9bH\\x01\\x9b\\xf0\\xec\\x9f\\x9cw\\x9dg\\xd9\\x0e\\x1d\\x93\\xf3\\x8e\\xf3\\xac\\xfb!\\xba\\xd3\\xd7\\xdaK\\xd3\\xcd\\xa4\\x00\\xcd\\xf8vO\\xce;\\xce\\xb3\\xec\\x87\\x0e\\xc9\\xf9\\xc7y\\xd6}\\x90\\xddi\\xeb\\xed%\\xe9\\xe6\\xd2\\x00f\\xfc;\\'\\xe7\\x1d\\xe7Y\\xf6C\\x87d\\xfc\\xe3\\xbc\\xeb>\\xc8n\\xb4\\xf5\\xf6\\x92\\xf4\\xf3i\\x003~\\x1d\\x93\\xf3\\x8e\\xf3\\xac\\xfb!\\xc3\\xb2~q\\xdeu\\x9fd7Zz\\xfbIzy\\xb4\\x80\\x19\\xc5\"&\\xd5gUQN\\xe2\\xc6li1e)\\xc6\\xa6?\\xde\\'T\\x1b[\\xa6\\\\\\x8bC\\xf6\\x8f\\xff\\x00\\x91\\xa3\\x8f>6\\x16\\xcab/{\\xc5\\xfe\\xdfe\\x00\\x00y\\xd0\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x19\\xe7hB\\xd7b\\xd9i\\x1a\\x89%\\xe8G\\xcd^\\xe2\\xf6\\x88hc=\\xed\\x05\\xcbc\\x19o4\\xa7\\xfd\\x8c\\xf9\\xa8\\xb5\"\\xf6\\x93\\xef 
\\x1a\\x10\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xf3YV\\xc4\\xb8\\xae\\x95\\x02|ff\\xc1\\x94\\xd2\\xd8\\x91\\x1aB\\tm\\xba\\xda\\x88\\xd2\\xa4)\\'\\xc9I23##\\xe4dc\\xd2\\x00(\\xb8U\\x94\\x9cn\\xfd\\xec\"\\xd6S\\xd3\\\\b1\\xcd\\xa8\\x9f)F\\xa7e\\xc2%\\x92\\x14\\x85\\xac\\xcc\\xcdn\\xb0\\xa56\\x85,\\xf9\\xa9.\\xb2\\xa33R\\x96b\\xf4(\\x1bb\\xff\\x00\\xd2*i\\xf2\\x96\\x8bI8\\xfd\\xa4y&\\xa2.g\\x19\\xc5\\x94y)>Fzw/-z{\\x8dM\\xa3S-5+\\xf8\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8b\\xca\\xbf\\x06.?3{\\xfd\\x06*\\x98\\xe7\\xe0\\xf5_\\xe6\\xad\\x7f\\xa0\\x85\\xaf*\\xfc\\x18\\xb8\\xfc\\xcd\\xef\\xf4\\x18\\xaac\\x9f\\x83\\xd5\\x7f\\x9a\\xb5\\xfe\\x82\\x1fO\\xc3\\xff\\x00\\x0c\\xfc\\xfe\\xc9W\\xedH\\x80\\x00\\xea\\xe6\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\n9mz\\x98\\xf6\\xda{.\\xf4i\\xdc@X\\xf7\\x12zOv\\x8fE\\xf4oI\\xf4}\\xcd\\xed\\xfd\\xee\\xf3\\x7f\\x9e\\x9b\\x9ai\\xff\\x00\\x16\\xbc\\x85\\xe0q\\xb6\\xd9\\xff\\x00\\xfe\\xaf\\xf6\\x81\\xff\\x00\\xf8*\\x7f\\xfez\\xc6w\\xb3\\x9c\\x16\\xa7f4]\\x93\\xb3lq\\x12\\xab\\xf2<\\x9alZ\\xdb\\x99~\\x98\\xf3\\x9e\\x9f\\x1d\\xe8\\xaa56\\xe2T\\xa3I\\xa5;\\xa5\\xbaZh\\x9d\\x0bM4!\\xe4\\xdbLU11\\xf9\\xa45gj\\xed\\xb7j\\xd0v\\x1f\\xb2\\xdb\\xfc\\xe2\\xca\\x14\\x8b\\x18U\\r!\\xc5\\xc5\\x8ai\\'\\x1c\\xdfq\\r\\x91\\x11\\xa8\\xc8\\x8b\\x9a\\xcb_\\xc9\\xaf\\xbc]\"\\xbeR\\xa32\\xf1\\x16\\xe98\\x82Y\\x11\\xfcZ\\x96\\xa3\\xfc\\xa7\\xda\\xcc\\\\\\x176\\xd8\\x8e\\xdc2\\xbc\\xee\\xe5\\xa9\\x1bo\\x85\\x91\\xbf\\n4\\x1b\\x0bU\\xb5\"\\x1b\\x08\\x96\\xd2\\x1bb_yu\\x8e\\x10\\x00\\x00\\xf0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xcbv\\xe1y_{\\xb0\\xfc\\xd9\\xda\\xd9\\xd1\\xe7\\xb7\\x1d\\xa7\\xa2<\\xa8\\xee%\\xc2m\\
xe6\\xdc$8\\xda\\xb4\\xf7)*###\\xe6FCR\\x1cQ\\xdb\\x93\\r\\xca6R\\xcd\\xae\\xd3\\xb0G\\x15\\xf0-\\xc3H\\x81\\x98Rk\\xab\\x12\\x0b\\xefZ\\x99\\xbb\\xff\\x00\\n\\xc8\\xf7PkI\\x91\\x91\\xeer2S\\x9a\\x87d\\xc8\\xbc\\xae\\x89m\\x12\\xad\\xe9\\xd1\\x9a\\xb3\\x98\\x87\\x1d\\x8f\\rn\\xa4\\x9ey\\x08\\xd3}IF\\xba\\x9aS\\xbc\\x9dL\\x8bB\\xdeN\\xbe\\xf2\\x1e\\xe1\\x83\\xf6W\\xd9~GGI+<\\xda,\\x87,6\\x95\\x946\\x87&\\xaeBt:\\xf8\\xa4f\\xa6a\\xb6\\x9f\\xfd\\xb2-\\xed\\xe5%$\\x92\\xde=\\x0c\\x8fp\\x8co\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x002\\xdd\\xb7\\xe4U\\x97\\xbb\\x07\\xda;\\xd5\\xb3b\\xd9&\\x05|\\xe8\\xcf\\x93\\x0e\\x92\\xc9\\xa9\\r%D\\xb6\\x97\\xbaz\\xa5IQs.FCC\\x97}[\\x02\\xd6\\xbe\\xb2L\\xf8\\xecX\\xd8\\x13\\x87\\x12#\\x8e\\x91:\\xf96D\\xa7\\r\\t\\xf7\\xa8\\x92FZ\\x99r-\\xe2\\xd7\\xdeC\\x90{q\\xe0\\xd9&\\xcf \\xddm;\\x07q\\xef\\x83\\xad\\xa1\\x1dVgJ\\xd9\\x97u22\\x93\\xdd\\xb7+t\\xc8\\xf4q\\x04d\\x9d\\xf2\\xd4\\xc8\\xb7yn\\xf7\\x9b\\xdaoe=\\x9b\\xe4\\x91\\xea\\xe5m/h\\xef\\xb9;h\\xb9K(5\\x93\\xe8\\xdd:\\xc8\\x1a\\xef\\xb5\\r\\xb4\\xff\\x00\\xed\\x96\\xa7\\xbe\\xa4\\x96\\x9e\\xd1\\x91\\x19o$\\xcc\\xc3\\xa0\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x04^U\\xf81q\\xf9\\x9b\\xdf\\xe81T\\xc7?\\x07\\xaa\\xff\\x005k\\xfd\\x04-yW\\xe0\\xc5\\xc7\\xe6o\\x7f\\xa0\\xc6iA\\x80\\xd0\\xbdEZ\\xe2\\xe0\\xea\\xb5FmF}\\xf3\\x9c\\xcc\\xd2_\\xf5\\x0f\\xad\\xe1b\\x99\\xc2\\x9c\\xd3my_\\xe1\\xf3\\x82m\\x97U\\xc4\\x05w\\xd5\\xee?\\xf2\\x0f\\xdb9\\xf5\\x83\\xd5\\xee?\\xf2\\x0f\\xdb9\\xf5\\x87\\xa6\\xd8]S\\xe9\\x1e\\xeez,@+\\xbe\\xafq\\xff\\x00\\x90~\\xd9\\xcf\\xac\\x1e\\xafq\\xff\\x00\\x90~\\xd9\\xcf\\xac\\x16\\xc2\\xea\\x9fH\\xf74X\\x80W}^\\xe3\\xff\\x00 \\xfd\\xb3\\x9fX=^\\xe3\\xff\\x00 
\\xfd\\xb3\\x9fX-\\x85\\xd5>\\x91\\xeeh\\xb1\\x00\\xae\\xfa\\xbd\\xc7\\xfeA\\xfbg>\\xb0z\\xbd\\xc7\\xfeA\\xfbg>\\xb0[\\x0b\\xaa}#\\xdc\\xd1b\\x01]\\xf5{\\x8f\\xfc\\x83\\xf6\\xce}`\\xf5{\\x8f\\xfc\\x83\\xf6\\xce}`\\xb6\\x17T\\xfaG\\xb9\\xa2\\xc4\\x02\\xbb\\xea\\xf7\\x1f\\xf9\\x07\\xed\\x9c\\xfa\\xc1\\xea\\xf7\\x1f\\xf9\\x07\\xed\\x9c\\xfa\\xc1l.\\xa9\\xf4\\x8fsG\\xa6n\\x17\\x8fY\\\\H\\xb6\\x97CY*\\xd6D\\x05U\\xbd9\\xe8m\\xad\\xf7a\\xa9[\\xca\\x8c\\xa5\\x99o\\x1bF\\xa3\\xd4\\xdb3\\xdd3\\xe7\\xa0\\xfc\\xcb\\x01\\xc6\\n\\x1d$B\\xc7*J-\\x1a\\xd2\\xedS\\x1e\\x82\\xd6\\xe5z\\xd2[\\xa9S\\t\\xdd\\xd1\\xa3\"3\"4i\\xa1\\x0f\\xcb\\xd5\\xee?\\xf2\\x0f\\xdb9\\xf5\\x83\\xd5\\xee?\\xf2\\x0f\\xdb9\\xf5\\x86r`\\xf3\\x9fH\\xf7]9\\xbc9\\x0e\\xc6\\xf0\\x0c\\xba\\xceE\\x8d\\xee\\r\\x8d\\xddXHA6\\xf4\\xbb\\x1a\\x88\\xef\\xba\\xea\\x0bM\\x12\\xa5\\xad\\x06fE\\xa1r3\\xf8\\x88z\\xb2}\\x97\\xe1\\xb9\\xb4\\x98\\x922,J\\x8a\\xfd\\xf8i\\xdd\\x8c\\xed\\x9dk2T\\xc1k\\xae\\x885\\xa4\\xcd%\\xfdC\\xf4\\xf5{\\x8f\\xfc\\x83\\xf6\\xce}`\\xf5{\\x8f\\xfc\\x83\\xf6\\xce}`\\xc9\\x81\\xce}#\\xdc\\xd3\\x9b\\xd7\\x1b\\x0e\\xa0\\x87\\x90\\xaa\\xfa=\\x1dk\\x17\\x8a\\x8aP\\x95f\\xdcF\\xd3$\\xe3\\x91\\x91\\x93&\\xe9\\x16\\xf6\\xe1\\x19\\x11\\x92u\\xd3\\x91r\\x1fx\\xe6+K\\x87\\xd7\\xaa\\x05\\r<\\nH*uO\\x1cj\\xe8\\xa8\\x8e\\xd1\\xb8\\xa3\\xd5K\\xddA\\x11o\\x19\\xf33\\xf7\\x98\\xf0\\xfa\\xbd\\xc7\\xfeA\\xfbg>\\xb0z\\xbd\\xc7\\xfeA\\xfbg>\\xb0\\xb9py\\xcf\\xa4{\\x9asX\\x80W}^\\xe3\\xff\\x00 \\xfd\\xb3\\x9fX=^\\xe3\\xff\\x00 \\xfd\\xb3\\x9fX[auO\\xa4{\\xa6\\x8b\\x10\\n\\xef\\xab\\xdc\\x7f\\xe4\\x1f\\xb6s\\xeb\\x07\\xab\\xdc\\x7f\\xe4\\x1f\\xb6s\\xeb\\x05\\xb0\\xba\\xa7\\xd2=\\xcd\\x16 \\x15\\xdfW\\xb8\\xff\\x00\\xc8?l\\xe7\\xd6\\x0fW\\xb8\\xff\\x00\\xc8?l\\xe7\\xd6\\x0bauO\\xa4{\\x9a,@+\\xbe\\xafq\\xff\\x00\\x90~\\xd9\\xcf\\xac\\x1e\\xafq\\xff\\x00\\x90~\\xd9\\xcf\\xac\\x16\\xc2\\xea\\x9fH\\xf74X\\x80W}^\\xe3\\xff\\x00 \\xfd\\xb3\\x9fX=^\\xe3\\xff\\x00 
\\xfd\\xb3\\x9fX-\\x85\\xd5>\\x91\\xeeh\\xb1\\x00\\xae\\xfa\\xbd\\xc7\\xfeA\\xfbg>\\xb0z\\xbd\\xc7\\xfeA\\xfbg>\\xb0[\\x0b\\xaa}#\\xdc\\xd1 \\xcf\\xf4\\x8b\\x8f\\xfeg7\\xfc\\xd9\\x1a\\x00\\xcbh\\xf1\\xca\\xea-\\xa3\\xd2*\\x0c~\\xe0\\xdd\\x850\\x97\\xed\\xa9Z\\xe8l\\xe9\\xef3\\xfccR\\x1e\\x0f\\x19k\\xd1\\x97\\x85\\xbe\\xf2\\xe9\\xf0\\x80\\x00\\x07\\xcf\\x00\\x00\\x01R\\xda\\xae\\xa7\\x83\\xccN\\xf2\\x92K~*\\x14hQ\\xa4\\xcd\\'!\\xb22\\xd4\\xb9\\xf3#2\\x15\\xfe\\x03\\xa3\\xf9\\x1a\\xba\\x87>\\xb0\\xb0mO\\xf0*O\\xe71?\\xf2Z\\x1f\\xc1\\xf6<=uQ\\x81\\x19f\\xda\\xcf\\xd2\\x92fb\"\\xc8\\x0e\\x03\\xa3\\xf9\\x1a\\xba\\x87>\\xb0p\\x1d\\x1f\\xc8\\xd5\\xd49\\xf5\\x84\\xf8\\x0e\\xdbl^\\xa9\\xf5c5\\\\\\xd0\\x1c\\x07G\\xf25u\\x0e}`\\xe0:?\\x91\\xab\\xa8s\\xeb\\t\\xf0\\r\\xb6/T\\xfa\\x99\\xaa\\xe6\\x80\\xe0:?\\x91\\xab\\xa8s\\xeb\\x07\\x01\\xd1\\xfc\\x8d]C\\x9fXO\\x80m\\xb1z\\xa7\\xd4\\xcdW4\\x07\\x01\\xd1\\xfc\\x8d]C\\x9fX8\\x0e\\x8f\\xe4j\\xea\\x1c\\xfa\\xc2|\\x03m\\x8b\\xd5>\\xa6j\\xb9\\xa08\\x0e\\x8f\\xe4j\\xea\\x1c\\xfa\\xc1\\xc0t\\x7f#WP\\xe7\\xd6\\x13\\xe0\\x1bl^\\xa9\\xf53U\\xcd\\x01\\xc0t\\x7f#WP\\xe7\\xd6\\x0e\\x03\\xa3\\xf9\\x1a\\xba\\x87>\\xb0\\xc6\\xf6\\xb1\\xda[/\\xc3\\xf6\\xdc\\x8d\\x9a\\xe1{.<\\xfe\\xcc\\xb1\\xf4\\xe4O8\\x9b\\xf6\\xab\\xd4\\x86=!L\\xa8\\x89.4d\\xa3%\\x12=\\xcb\\xd4\\xf7\\xfd\\xdc\\xb5\\x16\\xbd\\x99v\\x97\\xc36\\x81\\xb2*\\xdd\\xa0\\xd8X\\xc6\\xc3j\\xe5<\\xe47\\x91\\x90\\xcbj7\\xa3\\xc9B\\x94\\x95\\xb4kR\\x89&z\\xa0\\xcc\\xb4>e\\xcfB\\xe6E\\xca\\xb0\\xf9\\x93\\xb4\\\\N\\x1d\\x1d}\\xd3\\xf9=3\\x14\\xd6\\x0e\\x13P\\xec\\\\\\xb0i1\\xe4\\xac\\xc9FIm\\xc3V\\xea\\xcc\\xc9\\x0b=\\x08\\xcc\\xf4J\\xbf\\x11\\x8af\\xd3v\\xf3O\\x8d\\xec?/\\xda\\x06#cQ\\x987G\\x11\\xc7\\x93\\xe83\\x90\\xfcu\\xb8\\x9d=\\x85-\\xb3==\\xe5\\xa9{\\xc6\\xa7\\xc4W\\x11y\\xae}S5\\\\\\xd7^\\x03\\xa3\\xf9\\x1a\\xba\\x87>\\xb0p\\x1d\\x1f\\xc8\\xd5\\xd49\\xf5\\x84%v\\xd81\\xa4P@~\\xe3 
\\xa4\\xac\\xb9r\\x8d\\xbb\\xc9\\x15\\xd2,Ze\\xc6\\xa3\\x9a\\tJt\\xd2\\xb5\\x11\\xa5\\xa232\\xdfW\\xb2_\\x19\\x88\\x8c\\x03\\xb4\\x0e;\\x91\\xec\\xd7\\x1b\\xca\\xb2k\\x1a,-\\xdb\\xa6$>\\xd4)W\\xf1d7\\xba\\xcb\\x86\\x87\\r\\xb7\\xd2\\xa2C\\xa4\\x92\\xdd5\\x1a~\\xf7|\\x88\\xf40\\xdek\\xe1\\x9e}L\\xd5s\\\\\\xb8\\x0e\\x8f\\xe4j\\xea\\x1c\\xfa\\xc1\\xc0t\\x7f#WP\\xe7\\xd6\\x1fMg\\xd8\\xc3\\xf8\\xb1\\xe4\\xcd\\xe4u.cd\\x93Y\\xdc\"sG\\x0c\\x92G\\xa1\\x9f}\\xbd\\xb9\\xa6\\xbc\\xb5\\xd7\\xde?,Kh\\xf8\\x9e~\\xa9I\\xc62j\\x8c\\x8b\\xd1I\\n|\\xea\\xa77$\\x9b%\\xeah5\\x1a\\x14z\\x12\\xb7U\\xa7\\xe3\\xd0\\xff\\x00\\x10\\xd6\\xdf\\x13\\xae}L\\xd5s}\\xf0\\x1d\\x1f\\xc8\\xd5\\xd49\\xf5\\x83\\x80\\xe8\\xfeF\\xae\\xa1\\xcf\\xac\\'\\xc0]\\xb6/T\\xfa\\x99\\xaa\\xe6\\x80\\xe0:?\\x91\\xab\\xa8s\\xeb\\x07\\x01\\xd1\\xfc\\x8d]C\\x9fXO\\x80m\\xb1z\\xa7\\xd4\\xcdW4\\x07\\x01\\xd1\\xfc\\x8d]C\\x9fX8\\x0e\\x8f\\xe4j\\xea\\x1c\\xfa\\xc2|\\x03m\\x8b\\xd5>\\xa6j\\xb9\\xa08\\x0e\\x8f\\xe4j\\xea\\x1c\\xfa\\xc1\\xc0t\\x7f#WP\\xe7\\xd6\\x13\\xe0\\x1bl^\\xa9\\xf53U\\xcd\\x01\\xc0t\\x7f#WP\\xe7\\xd6\\x0e\\x03\\xa3\\xf9\\x1a\\xba\\x87>\\xb0\\x9f\\x00\\xdbb\\xf5O\\xa9\\x9a\\xaej6m\\x87T\\xd7\\xe1\\x97\\xf2\\xa3Gq\\x99\\x0cW\\xc8u\\xb7\\x13!\\xcdR\\xa4\\xb6\\xa3#/k\\xe22\\x1b#\\\\\\xdaG\\xf6Hf\\x9bC\\xfc\\x00\\xc9\\xbfFI\\xfd\\xd2\\x86\\x96\\xcf\\xf2H\\xfe\\xc9\\x0f7\\x8b\\xaa\\xaa\\xf0\\xa8\\x9a\\xa6\\xfa\\xcf\\xd9\\xb8\\x99\\x98\\xd5\\xf6\\x00\\x03\\xe5\\x00\\x00\\x00\\x0c~\\x8f\\x17\\xad\\xba+YsYq\\xf9\\n\\xb7\\xb0I\\xac\\xdfp\\xb9&[\\xa9IhJ\\xd3B\"\"\\xff\\x00\\xb0\\xd8\\x06i\\x87\\xff\\x002\\xb3\\xfd1e\\xff\\x00\\x9a\\xf0\\xfa~\\x12\\xaa\\xa9\\xa2\\xb9\\xa6m\\xc3\\xeeL\\xccF\\x8f\\x9e\\x03\\xa3\\xf9\\x1a\\xba\\x87>\\xb0p\\x1d\\x1f\\xc8\\xd5\\xd49\\xf5\\x84\\xf8\\x0f^\\xdb\\x17\\xaa}X\\xcdW4\\x07\\x01\\xd1\\xfc\\x8d]C\\x9fX8\\x0e\\x8f\\xe4j\\xea\\x1c\\xfa\\xc2|\\x03m\\x8b\\xd5>\\xa6j\\xb9\\xa08\\x0e\\x8f\\xe4j\\xea\\x1c\\xfa\\xc1\\xc0t\\x7f#WP\\xe7\\xd6\\x13\\xe0\\x1bl^\\xa9\\xf53U\\xcd\\x01\\xc0t\\x7f
#WP\\xe7\\xd6\\x0e\\x03\\xa3\\xf9\\x1a\\xba\\x87>\\xb0\\x9f\\x00\\xdbb\\xf5O\\xa9\\x9a\\xaeh\\x0e\\x03\\xa3\\xf9\\x1a\\xba\\x87>\\xb0p\\x1d\\x1f\\xc8\\xd5\\xd49\\xf5\\x84\\xf8\\x06\\xdb\\x17\\xaa}L\\xd5s@p\\x1d\\x1f\\xc8\\xd5\\xd49\\xf5\\x83\\x80\\xe8\\xfeF\\xae\\xa1\\xcf\\xac\\'\\xc06\\xd8\\xbdS\\xeaf\\xab\\x9a\\x03\\x80\\xe8\\xfeF\\xae\\xa1\\xcf\\xac\\x1c\\x07G\\xf25u\\x0e}a\\\\\\xdb\\xc6\\xd9`\\xec#gR\\xb2\\x99\\xb5\\xf2-\\xdc\\'\\xd9\\x87\\x12\\xba*\\x89+\\x93!\\xd5\\x92\\x1bF\\xf2\\xb9$\\xb5=MG\\xee\">Fz\\x11\\xf80]\\xae^9\\x8c\\xdd\\xdc\\xedO\\x12\\x8f\\xb2(\\xf5\\x8f6\\x83~\\xd2\\xfa4\\x98\\xae!z\\x11/\\xd2\\x13\\xba\\x84\\xfbJJt?\\x8c\\xc8\\x867\\x9a\\xe2r\\xe7\\x9e\\xe6j\\xb9\\xae\\\\\\x07G\\xf25u\\x0e}`\\xe0:?\\x91\\xab\\xa8s\\xeb\\x0fL\\xec\\xba\\x8a\\xae\\xdd\\xba\\xa9\\xb7U\\xd1-\\x1c\\x8c\\xb9\\x88\\x84\\xfc\\xb6\\xd0\\xfa\\x98G\\xdf\\xbaH3\\xde4\\'\\xe3V\\x9a\\x17\\xc6b\\xbc\\xd6\\xdc\\xf6n\\xf9\\xb8M\\xed\\x07\\x16p\\xda\\x8e\\x99K\\xdc\\xba\\x8c{\\x8c\\xa8\\x88\\xd2\\xe1\\xfb|\\x90ddd\\xafq\\xea_\\x8c]\\xe2\\xb8\\xff\\x00\\x9c\\xfa\\x99\\xaa\\xe6\\x98\\xe0:?\\x91\\xab\\xa8s\\xeb\\x07\\x01\\xd1\\xfc\\x8d]C\\x9fX~^\\xb2\\xf1\\x0e\\x1f\\x87}\\xc5T\\x9f\\x01\\xcdt\\xa3\\xc5\\xb3\\xf8E\\x9fF}\\xc33\"B\\x1d\\xde\\xddR\\x8c\\xc8\\xf4\"3>F\"\\xdb\\xdb\\xae\\xcd]\\x8c\\xc4\\x84m\\x0b\\x15\\\\w\\xde8\\xec\\xba\\x9b\\xb8\\xc6\\x87\\x1d-5BO\\x7fCW2\\xe4\\\\\\xf9\\x90o\\x15\\xc7\\xfc\\xe7\\xd4\\xcdW4\\xcf\\x01\\xd1\\xfc\\x8d]C\\x9fX8\\x0e\\x8f\\xe4j\\xea\\x1c\\xfa\\xc3\\xf3\\xcav\\x95\\x88`\\xd2\"1\\x92eT\\x98\\xfb\\xf3?\\x9b7ib\\xcce?\\xcfO`\\x96\\xa25s\\xfcC\\xe7%\\xdan\\x1d\\x86:\\xcby\\x06YGD\\xe3\\xec\\x9c\\x96\\x91ed\\xccsq\\xa22#q$\\xb5\\x16\\xa9#2\\xd5E\\xcb\\x99\\x06\\xf1\\\\\\x7f\\xce}L\\xd5s~\\xdc\\x07G\\xf25u\\x0e}`\\xe0:?\\x91\\xab\\xa8s\\xeb\\x0f\\xe5\\x16\\xd0\\xf1\\\\\\xa2\\x0c\\xe9\\xb4\\xd95=\\xbc8)%\\xcb\\x91\\x02{O\\xb7\\x1d&\\x8e\\xf0\\x8d\\xc5%FI#A\\x92\\x88\\xcfN\\\\\\xfd\\xc2N\\x96\\xee\\xbb$\\xaa\\x8biQ>-\\xa5l\\xa4\\x13\\xb1\\xe6By/2\\x
f2\\x0f\\xdc\\xa4-&d\\xa2\\xfc\\xa4b\\xc6>$\\xf0\\xae}L\\xd5sF\\xf0\\x1d\\x1f\\xc8\\xd5\\xd49\\xf5\\x83\\x80\\xe8\\xfeF\\xae\\xa1\\xcf\\xac\\'\\xc0]\\xb6/T\\xfa\\x99\\xaa\\xe6\\x80\\xe0:?\\x91\\xab\\xa8s\\xeb\\x07\\x01\\xd1\\xfc\\x8d]C\\x9fXO\\x80m\\xb1z\\xa7\\xd4\\xcdW4\\x07\\x01\\xd1\\xfc\\x8d]C\\x9fX8\\x0e\\x8f\\xe4j\\xea\\x1c\\xfa\\xc2|\\x03m\\x8b\\xd5>\\xa6j\\xb9\\xa08\\x0e\\x8f\\xe4j\\xea\\x1c\\xfa\\xc1\\xc0t\\x7f#WP\\xe7\\xd6\\x13\\xe0\\x1bl^\\xa9\\xf53U\\xcd\\x01\\xc0t\\x7f#WP\\xe7\\xd6\\x0e\\x03\\xa3\\xf9\\x1a\\xba\\x87>\\xb0\\x9f\\x00\\xdbb\\xf5O\\xa9\\x9a\\xaeh\\x0e\\x03\\xa3\\xf9\\x1a\\xba\\x87>\\xb0p\\x1d\\x1f\\xc8\\xd5\\xd49\\xf5\\x84\\xf8\\x06\\xdb\\x17\\xaa}L\\xd5sV\\xab\\xe8aQgX\\xd1\\xc1mlw\\xcb\\x90\\x87\\x0b\\xbeZ\\x89DL\\xa8\\xc8\\x8c\\x8c\\xcc\\xbd\\xe3S\\x19\\xe3\\xff\\x00\\x87\\x18\\xa7\\xf7\\xb2\\x7fp\\xa1\\xa1\\x8f\\x0f\\x8c\\x99\\xaah\\x99\\x9b\\xe9\\xf7\\x96\\xefx\\x8b\\xa2\\xf2\\xaf\\xc1\\x8b\\x8f\\xcc\\xde\\xff\\x00A\\x8a\\xa69\\xf8=W\\xf9\\xab_\\xe8!k\\xca\\xbf\\x06.?3{\\xfd\\x06*\\x98\\xe7\\xe0\\xf5_\\xe6\\xad\\x7f\\xa0\\x86\\xfc?\\xf0\\xcf\\xcf\\xec\\x95~\\xd4\\x88\\x0e{\\xed9\\xb4|S6\\xec\\xd9\\xb6Z\\xfc\\x7f#\\xab\\xbb\\x9dSK!\\xab\\x08\\xd0%\\xa1\\xe7\",\\xc9I$\\xba\\x94\\x99\\x9a\\x0fT,\\xb4=9\\xa4\\xff\\x00\\x10\\xcb\\xb0m\\xab5\\xb43\\xdaf\\xd6I\\x04v\\x1b7\\xc6],F\\x8a\\xd1\\x87\\x10\\x840\\xa8&\\xff\\x00\\xc2jA\\x9aME(\\xd1\\xb8\\x93I\\x91\\x93M\\x99jF\\xa31\\x9a\\xb1b*\\xca\\xc5\\x9d\\xaa\\x03\\x9d\\xaa\\xf6\\xfb\\x98\\xe2\\xf7\\xb8\\xa3\\xb9\\xe4Z\\x03\\xa0\\xca1\\xf9\\xd7q\\x8a\\x89\\xb7\\xc9\\xfa\\xf5Ea\\xb9\\x0bi\\xc58\\xb3\\'\\x88\\xdbY\\xe8\\xa4\\xa5\\xbfi:i\\xa71\\xe6\\xc6\\xb6\\xf1\\xb4f\\xa3l\\xe3&\\xca\\xaa\\xb1\\xb8\\xb8\\x86\\x7f-\\x88Pb\\xd7w\\xea\\x9fV\\xb9L\\xad\\xd8j}j^\\xe3\\xe4\\xa2JR\\xb2J[4\\x9a\\xcbMt\\x17kIgI\\x00\\xe5\\xda^\\xd8r\\xd9cgP/\\xaac&\\xfa}\\xbc\\xca\\xac\\xb50\\x92\\xb2f\\x9f\\xb8\\x93\\xe8D\\xee\\x8aQ\\x9a\\x10\\xb9OE\"5\\x19\\xfb\\x0bW\\xc6Z\\x97\\x82\\x1e\\xd1,\\xb6\\xa5\\xb6\\xfd\\x8f_H
b+4\\xa7\\x95d\\xd0\\xe9\\x17\\x1d\\n%H\\x84\\xc5{\\xac\\x93\\xce\\x19\\xa8\\xf55\\xba\\xdb\\xc6FD\\x92\\xdd\\xdc\\xe5\\xae\\xa6smL\\xf0\\xf2\\xfb{\\x96u\\x88\\x0eA\\xb7\\xed\\x91\\x96L\\x9dyo\\x8b\\xe3\\xcd\\xdb\\xe3\\xd5\\x96/\\xc1b\\x99\\xacr\\xe2L\\xeb40\\xf1\\xb4\\xeb\\x8dMe\\x93\\x8a\\xd9\\xa8\\xd2\\xb3J\\x0f{\\xdcD\\xa5$\\xcc\\xc8\\xa4v\\xdb\\xb5\\x0c\\xebiX>\\xdbba\\xf0\\xe8!\\xe1x\\xc5\\\\\\xfa\\x9b)WI}S\\'H(=\\xec\\x84\\xb0HQ%\\xae\\xed\\x0e\\xa4\\x88\\xd6J\\xdeW-\\x08\\xb9\\x89\\xb6\\xa6\\xd3mK:\\xb4\\x07\\'z\\xf1\\xcej\\xb19uxLLq\\xa8\\xf8&\\x0bYyf\\xe6DO)s{\\xd8\\xabq-0M\\xad$\\x82$0\\xad\\\\V\\xf1o(\\x8bt\\xb9\\x98Z\\xf6\\xb2\\xc9gN\\xa9\\xa5\\xa0f\\x13\\x16Lc\\xd5\\xb6\\xd6\\xf3\\xa6\\xe3V\\xd6M\\xad\\xf9l\\xf7\\x88a\\xb6`\\xa5f\\xc6\\x89-\\xe3S\\x8b?\\xbe\"I/uF.\\xda\\x9f\\x89gX\\x80\\xe1l\\xfa\\xfa6\\xd9\\xf6\\x99\\xb2;\\xbc\\xbfdV\\x993\\xf2\\xf1;w\\x1e\\xc4\\x89\\x86\\xd2\\xfcw\\xdb\\x94\\xc3}\\xe9&R\\xd82F\\xa93I\\x9e\\x8b\\xddq\\'\\xbb\\xef\\xd2\\xdf\\xb3\\xad\\xa9\\xda\\xec\\xfb\\xb3\\x0e2P2\\xba(\\xb7Q\\xee%THc*b|\\xc7\\xe1,\\x9eyI\\xaeC\\r\\x92d>\\xfb)6\\xdb\\xd3\\x97\\xb2\\x93Qj\\x92-s\\x18\\xd13:i\\xff\\x00\\x9e\\xebg]\\x00\\xe4\\xe7;[\\xe5Sv;M\\x921[OQ8\\xf2i\\x18\\xe5\\xdd\\xdd\\x94I\\x8a\\xab\\xac&w\\xff\\x00\\xda\\x96\\xcf\\xb0\\xfa\\x1bp\\xc9\\xb2\"p\\xd1\\xb8nh\\xb3-\\x04\\xd6\\xd0\\xbbKdX\\xdd\\xb6+\\x8aV\\xb9G+$\\x9fHW\\xb6\\x170j,\\xaek\\x89\\x858m\\xb7\\xe8\\xec\\xc3J\\x9dQ8\\xa4\\xac\\xc9kZR\\x92O\\xbdF\\xa2!\\xad\\xb5\\x16\\xbaZ].\\x03;\\xd8F\\xd1\\xed\\xf6\\xa1\\x81\\x95\\xad\\xed\\x1b\\xd46\\xacL~\\x13\\xcd9\\x16Df\\xe4wj\\xd12\\x19D\\x84!\\xd2m\\xc4\\x9aTD\\xb4\\x91\\x96\\xa6G\\xae\\x9a\\x8e{\\xd8F\\xd9d5\\xb5,\\x97f\\x18\\xba\\xebW\\x7f+=\\xc8\\xad\\xad\\xde\\xb1\\xdeR\"W70\\xc9Im)RMo\\xb8jI$\\xb5=\\xc4\\xea\\xb5\\x11\\x96\\x84vqb-\\xe6Y\\xd9\\x009n\\x7fh\\xcd\\xa3WPe\\xd9\\x9b\\x901\\x85b\\x18\\xbea#\\x1e\\x95\\x05-H\\xf4\\xf9Q\\x91`\\x98\\xdd\\xf2\\x1c\\xef7\\x1bZR\\xe2\\x0c\\xd2iY(\\xd2\\xa3#F\\xa4\\x92\\x96\\xcav\\xeb\\x
b4I\\xaa\\xda]\\xf6\\x19S\\x8e\\xbb\\x89l\\xfeK\\xd0\\xe6G\\xb67\\xfd:\\xd5\\xd8\\xec\\xa5\\xe9D\\xca\\xd0\\xa2C\\x04\\x94\\xafu&\\xa4\\xb9\\xbc\\xa2=wH6\\xb4\\x96tp\\x0e^\\xc8\\xbb^ZP\\xd2\\xe7\\x0e\\xb3K\\x1e\\xda\\xdd\\xb4TM\\xc3\\xeb\\x98mhr\\xd6-\\x8bDl\\x92\\xd3\\xbez\\xad\\x0bnN\\xf9\\xa4\\xc8\\xb7[.E\\xf1\\xd6\\xbbG\\xed\\xa9[V\\xd9\\x1e\\xd4\\xe1\\xe3\\xc5\\x12N\\x17\\x07\\x10\\xad\\xb0rq\\xa1]\\xfa\\xe6Lu/4\\xd9\\x1e\\xf6\\xe9%1\\xc9*Qn\\xeb\\xab\\xa8\\xe6Zhy\\x9cjb&c\\xf3\\xf2\\xc5\\xa5\\xd8\\xc09\\xdfl\\xdd\\xa3\\xaeq\\x8d\\xa7\\xbb\\x83\\xe2\\xac\\xc5f]}s66\\x16\\x13\\xa8l\\xed\\xd0F\\xf2\\x96M0\\x96\\xa0\\xa0\\xd4\\x832mJ5\\xadDZ\\x19\\x12R\\xa3\\xde\\xd3\\xc9M\\xda\\';\\xda\\x1bxN9\\x8dc\\x10q\\xfc\\xe6\\xe2\\x0c\\xcbKB\\xc9\\x98\\x94\\x98\\xb5\\xf1#H(\\xe6\\xeaY2i\\xe5\\xf7\\xcbRM\\xb4\\xabp\\xc9\\'\\xaa\\xbd\\xc3[Zob\\xce\\x93\\x01\\xc5\\xfb\\x1e\\xdaVg\\x8c\\xa6F\\x1fV\\xc5\\x02s|\\xafhy*\\x1e\\x9d5/.\\xb6)\\xc6>\\xf5\\xf3CiR\\\\p\\xd4fD\\x84\\xef$\\xf4\\xd4\\xcc\\xf9\\x18\\x96Ok\\xbc\\xb2\\x0e)W\\x02|*\\x93\\xce,\\xf2K\\x8aT\\xbd\\n\\xae|\\xda\\xf8\\xcc\\xd7\\xac\\x92\\xf3\\xc9f?x\\xfc\\x8234\\x12t\\xdc/\\xba{F\\x92A\\x9a\\xb3\\x18\\xf4\\xda\\xf3\\xf9\\xf9r\\xce\\xba\\x01\\x92\\xf6}\\xda\\xc6A\\xb4\\xc8y\\x0b\\x19\\x1d2\\xe0\\xcb\\xa8\\x94\\x86Z\\xb3j\\xaemt[&\\x96\\x82Q8\\xd33\\x10\\x97Pi=\\xe4)\\'\\xbcDi#%\\x19(\\x84\\x1fm#\\x94[\\x14h\\xe0\\x93*\\x9b\\xc4t}\\xc1H3&\\xcd\\xcf\\x84\\xa3\\xee\\xef\\x99s\\xdd\\xd7Mt\\xe7\\xa0\\xe95\\xc6I\\xae\\x12\\xda\\xd9\\xbb\\x00\\xc1j\\xf6\\xc5\\x9ccY\\x9eO\\x85gI\\xc5#\\xdcG\\xc7\\x1e\\xc9j/a\\xad\\xf8\\xd5\\x8ba\\xb5\\xf7N\"R\\\\R\\xd6\\xd7v\\xb56jRT\\xa24(\\xcc\\x88\\x8c\\xb4\\x14|7\\xb5\\x9eaf\\xe6kXuu9\\xb5\\xc5v,\\xe6KL\\xe6;[a\\x01\\x99\\xdb\\x8b\\xee\\xcd\\x92D\\xa25;\\xcdM\\x9a\\\\h\\xcc\\x94FdE\\xaf!\\x9d\\xb51\\xc5l\\xeb 
\\x1c\\xbc}\\xaa.h\\xf6+\\x17,\\x97a\\x8ae\\xb6\\xb76\\xf1\\xa9jxf4\\xd3e\\x89\\x0e\\xa0\\xd4\\xb4J`\\xbb\\xd7\\xc9m\\x12\\x1cQ\\xb6\\x82\\xdfQ$\\x8bu&\\xaeW\\r\\x82\\xed\\xa3)\\xcfr\\xbb\\x9a\\x0c\\x8e\\xa4\\x9dj$6\\xe6\\xc6\\xbf\\x85AeS\\x15\\xe3R\\xcd\\x0b\\x8e\\xa6\\xa7 \\x94N\\'\\xd9Q\\x1aT\\xa2RU\\xf1\\x1aL\\x821i\\x99\\x88\\x8f\\x89f\\xbe\\xcf\\xf4\\x8b\\x8f\\xfeg7\\xfc\\xd9\\x1a\\x00\\xcf\\xd9\\xfe\\x91q\\xff\\x00\\xcc\\xe6\\xff\\x00\\x9b#@\\x1c\\xfc_\\x1a>_yt\\x8e\\x10\\x00\\x00\\xf0\\x80\\x00\\x00\\xa9mO\\xf0*O\\xe71?\\xf2Z\\x1f\\xc1\\xfc\\xda\\xbb\\x89k\\x07\\x96\\xb5\\xa8\\x90\\x84\\xc8\\x88f\\xa5\\x1e\\x84E\\xe9-s1\\x1f\\xc4u>)\\x0b\\xa8G\\x98\\xfa\\xf8\\x14\\xcc\\xe0E\\xa3\\xe3?JR\\xae\\x10\\x91\\x01\\x1d\\xc4u>)\\x0b\\xa8G\\x98q\\x1dO\\x8aB\\xea\\x11\\xe6:d\\xab\\x93\\x9d\\xa5\"\\x02;\\x88\\xea|R\\x17P\\x8f0\\xe2:\\x9f\\x14\\x85\\xd4#\\xcc2U\\xc8\\xb4\\xa4@Gq\\x1dO\\x8aB\\xea\\x11\\xe6\\x1cGS\\xe2\\x90\\xba\\x84y\\x86J\\xb9\\x16\\x94\\x88\\x08\\xee#\\xa9\\xf1H]B<\\xc3\\x88\\xea|R\\x17P\\x8f0\\xc9W\"\\xd2\\x91\\x01\\x1d\\xc4u>)\\x0b\\xa8G\\x98q\\x1dO\\x8aB\\xea\\x11\\xe6\\x19*\\xe4Z\\\\W\\xda\\x03fW{X\\xed\\xb9iM\\x8d\\xe5\\xd7\\x18e\\xea6V\\xa90\\xa7\\xd3\\xcb8\\xc6\\xe3\\xa5`\\xa4\\xa1\\xa7\\x94\\x92\\xde6Tk#Q$\\xc8\\xf5JL\\x8f\\x96\\x87EN\\xd5\\xa9\\xab{?l\\xa3\\x08\\x8fWU\\x81\\xd4B\\xb8\\x99S\\x98\\xd8\\xddQ\\x95\\xd7\\x0f\\xd9Gl\\xd4g\\xdc\\xbaK.\\xf2J\\xdcsu\\xc5\\x12\\xb4%(\\xbe%i\\xfe\\x87q\\x1dO\\x8aB\\xea\\x11\\xe6\\x1cGS\\xe2\\x90\\xba\\x84y\\x8f$\\xf8J\\xef3\\x1f\\x1f&\\xef<\\x9f\\xe6\\x1e\\x1d\\n\\r\\xa6\\xc7q\\x8c^n\\xb6\\xb5\\r\\xf6\\x84\\x84\\xc3ql+\\x8a!=^\\xfb[\\xcd\\x9a\\xa2\\xee\\x92Zm\\xd4\\xadj\\xee\\xc9$\\x92%\\x19\\x11h4m\\xb7R\\xd7\\xe2YGk*\\x8aH1\\xeaj\\x9e\\xc2\\xaa\\xe5\\xae\\x14&\\x92\\xd3=\\xf6\\x8aN\\xf9!$DFdg\\xa9\\x91s\\xd4\\xc7{q\\x1dO\\x8aB\\xea\\x11\\xe6\\x1cGS\\xe2\\x90\\xba\\x84y\\x89\\x1e\\x0e\\xa8\\x8b}\\xbc\\xa6>\\xe5\\xe7\\x93\\x88{,\\xc0sf\\xd9\\x8eQ\\x83\\xed-\\xa8wy&u\\x8e\\xc7\\xbb\\xaa\\xc8_ky6p\\xd3\\x15
)r\\xbfEjDLhz \\x88\\xb5I\\x1a\\x8c\\x88\\xb7HRv\\x1fAY\\x94Wv \\xae\\xb8\\xaf\\x8bk^\\xe22\\xc5\\xae,\\xc6R\\xebKR\\x10\\xa5\\xa0\\xcd*##\\xd1IJ\\x8b\\xf1\\x19\\x11\\x8f\\xf4S\\x88\\xea|R\\x17P\\x8f0\\xe2:\\x9f\\x14\\x85\\xd4#\\xccX\\xf0\\x95E\\xa3\\x97\\x97\\x9cO\\xd8\\xbc\\xf2\\x7f\\x9a\\xdbO\\xaa\\x85\\x8fa\\x1b]\\x8d\\r\\x88\\xd1)\\xf1\\xed\\xb0A\\x9d\\x12\\xaaDm\\xfar\\xdem$\\xb4Km?y\\x17E\\x19\\xabt\\x8f\\x9aRD\\\\\\xf9tga\\x088\\xfb\\x8e\\xed+ \\xaa\\xca1\\x0bk\\x0b\\xcb&$L\\xa6\\xc2[q\\xaa\\xfa\\xb4\\xa5\\xb5%\\xb2B\\x1dJ\\x17\\xf7Ol\\xcd[\\xa4Fi=5\\xe67\\r\\xa7\\xe3\\x94;R\\xc5WG\\'/\\xb0\\xc7\\x92o6\\xfa\\'\\xe3\\xb7\\x05\\n[kA\\xea\\x9d\\x1cI\\x9f-y\\xe8de\\xa9\\x11\\xe9\\xc8\\x85\\x7fc\\xbb\"\\xc2\\xb636\\xf2\\xca\\x16Qc\\x92_\\xdd\\x9b%ay\\x92\\xdc&d\\xc9\\th\\x8c\\x9aA\\xab\\xd9\"JIJ\\xd0\\x89%\\xef\\xe7\\xae\\x85\\xa4\\xa7\\xc3bS\\x89\\x15[O\\xfd8\\xc3^\\x01\\x1d\\xc4u>)\\x0b\\xa8G\\x98q\\x1dO\\x8aB\\xea\\x11\\xe6=\\x99*\\xe4\\xc5\\xa5\"\\x02;\\x88\\xea|R\\x17P\\x8f0\\xe2:\\x9f\\x14\\x85\\xd4#\\xcc2U\\xc8\\xb4\\xa4@Gq\\x1dO\\x8aB\\xea\\x11\\xe6\\x1cGS\\xe2\\x90\\xba\\x84y\\x86J\\xb9\\x16\\x94\\x88\\x08\\xee#\\xa9\\xf1H]B<\\xc3\\x88\\xea|R\\x17P\\x8f0\\xc9W\"\\xd2\\x91\\x01\\x1d\\xc4u>)\\x0b\\xa8G\\x98q\\x1dO\\x8aB\\xea\\x11\\xe6\\x19*\\xe4Z^\\r\\xa1\\xfe\\x00d\\xdf\\xa3$\\xfe\\xe9CKg\\xf9$\\x7fd\\x86K\\x9f_\\xd5\\xbd\\x82dm\\xb7e\\x11n.\\xb6JR\\x94\\xbe\\x9333iZ\\x11\\x16\\xa3Zg\\xf9$\\x7fd\\x87\\x0f\\x15\\x13\\x18T^>3\\xf4\\xa5\\xd28>\\xc0\\x00|\\xb5\\x00\\x00\\x00f\\x98\\x7f\\xf3+?\\xd3\\x16_\\xf9\\xaf\\r,e\\x18\\xbd\\xd5t8\\xf6\\xad?>3\\x0e\\xa6\\xe2\\xcbT8\\xf2R\\xa2\\xff\\x00l{\\xe23\\x1fK\\xc2\\xc4\\xcd\\x15\\xdb\\x9c}\\xc9\\xfd\\xabH\\x08\\xee#\\xa9\\xf1H]B<\\xc3\\x88\\xea|R\\x17P\\x8f1\\xe9\\xc9W\\'+JD\\x04w\\x11\\xd4\\xf8\\xa4.\\xa1\\x1ea\\xc4u>)\\x0b\\xa8G\\x98d\\xab\\x91iH\\x80\\x8e\\xe2:\\x9f\\x14\\x85\\xd4#\\xcc8\\x8e\\xa7\\xc5!u\\x08\\xf3\\x0c\\x95r-)\\x10\\x11\\xdcGS\\xe2\\x90\\xba\\x84y\\x87\\x11\\xd4\\xf8\\xa4.\\xa1\\x1ea\\x92\\xaeE\
\xa5\"\\x02;\\x88\\xea|R\\x17P\\x8f0\\xe2:\\x9f\\x14\\x85\\xd4#\\xcc2U\\xc8\\xb4\\xa4@Gq\\x1dO\\x8aB\\xea\\x11\\xe6\\x1cGS\\xe2\\x90\\xba\\x84y\\x86J\\xb9\\x16\\x95\\x03\\xb4\\xac\\xac\\x026\\xc7\\xaeS\\xb4\\xe8.\\xcf\\xc3^S,\\xccC\\x11\\xddyh\\xdeq$\\x97\\x08\\x9a-\\xf4\\xee\\x1e\\x8a\\xdeO2\\xdd\\xe5\\xaf\\xb8\\xf8\\x9a\\xf2\\xd3$\\xda\\'e\\xce\\xd08\\xce-i\\x90m\\x0bf\\xb5h\\xab{\\x16\\xbc\\xb8\\x88\\xe9Ku\\t}\\xa7e0\\x95-\\t[\\xc8d\\x9b?h\\xd3\\xa9\\x11~#-?\\xd1\\x8e#\\xa9\\xf1H]B<\\xc3\\x88\\xea|R\\x17P\\x8f1\\xe7\\xc4\\xf0\\xf5\\xe2O--\\xc1\\xa8\\xbc|\\x1cIu\\xb6\\xacOm\\xdd\\xa7\\xeamp\\xf9\\xaf\\xd9V\\xc7\\xd9\\xdd\\xcbK\\x94\\xe47XA\\xb8e\\xbchOx\\x94\\xef\\x1aKM\\xed5\"3\\xd3]u\\x14|\\x03f\\xb8\\x9c\\x8cK\\xb1\\xa3\\xce\\xe3U.;e:Y\\xceZ\\xe16g/\\xeeJYw\\xa7\\xa7\\xb7\\xa2\\x92F[\\xda\\xe8d?\\xd1>#\\xa9\\xf1H]B<\\xc3\\x88\\xea|R\\x17P\\x8f1\\x8d\\xd6\\xb9\\x9b\\xd5\\xf4\\xf9{-\\xe7\\x93\\xfc\\xe2\\xc9\\xa8\\xeb\\x9b\\xc2\\xf3\\xacm0c\\xa7\\x1foo\\xb1\\xa3\"\\xb0\\x9b\"\\x8e\\x86\\x97\\xb8Jl\\x91\\xee$\\x19r\\xdd\"\\xd3O\\x88[s=\\x97a\\xc9\\xda\\x07l\\xe4\\'\\x16\\xa7B+0\\xe8r`%0[\"\\x88\\xe9\\xd6>\\xe9\\xad\\xa2$\\xfb\\n7\\x1bB\\xcc\\xd3\\xa6\\xa6\\x92?x\\xef\\x0e#\\xa9\\xf1H]B<\\xc3\\x88\\xea|R\\x17P\\x8f17:\\xb9v\\xf9\\xfb\\x97\\x9eO\\xf3\\x97i\\xb9\\x8b\\x99F+\\xb3\\xccS&\\x93\\x06\\x86\\xb9\\xcd\\x98\\xd7J\\xac\\xb1\\x7f\\x13E\\xd5\\x85\\xf4\\xd7c\\xa4\\x95\\x11\\x87\\x1cm}\\xd1\\xeaI\\xfb\\xcd\\x17\\xbc{\\xda\\x97#)\\xbd\\x88TT\\xed#=\\xec\\xb2\\xdeC\\n5\\xf3\\x11pk\\x12\\xee\\'\\xb4O \\x9da}\\xc9\\x12\\x90\\xa223F\\xe9\\x91j\\\\\\x8d$~\\xf2!\\xdf\\xfcGS\\xe2\\x90\\xba\\x84y\\x87\\x11\\xd4\\xf8\\xa4.\\xa1\\x1ea\\x1e\\x12\\xbc\\xd7\\x9f\\xa7\\xc8\\xbc\\xf2p\\xf7k\\x9cw 
\\xd9\\xa6\\xd5_\\xa8\\xc1!\\xa6<-\\xb5T\\xc7\\xc4\\xdeDt\\xee7\\x12c.\\xb6\\xd1\\\\iT\\xf9\\x01c\\x16J\\xc6\\xe6\\xe2\\x96/&\\x07~\\xdc\\xf8\\x0f\\xb0\\xa4%\\x0bA8\\x8d\\r\\xa7\\x0c\\x9dA\\x91\\xf2\\xf6\\x93\\xeeV\\xa5\\xae\\xf1\\x1dO\\x8aB\\xea\\x11\\xe6\\x1cGS\\xe2\\x90\\xba\\x84y\\x8d\\xce\\x04\\xd5\\xc6\\x97=Y\\xe5\\x9e\\xc1\\xe3\\\\\\xda\\xec\\xdeD\\xdb2~\\x1e#W2\\xad\\xe8\\x8a\\x8d\\xca\\xc1\\x12\"\\xb7\\x1dFj\\xdf\\xfb\\x9e\\x84\\x83=4V\\xbb\\xdajZj+\\x18\\x9ff+J\\x99\\xb8L\\x1b\\xec\\xfaFG\\x87a2\\x13&\\x86\\x91u\\x8d\\xc7u\\x0e6\\xda\\x9b\\x8crd%g\\xdfw(Q\\x92tCz\\x99\\x11\\xab]\\x06\\xd5\\xc4u>)\\x0b\\xa8G\\x98q\\x1dO\\x8aB\\xea\\x11\\xe6&\\xc2\\xf3|\\xb3\\xdc\\xd5\\x96\\xc8\\xec\\xbb\\x8aJ\\xb4\\xda\\xd4\\xe7w\\xcd\\xdd\\xa20\\xdb\\x134N\\x9e\\x88Igsy\\xa3\\xd7\\x92\\x8d\\xcd]3\\xe5\\xedn\\xff\\x00\\xcaF\\x0fvsb\\xbe\\x87e\\x10\\xf1\\xdb\\xe5\\xd1M\\xd9\\xea\\xd0P\\xe5\\x1cD\\xbe\\x89M\\x1csbB\\x1cl\\xd4Z\\x1b\\xa8R\\x8fx\\x8fT\\xa8\\xf5\\xe65.#\\xa9\\xf1H]B<\\xc3\\x88\\xea|R\\x17P\\x8f0\\xd8\\x7f_\\xce&\\xac\\x97\\x1d\\xd8&M\\x81d\\x93\\xcb\\x0f\\xda+\\x94\\x98\\\\\\xfbu\\\\=\\x8f=N\\xd4\\xa7\\x1aq\\xc7;\\xc7\\xdabB\\x96]\\xdbN+{\\xd94(\\xd3\\xbc{\\xa6G\\xccF\\xe6\\xfd\\x99\\xafm\\xa7m\\t\\xbcWh*\\xc5\\xb1\\xfc\\xe9\\x97N\\xe2\\xa1\\xfav\\xe7\\x11Ir90\\xe3\\xec8n \\xdb5\\xa1(\\xdeI\\x92\\xb52\\xe4i\\xe5\\xa6\\xd9\\xc4u>)\\x0b\\xa8G\\x98q\\x1dO\\x8aB\\xea\\x11\\xe6&\\xef\\xa5\\xb2\\xcfsW\\x19v\\x86\\xd9<\\xba\\xec\\xcf\\x1c\\xee*\\xedr\\'*\\xb1\\x98\\x95l)\\x18\\x13\\x970\\xa5\\xa9\\xa5/V\\xd6\\xe32\\xdb\\xdd%\\x19 
\\xcd\\x12\\x12\\xb4\\'\\x91\\xa5\\\\\\xd66\\n\\xed\\x8fg\\x17sj\\xb6\\x87W\\x91\\xb3\\xb3,\\xee\\xea\\x8e\\x1cL\\x9e\\xa4\\xabQe\\x05\\xd7\\x1bI\\x9a7Pn$\\xd0\\xe3F\\xb5\\xa4\\x94KQn\\xf22=5=\\xb7\\x88\\xea|R\\x17P\\x8f0\\xe2:\\x9f\\x14\\x85\\xd4#\\xccf<4\\xc4\\xcc\\xda{\\xae\\xac\\x9f4\\xd8vau\\x99a\\xf9e\\x16\\xd0\\xe3\\xd5d\\x14T\\xaf\\xd3H\\x97c@\\x99\\xa58\\x9dSJ[\\xc6\\x84<\\xd2[Q\\xa9\\x92=\\x08\\x8c\\xbd\\xa3\\xe5\\xa7!\\x07\\x07\\xb2t\\x9cy\\xaa\\x1bj,\\xdd\\xe6s\\x9a\\xfb\\x9b\\x0b\\xc97\\xd6u\\x8d\\xcajl\\x89\\xcd\\xa5\\xb9;\\xd1\\x92\\xb6\\xc9\\x05\\xba\\x86\\xc9\\x1b\\x8b-\\xd2N\\x9e\\xd6\\xa67N#\\xa9\\xf1H]B<\\xc3\\x88\\xea|R\\x17P\\x8f1\\xad\\xde\\xf3|\\xb3\\xdc\\xd5\\x8ec\\x9b\\x00\\xcc\\xf0\\x9cb\\xe2\\xb2\\x83i\\xa8je\\xad\\xf4\\x8b\\xd9Sl\\xb1\\xe6e\\x13\\xe7!\\xb2\\'\\xd9q\\xb2q\\t4\\x9b\\x9a\\xb8\\x93F\\xe1\\x96\\xa4\\x93\\xde\"=b\\xa8\\xbb$H\\xd9\\xfd~\\x1b#\\x04\\xce\\x1f\\xc7\\xb2|~\\xb5\\xfa\\x87\\xac\\xe5\\xd672=\\x8cWd*J\\xdbr6\\xfa\\t$\\x97\\x96\\xa57\\xb8\\xb2\\xdc#\\xdd\\xf6\\x88o\\x1cGS\\xe2\\x90\\xba\\x84y\\x87\\x11\\xd4\\xf8\\xa4.\\xa1\\x1ea\\xbb\\xff\\x00Y\\xee\\x9a\\xbf\\x1cJ\\xae\\xd2\\x97\\x1d\\x85\\x0e\\xea\\xe9Y\\r\\xa3I?H\\xb2\\\\dG\\xef\\xd4j3\\xd4\\x9bG\\xb2\\x92\"2\"\"\\xd7\\x91\\x16\\xa6g\\xa9\\x8c\\x81=\\x97\\x1a\\x87_5\\xfa\\xec\\x81\\x102\\x82\\xcd%\\xe6U\\xd7h\\xaf#8\\xab\\x90\\xe9\\x9b\\xb1\\x9cA8F\\xebjeJe^\\xd2w\\x88\\xc8\\xf4-\\xd2!\\xb2\\xf1\\x1dO\\x8aB\\xea\\x11\\xe6\\x1cGS\\xe2\\x90\\xba\\x84y\\x8dN\\x0c\\xd5\\xc6\\x995dV\\x9d\\x9a~\\x12\\xd9Ny\\x85\\xf1\\x1fw\\xc59\\x1c\\x8c\\x83\\xd3\\xbd\\x07_F\\xeff\\xa2Ws\\xb9\\xde{znnoo\\']u\\xd0\\xbd\\xc3\\xcb\\x99vg\\xb6\\xba\\x9f\\x9a\\xc5\\xc7\\xf3\\xf9\\x18\\xc6\\'\\x9b:o\\xdf\\xd3\\xb7X\\xdc\\x87V\\xe2\\xdaK/\\xaa4\\x85,\\xbb\\x93u\\xb4$\\x95\\xaa\\x1c\\xd0\\xf54\\xe9\\xa8\\xd9\\xf8\\x8e\\xa7\\xc5!u\\x08\\xf3\\x0e#\\xa9\\xf1H]B<\\xc6g\\xc3\\xdf\\xfe3\\xdc\\xd5A\\x93\\xd9\\xef\\x18{j\\x18.h\\xd3]\\xc4\\x8cB\\xa5\\xea\\xa81\\x12\\x9dPhRR\\x86Tfg\\xff\\x00\\xb4\\x83}$Z
\\x7f\\xef\\x99\\xeaZs\\xaf@\\xec\\x99\\x8b\\xd5l\\x83h\\x1b>\\x85)\\xf8\\x95\\xf9|\\xe9s\\xdd\\x90\\xd2\\x08\\x97\\x15N\\x9a{\\xa4 \\x8f]R\\xd2[m)#\\xf7\\x92~-F\\xbf\\xc4u>)\\x0b\\xa8G\\x98q\\x1dO\\x8aB\\xea\\x11\\xe6.\\xc3\\xfa\\x9a\\xb2;]\\x82\\xe5\\x9cM]\\x98P\\xed\\x154\\xd9\\xb2\\xaaZ\\xa7\\xbb\\xb0r\\x8d\\x12!\\xdb6\\xda\\x8dHp\\xe2\\xf7\\xa9\\xee\\x9cJ\\x94\\xbd\\x14\\x95\\x9f%i\\xa1\\x90\\xf6e\\xbb\\r\\xc8.m\\xb0\\xfc\\x9e\\x9f=v\\xa3<\\xa2\\x80\\xe5\\\\\\x9b\\xc9\\x15MIf\\xd23\\x86\\x858\\x87\\xa3%M\\x91j\\xe3iZM\\nN\\xe9\\xeb\\xef\\xf8\\xb5\\x0e#\\xa9\\xf1H]B<\\xc3\\x88\\xea|R\\x17P\\x8f0\\xd8O)\\xeej\\xe4}\\xaf\\xf6}\\xb0\\xc4vq\\x06\\x04\\xcb[|\\xb6T\\xac\\xc2nI\"uV\\x1cv*m\\xd7\\xd2\\xa3\\xd5L1%\\xb7\\x90IQ\\x9e\\x8be|\\xf5\\xd1H\\xdd\\x13\\xfb3\\xd9\\x06O\\xb4-\\x9b\\xe3\\x13_m;.\\xca0\\xbbye\\x8a\\xd8\\xc4\\xa3(\\xa9z\\x0b\\x8d\\xa5.*Mr\\xddQ\\xa0\\x9e58JA\\xb8J\\xd5\\t^\\xa4g\\xcf\\xa6x\\x8e\\xa7\\xc5!u\\x08\\xf3\\x0e#\\xa9\\xf1H]B<\\xc67Y\\xbd\\xed=\\xd7U\\x1e=\\x9ec\\xb3J\\x08\\xed\\\\E\\xbb\\xda\\xb5\\xac\\xa9\\x0e\\xb8\\xe4\\xba\\x180`\"*4N\\xea;\\xa7\\xa4\\xb7\\xa2}\\xfa\\x1e\\xf2\\xd5\\xae\\xf6\\xa6\\\\\\x85{9\\xa8\\xb6\\xed\\x15\\x89I\\xc5\\xe4\\xe3\\xd96\\xcd\\x94\\xcc\\xb8VL\\xdb\\xda\\xb3_!&\\xe4y-\\xbc\\x94!\\x0c\\xcawU\\x19\\xa0\\xbe\\xf8\\xb7t\\xfc~\\xe3\\xd6x\\x8e\\xa7\\xc5!u\\x08\\xf3\\x0e#\\xa9\\xf1H]B<\\xc6\\xe7\\x06\\xa9\\x8bLM\\xbf?\\xed5bV\\x9d\\x95\\xe4\\xe7q3\\'\\xf3\\xfc\\xd1\\xec\\x96\\xfb \\xa4<}\\x99\\xd0\\xab\\x91\\x01\\x9a\\xe8\\x9d\\xe7{\\xba\\xd3$\\xb5\\xef(\\xdd$)F\\xb5\\x1e\\xf6\\xe2K\\x91\\x0fL\\r\\x81f\\xed\\xe6\\xcb\\xccf\\xedE\\x0f\\xe4\\xa7\\x8f\\xc8\\xc7\\x9b~6:\\xcb,Gmn6\\xebn\\xb6\\xd1\\xba\\xafm. 
\\xcd[\\xe6\\xb4\\xa8\\x8fB$h6N#\\xa9\\xf1H]B<\\xc3\\x88\\xea|R\\x17P\\x8f17\\x7f\\xeb=\\xcdX*\\xbb!\\xbbq\\x17(\\xb1\\xbe\\xcd^\\x91\\x9b\\\\ZW\\xdc\\xc7\\xbf\\xaa\\xacn\\x13p%\\xc1B\\x93\\x1d\\xd4G\\xdfY(\\xf4Z\\xc9{\\xca=\\xf2V\\x9e\\xce\\x845\\xbd\\x9dcY~>\\xcc\\xe3\\xcb\\xf36\\xb2\\xe9/\\xa9\\x1d\\xc9\\xc6\\xa8Es1\\xd2\\x92=I(J\\xd6\\xa5\\x1a\\x8c\\xc8\\xcc\\xd4\\xb3\\xf7\\x16\\x84B\\x7f\\x88\\xea|R\\x17P\\x8f0\\xe2:\\x9f\\x14\\x85\\xd4#\\xccZp&\\x99\\xbcS=\\xcd_\\xc6\\x7f\\xa4\\\\\\x7f\\xf39\\xbf\\xe6\\xc8\\xd0\\x06m]g\\x0e\\xc3h\\xd4E\\x16[\\x12M0\\xe6o\\x13.\\x12\\xf4\\xe6\\xcf\\xbfC\\x1aH\\xf3\\xf8\\xb8\\x98\\x9a\"y}\\xe5\\xd3\\xe1\\x00\\x00\\x0f\\x00\\x00\\x00\\n\\x86\\xd5\\xdbK\\xb8<\\xb4-$\\xb4*DB4\\xa8\\xb5#/Ik\\x91\\x88\\xfe\\x1b\\xa8\\xf0\\xb8]:<\\x84\\x86\\xd5\\xdcKX<\\xb5\\xadD\\x84&DC5(\\xf4\"/Ik\\x99\\x88\\xfe#\\xa9\\xf1H]B<\\xc7\\xd9\\xf0\\xf9\\xf7x\\xcb~3\\xf4\\xa4\\x9b\\xda,p\\xddG\\x85\\xc2\\xe9\\xd1\\xe4\\x1c7Q\\xe1p\\xbaty\\x07\\x11\\xd4\\xf8\\xa4.\\xa1\\x1ea\\xc4u>)\\x0b\\xa8G\\x98\\xed\\xfa\\xbe}\\xdc\\xf58n\\xa3\\xc2\\xe1t\\xe8\\xf2\\x0e\\x1b\\xa8\\xf0\\xb8]:<\\x83\\x88\\xea|R\\x17P\\x8f0\\xe2:\\x9f\\x14\\x85\\xd4#\\xcc?W\\xcf\\xb9\\xa9\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90p\\xddG\\x85\\xc2\\xe9\\xd1\\xe4\\x1cGS\\xe2\\x90\\xba\\x84y\\x87\\x11\\xd4\\xf8\\xa4.\\xa1\\x1ea\\xfa\\xbe}\\xcdN\\x1b\\xa8\\xf0\\xb8]:<\\x83\\x86\\xea<.\\x17N\\x8f \\xe2:\\x9f\\x14\\x85\\xd4#\\xcc8\\x8e\\xa7\\xc5!u\\x08\\xf3\\x0f\\xd5\\xf3\\xeejp\\xddG\\x85\\xc2\\xe9\\xd1\\xe4\\x1c7Q\\xe1p\\xbaty\\x07\\x11\\xd4\\xf8\\xa4.\\xa1\\x1ea\\xc4u>)\\x0b\\xa8G\\x98~\\xaf\\x9fsS\\x86\\xea<.\\x17N\\x8f 
\\xe1\\xba\\x8f\\x0b\\x85\\xd3\\xa3\\xc88\\x8e\\xa7\\xc5!u\\x08\\xf3\\x0e#\\xa9\\xf1H]B<\\xc3\\xf5|\\xfb\\x9a\\x9c7Q\\xe1p\\xbaty\\x07\\r\\xd4x\\\\.\\x9d\\x1eA\\xc4u>)\\x0b\\xa8G\\x98q\\x1dO\\x8aB\\xea\\x11\\xe6\\x1f\\xab\\xe7\\xdc\\xd4\\xe1\\xba\\x8f\\x0b\\x85\\xd3\\xa3\\xc88n\\xa3\\xc2\\xe1t\\xe8\\xf2\\x0e#\\xa9\\xf1H]B<\\xc3\\x88\\xea|R\\x17P\\x8f0\\xfd_>\\xe6\\xa7\\r\\xd4x\\\\.\\x9d\\x1eA\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90q\\x1dO\\x8aB\\xea\\x11\\xe6\\x1cGS\\xe2\\x90\\xba\\x84y\\x87\\xea\\xf9\\xf758n\\xa3\\xc2\\xe1t\\xe8\\xf2\\x0e\\x1b\\xa8\\xf0\\xb8]:<\\x83\\x88\\xea|R\\x17P\\x8f0\\xe2:\\x9f\\x14\\x85\\xd4#\\xcc?W\\xcf\\xb9\\xa9\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90p\\xddG\\x85\\xc2\\xe9\\xd1\\xe4\\x1cGS\\xe2\\x90\\xba\\x84y\\x87\\x11\\xd4\\xf8\\xa4.\\xa1\\x1ea\\xfa\\xbe}\\xcdN\\x1b\\xa8\\xf0\\xb8]:<\\x83\\x86\\xea<.\\x17N\\x8f \\xe2:\\x9f\\x14\\x85\\xd4#\\xcc8\\x8e\\xa7\\xc5!u\\x08\\xf3\\x0f\\xd5\\xf3\\xeejp\\xddG\\x85\\xc2\\xe9\\xd1\\xe4\\x1c7Q\\xe1p\\xbaty\\x07\\x11\\xd4\\xf8\\xa4.\\xa1\\x1ea\\xc4u>)\\x0b\\xa8G\\x98~\\xaf\\x9fsS\\x86\\xea<.\\x17N\\x8f 
\\xe1\\xba\\x8f\\x0b\\x85\\xd3\\xa3\\xc88\\x8e\\xa7\\xc5!u\\x08\\xf3\\x0e#\\xa9\\xf1H]B<\\xc3\\xf5|\\xfb\\x9a\\xa0s\\xec~\\xad\\x9c\\x13#q\\xba\\xd8\\x88q\\x15\\xb2T\\x95%\\x84\\x91\\x91\\x93J\\xd0\\xc8\\xf4\\x1a\\xd3?\\xc9#\\xfb$2\\\\\\xfa\\xfe\\xad\\xec\\x13#m\\xbb(\\x8bqu\\xb2R\\x94\\xa5\\xf4\\x99\\x99\\x9bJ\\xd0\\x88\\xb5\\x1a\\xd3?\\xc9#\\xfb$<\\xbe/6\\xca\\x8c\\xdc\\xe7\\xff\\x00\\x97H\\xbd\\xb5}\\x80\\x00\\xf9@\\x00\\x00\\x03\\'\\xc5\\xe9+\\xa61j\\xeb\\xf0\"\\xbe\\xea\\xae,\\xb5[\\x8c\\xa5J?\\xf6\\xc7\\x8b\\xded5\\x81\\x94b\\xf7U\\xd0\\xe3\\xda\\xb4\\xfc\\xf8\\xcc:\\x9b\\x8b-P\\xe3\\xc9J\\x8b\\xfd\\xb1\\xef\\x88\\xcc}?\\t\\x9b%y|\\xbe\\xe4\\xde\\xda&8n\\xa3\\xc2\\xe1t\\xe8\\xf2\\x0e\\x1b\\xa8\\xf0\\xb8]:<\\x83\\x88\\xea|R\\x17P\\x8f0\\xe2:\\x9f\\x14\\x85\\xd4#\\xcc{?W\\xcf\\xbb\\x9e\\xa7\\r\\xd4x\\\\.\\x9d\\x1eA\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90q\\x1dO\\x8aB\\xea\\x11\\xe6\\x1cGS\\xe2\\x90\\xba\\x84y\\x87\\xea\\xf9\\xf758n\\xa3\\xc2\\xe1t\\xe8\\xf2\\x0e\\x1b\\xa8\\xf0\\xb8]:<\\x83\\x88\\xea|R\\x17P\\x8f0\\xe2:\\x9f\\x14\\x85\\xd4#\\xcc?W\\xcf\\xb9\\xa9\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90p\\xddG\\x85\\xc2\\xe9\\xd1\\xe4\\x1cGS\\xe2\\x90\\xba\\x84y\\x87\\x11\\xd4\\xf8\\xa4.\\xa1\\x1ea\\xfa\\xbe}\\xcdN\\x1b\\xa8\\xf0\\xb8]:<\\x83\\x86\\xea<.\\x17N\\x8f \\xe2:\\x9f\\x14\\x85\\xd4#\\xcc8\\x8e\\xa7\\xc5!u\\x08\\xf3\\x0f\\xd5\\xf3\\xeejp\\xddG\\x85\\xc2\\xe9\\xd1\\xe4\\x1c7Q\\xe1p\\xbaty\\x07\\x11\\xd4\\xf8\\xa4.\\xa1\\x1ea\\xc4u>)\\x0b\\xa8G\\x98~\\xaf\\x9fsS\\x86\\xea<.\\x17N\\x8f 
\\xe1\\xba\\x8f\\x0b\\x85\\xd3\\xa3\\xc88\\x8e\\xa7\\xc5!u\\x08\\xf3\\x0e#\\xa9\\xf1H]B<\\xc3\\xf5|\\xfb\\x9a\\x9c7Q\\xe1p\\xbaty\\x07\\r\\xd4x\\\\.\\x9d\\x1eA\\xc4u>)\\x0b\\xa8G\\x98q\\x1dO\\x8aB\\xea\\x11\\xe6\\x1f\\xab\\xe7\\xdc\\xd4\\xe1\\xba\\x8f\\x0b\\x85\\xd3\\xa3\\xc88n\\xa3\\xc2\\xe1t\\xe8\\xf2\\x0e#\\xa9\\xf1H]B<\\xc3\\x88\\xea|R\\x17P\\x8f0\\xfd_>\\xe6\\xa7\\r\\xd4x\\\\.\\x9d\\x1eA\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90q\\x1dO\\x8aB\\xea\\x11\\xe6\\x1cGS\\xe2\\x90\\xba\\x84y\\x87\\xea\\xf9\\xf758n\\xa3\\xc2\\xe1t\\xe8\\xf2\\x0e\\x1b\\xa8\\xf0\\xb8]:<\\x83\\x88\\xea|R\\x17P\\x8f0\\xe2:\\x9f\\x14\\x85\\xd4#\\xcc?W\\xcf\\xb9\\xa9\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90p\\xddG\\x85\\xc2\\xe9\\xd1\\xe4\\x1cGS\\xe2\\x90\\xba\\x84y\\x87\\x11\\xd4\\xf8\\xa4.\\xa1\\x1ea\\xfa\\xbe}\\xcdN\\x1b\\xa8\\xf0\\xb8]:<\\x83\\x86\\xea<.\\x17N\\x8f \\xe2:\\x9f\\x14\\x85\\xd4#\\xcc8\\x8e\\xa7\\xc5!u\\x08\\xf3\\x0f\\xd5\\xf3\\xeejp\\xddG\\x85\\xc2\\xe9\\xd1\\xe4\\x1c7Q\\xe1p\\xbaty\\x07\\x11\\xd4\\xf8\\xa4.\\xa1\\x1ea\\xc4u>)\\x0b\\xa8G\\x98~\\xaf\\x9fsS\\x86\\xea<.\\x17N\\x8f 
\\xe1\\xba\\x8f\\x0b\\x85\\xd3\\xa3\\xc88\\x8e\\xa7\\xc5!u\\x08\\xf3\\x0e#\\xa9\\xf1H]B<\\xc3\\xf5|\\xfb\\x9a\\x9c7Q\\xe1p\\xbaty\\x07\\r\\xd4x\\\\.\\x9d\\x1eA\\xc4u>)\\x0b\\xa8G\\x98q\\x1dO\\x8aB\\xea\\x11\\xe6\\x1f\\xab\\xe7\\xdc\\xd4\\xe1\\xba\\x8f\\x0b\\x85\\xd3\\xa3\\xc88n\\xa3\\xc2\\xe1t\\xe8\\xf2\\x0e#\\xa9\\xf1H]B<\\xc3\\x88\\xea|R\\x17P\\x8f0\\xfd_>\\xe6\\xaf\\x0buP\\xa0gx\\xaa\\xe2\\xc3\\x8f\\x19jrI\\x1a\\x99i)3.\\xe1_\\x88\\x86\\x9a3&\\xedaO\\xce\\xf1TF\\x98\\xc4\\x95%\\xc9&ii\\xd4\\xa8\\xc8\\xbb\\x85~#\\x1ah\\xf0\\xf8\\xdb\\xde\\x8c\\xdc\\xbe\\xf2\\xe9\\xf0\\x8b\\xa2\\xf2\\xaf\\xc1\\x8b\\x8f\\xcc\\xde\\xff\\x00A\\x8a&=\\x8e\\xd5.\\x82\\xb1J\\xac\\x86\\xa5\\x1cV\\x8c\\xcc\\xe3\\xa3S=\\xc2\\xfc\\x82\\xf7\\x95~\\x0c\\\\~f\\xf7\\xfa\\x0cU1\\xcf\\xc1\\xea\\xbf\\xcdZ\\xff\\x00A\\x0e\\x9e\\x1a\\xa9\\xa7\\nm?\\x1f\\xb1W\\xed8n\\xa3\\xc2\\xe1t\\xe8\\xf2\\x0e\\x1b\\xa8\\xf0\\xb8]:<\\x87\\xc3yEc\\xb9D\\x8cu\\x12u\\xb9\\x8f\\r\\xbb\\x07#wj\\xf6Xqn6\\x85\\xefi\\xbaz\\xa9\\xa7\\x0bB=Kw\\x99he\\xad:\\xc3\\xb46\\xcf\\xabss\\xc4]\\xc8;\\xdb\\xe4\\xc9n\\x1b\\x8c\\xc5\\x87\"CL>\\xe1\\x91!\\xa7^m\\xb54\\xda\\xcc\\xd4^\\xca\\xd4G\\xcc\\xb9\\x0e\\x93\\x8b1\\xc6\\xae\\xeew\\x95\\xd3\\x86\\xea<.\\x17N\\x8f \\xe1\\xba\\x8f\\x0b\\x85\\xd3\\xa3\\xc8S(;A`YFC:\\x92\\xae\\xf1r\\xe7@\\x91*,\\xd5&\\x04\\x92b\\x1b\\xb1\\xcdd\\xf2^|\\xdb&\\x9a2\\xee\\xd5\\xa6\\xfa\\x8bx\\x8bT\\xeaFF\\x7f\\xcc\\'\\xb4>\\xcfv\\x8b|\\x8aj\\x0c\\x852\\xec]ioGi\\xe8\\x8f\\xc7)m\\xa7\\xef\\x97\\x1dn\\xb6\\x94\\xbe\\x92#\\xd7V\\xcdE\\xa7?pm\\xbf\\xb7sU\\xd3\\x86\\xea<.\\x17N\\x8f 
\\xe1\\xba\\x8f\\x0b\\x85\\xd3\\xa3\\xc8R0~\\xd1{<\\xda5\\xfa\\xe9(2\\x0fK\\xb4&\\x17%\\xb8\\xefB\\x91\\x1c\\xe44\\x83\"R\\xd97[I<\\x92\\xd4\\xb56\\xcd^\\xf1\\x01\\xb1~\\xd4\\x18\\xe6\\xd6\\xa92K\\x17\\x9a\\x93\\x8f\\xb5I*q>\\xed\\x849Q\\xe3\\xa2+\\x0f)\\xb2yO\\xbc\\xd3m\\x92\\x8d)\\xdeSz\\xef7\\xa9\\x92\\x8b\\xd916\\xfc#7sV\\xad\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90p\\xddG\\x85\\xc2\\xe9\\xd1\\xe4*8\\x06\\xde\\xb0]\\xa7\\xdb\\xb9U\\x8e\\xde\\x1c\\xab$1\\xe9E\\x16L9\\x11\\x1cu\\x9dH\\xbb\\xd6\\x92\\xf3h7\\x1b\\xd4\\xc8\\xb7\\xd1\\xaay\\x97>d/\\x16\\x13\\x99\\xab\\x81&d\\x83Y1\\x1d\\xa5<\\xe1\\xb6\\xda\\x9cV\\xeaH\\xcc\\xf4JH\\xd4\\xa3\\xd0\\xbd\\xc4Fg\\xf1\\x10\\xd4b\\xcc\\xc5\\xe2\\xae\\xe5\\xe5\\xe5\\xe1\\xba\\x8f\\x0b\\x85\\xd3\\xa3\\xc88n\\xa3\\xc2\\xe1t\\xe8\\xf2\\x19.!\\xdas\\x16V\\x03\\x8f_d\\xf9\\x15y\\xbb\\x90J\\xb0j\\xac\\xeakg\\x91KLy\\x0bo\\xbbC.5\\xdfw\\xc9I$\\x94\\x83N\\xaaQ+p\\x8c\\x85\\xb5;{\\xc0Og\\x8f\\xe7*\\xc9#\\xb3\\x8c0\\xf2\\xa3;-\\xf6\\xdcmm\\xbe\\x95\\xee\\x1b*eI\\'\\t\\xdd\\xefg\\xbb4\\xef\\xeb\\xa7!\\x98\\xc7\\xbf\\xfc\\xbb\\x9a\\xad\\xbc7Q\\xe1p\\xbaty\\x07\\r\\xd4x\\\\.\\x9d\\x1eC\\x1a\\xd9\\xff\\x00jZ,\\xbf&\\xda\\x84\\x896P\\xa0\\xe1\\x18\\x935\\xae7e*+\\xf0\\xdfJ\\x9fm\\xc3u/!\\xed\\x14FJBI)\\xdcI\\x9e\\xf1\\x17\\xb5\\xa9\\x0b<\\x0e\\xd3\\x1b8\\xb1\\xc7\\xaf\\xae\\x99\\xbfu0\\xe8\\x99D\\x9b&\\xdf\\xad\\x96\\xd4\\x98\\xcc\\xa8\\xf4K\\xca\\x8e\\xb6\\x89\\xde\\xef\\xdf\\xf7BA\\xa4\\x88\\x8c\\xcc\\xf4#2\\x91\\xe2/\\xaen\\xe6\\xab\\xf7\\r\\xd4x\\\\.\\x9d\\x1eA\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90\\x87F\\xd41wr\\x0b\\nV\\xad\\xdaz}}j-\\xe6wHZ\\xda\\x8f\\x15z\\xee8\\xb7H\\x8d\\xb4\\x9a\\x89*Q$\\xd5\\xbci#Q\\x11\\x971S\\xa2\\xdb\\xed,]\\x8eS\\xed\\x172\\x94\\xc67Uvi~\\xbd\\x93C\\x8e:\\xb6^Q\\x9cD\\x12\\x10J[\\x8f-\\xad\\xd5\\x1aP\\x93\\xe6j\"-\\x13\\xa8\\xd6\\xdaz\\xbb\\x97\\x96\\x89\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90p\\xddG\\x85\\xc2\\xe9\\xd1\\xe41\\x8c\\x03\\xb5E\\x1eH\\xde\\xd3/-&\\xb3\\x17\\x0e\\xc6\\xad\\xe3WB\\x9a\\xd5|\\xa4\\xc8p\\x9c\\x8e\\xca\\x8
d.\\xb2dn\\x1b\\x9d\\xf3\\xaaA$\\x9bI\\xf2\"\\xd3^g\\xe9\\xcf\\xbbYb8\\xfe\\xc4\\xef\\xf6\\x87\\x8d\\xba\\xbc\\x9d\\x8a\\xb9\\t\\x84\\xa8I\\x8d!\\x97\\x1b\\x92jIwo \\xda\\xef\\x19\\xd0\\x94J\\xd5h\">E\\xaf\\xb4C;\\xc6\\x97\\xcd\\xdduk\\xdc7Q\\xe1p\\xbaty\\x07\\r\\xd4x\\\\.\\x9d\\x1eB\\x97o\\xda\\x0b\\x07\\xa0\\xc4\\xaa\\xf2K\\x19\\xf60\\xab,\\xde[\\x11\\x12\\xf5$\\xe4\\xcauh3%\\x17\\xa3w=\\xf1i\\xbag\\xa9\\xa0\\x8bN~\\xe3#\\x1e\\x87\\xf6\\xf1\\x80\\xc7\\xc3(2\\xc5\\xe4\\xb1O\\x1d\\xbe\\x9a\\xd5ut\\xf6\\xd0\\xe2\\xd0\\xfc\\x87\\x14\\xa4!\\xbd\\t&iV\\xf2TG\\xbcE\\xbai=\\xed41\\xad\\xb4\\xf5wMV\\xce\\x1b\\xa8\\xf0\\xb8]:<\\x83\\x86\\xea<.\\x17N\\x8f! \\xb5\\xa5\\xb4)kQ%)-MFz\\x11\\x17\\xe3\\x18\\x04\\x8e\\xd7\\x18\\xd6E\\xb5-\\x9fb\\xd8U\\xa4[\\xc8\\xd7v3\"\\xd8HT)$\\x9e\\xe5\\x98\\x8f;\\xbf\\x15\\xe3$\\xb6\\xef\\xdd\\x1bJMH7\\x0bC\\xd3\\x91\\x99\\x18U\\x8d4\\xda\\xf5w5\\x96\\xdf\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90p\\xddG\\x85\\xc2\\xe9\\xd1\\xe42\\xdcC\\xb4v7#g\\xb6\\xf9\\x9eE\\x91\\xd5\\xb1F\\xd5\\xec\\x9a\\xc8o\\xc7\\x852;\\x86Isu\\xa8\\xeba\\xf6\\xc9\\xe5J\\xf7\\x92\\x92\\x84\\x19\\x19\\x91\\xee\\x96\\x84b7=\\xeds\\x89\\xe2\\xf8\\xbe5{T\\xd4\\xfb\\xa86\\xf9\\x0b\\x14N\\xe9W5\\xb7b\\xef\\x1aM\\xd5)\\x9e\\xe0\\xdc\\xdfJ\\x14\\x93Kf\\x92R\\xf7\\xbd\\x9dt2\\x19\\xde-\\x17\\xcd\\xddul\\x9c7Q\\xe1p\\xbaty\\x07\\r\\xd4x\\\\.\\x9d\\x1eB\\x99e\\xb5\\xa6k\\xf2\\xdc\\x01\\x8e\\xe8\\xf8{2m\\xe6!\\xca\\x90\\xc3\\x91\\x9fjZY\\xef\\xdaC\\x8d\\xb8IRI\\xc6\\x90\\xf7\\xb2\\xa4\\xa5ISdFG\\xbd\\xec\\xe8\\x83q\\x8bT\\xf0\\xa9/(\\xee\\x1b\\xa8\\xf0\\xb8]:<\\x83\\x86\\xea<.\\x17N\\x8f!\"\\x02\\xe7\\xaf\\x9c\\x97\\x94w\\r\\xd4x\\\\.\\x9d\\x1eA\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90\\x91\\x00\\xcf_9/(\\xee\\x1b\\xa8\\xf0\\xb8]:<\\x83\\x86\\xea<.\\x17N\\x8f!\"\\x01\\x9e\\xber^P\\x95\\xd5\\x90\\xeb\\xf6\\x8dDqb1\\x18\\xd7\\x0ef\\xf1\\xb2\\xd9#^l\\xfb\\xf4!\\xa4\\x8c\\xfd\\x9f\\xe9\\x17\\x1f\\xfc\\xceo\\xf9\\xb24\\x01\\xe3\\xf1\\x9334L\\xf2\\xfb\\xcb\\xa7\\xc2\\x00\\x00\\x1e\\x00\\x00\\x00\\x15\\r\\xab\
\xb6\\x97pyhZIhT\\x88\\x84iQjF^\\x92\\xd7#\\x11\\xfc7Q\\xe1p\\xbaty\\t-\\xa9\\xfe\\x05I\\xfc\\xe6\\'\\xfeKC\\xf8>\\xbe\\x05SN\\x04Z~3\\xf4\\xa5*\\x9d!\\x1d\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90p\\xddG\\x85\\xc2\\xe9\\xd1\\xe4?*\\x9c\\xaa\\xae\\xf2e\\xe4XR\\xbb\\xe7\\xe9%\\x94\\x1b\\x04wkOr\\xf1\\xb0\\xd3\\xe4\\x9dL\\x88\\x95\\xf77\\xdaV\\xa9\\xd4\\xbd\\xad5\\xd4\\x8c\\x8a\\x83A\\xda\\x87f\\x99F=2\\xfa\\xaf ze$H\\x855\\xeb\\x14U\\xcc(\\xe4\\xd9\\xa9(\\xdd\\'\\r\\x92J\\x9c\\xdeZH\\xda#5\\x91\\x9f4\\x96\\x8658\\xd3\\x1cj\\xee\\xc5\\xe5\\xa1\\xf0\\xddG\\x85\\xc2\\xe9\\xd1\\xe4\\x1c7Q\\xe1p\\xbaty\\n-_i-\\x9c[\\xe1\\xf7\\x99C\\x19*\\x1b\\xa6\\xa3q\\xb6\\xac\\xdc\\x95\\x12Dwa\\x1b\\x86\\x92Gz\\xcb\\x8d\\xa5\\xc4\\x12\\xb7\\x8bE\\x1at\\xd3S\\xd7B3(\\x9c\\x83\\xb4\\x9e57g\\xdbB\\xb4\\xc3\\xec\\x9b\\xb0\\xbf\\xc6(%]\"\\r\\x8c)\\x11\\xc9\\xc4\\xa1\\x97\\x16\\xd3\\x9b\\x8e%\\xb5:\\xca\\x94\\xde\\x9b\\xed\\x9e\\xe9\\xfcJ\\xe6Bm\\xff\\x00\\xb7sV\\xa1\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90p\\xddG\\x85\\xc2\\xe9\\xd1\\xe4*\\xf86\\xd5\\xea2\\xa7\\xe9\\xe9\\\\\\x98\\xda\\xf2\\xb9\\x14q\\xae\\xe5\\xc1\\x8a\\xc3\\xaan;n\\xa4\\xb45/CK{\\xca\\xde\\xddB\\xd5\\xbc\\xa2I\\x99\\x11\\x91\\x19\\x8b\\x9c\\xd9\\x8c\\xd7\\xc3~T\\x85\\xf7l0\\xda\\x9dqz\\x19\\xee\\xa5%\\xa9\\x9e\\x85\\xcc\\xf9\\x10\\xd4b\\xd51x\\xa8\\xbc\\xbc\\x9c7Q\\xe1p\\xbaty\\x07\\r\\xd4x\\\\.\\x9d\\x1eB\\x87\\x8fv\\x95\\xd9\\xbeQ\\x8eN\\xc8+\\xb2=\\xfa\\x08P\\xd19\\xfbg\\xe0\\xc9b\"ZQ\\x91$\\x89\\xe7\\x1bJ\\x14\\xbd\\xe3$\\x9bi3Y+\\xd94\\x91\\xf2\\n\\xce\\xd2\\xbb6\\xb5\\xa2\\xbd\\xb8FN\\xdcXtl\\xa2E\\x8alb\\xbf\\r\\xe8\\xed,\\xf4B\\xcd\\x97\\x9bK\\x86\\x95\\x1f\\xb2\\x93$\\x99(\\xf9\\x16\\xa7\\xc8go\\xfd\\xfb\\x9a\\xaf\\x9c7Q\\xe1p\\xbaty\\x07\\r\\xd4x\\\\.\\x9d\\x1eB\\xa1M\\xb7\\xbc\\x1e\\xfa\\xbe\\xa2lKw\\x89\\x9b[r\\xa1\\x88R+\\xa50\\xe2\\xe7\\x1b*x\\x99Sn6\\x95#\\xeei5o,\\x89:i\\xcf\\x99k_\\xdbF\\xde\\x98\\xc1\\xf1\\xec\\x88\\xb1\\xc7\\xa2\\xcf\\xc9(,\\xa9bXC\\x9b\\x1d\\xd3m\\x96\\xe7\\xcbi\\xa4\\x9e\\xa5\\xbaJ3mkQn\\xa8\\xf42-\\xe2\
\xf8\\x8d8\\xf3\\x11|\\xdd\\xcdZ\\x7f\\r\\xd4x\\\\.\\x9d\\x1eA\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90\\xcc\\x8b\\xb4mB;AX\\xec\\xbd\\xf8\\x16\\r\\xc8\\x8d\\x06#\\xed\\xcej\\xbe[\\xa8q\\xf7\\x96\\xe1wjRY46\\x84\\xa5\\t>\\xf5K\\xdc3R\\x93\\xa9\\x1a\\x14B\\xc5\\x86\\xed\\xd7\\x07\\xda\\x0eQ7\\x1e\\xc7\\xae\\xce\\xce\\xce\\'|n\\x13p\\xdfK\\n\\xee\\x96M\\xbb\\xdd\\xbe\\xa4\\x13Nn\\xadD\\x93\\xdcR\\xb43\\x08\\xc7\\xbf\\n\\xbb\\x9a\\xad|7Q\\xe1p\\xbaty\\x07\\r\\xd4x\\\\.\\x9d\\x1eC0\\xa4\\xed%\\x8d\\xb7\\x83q\\xaeMa\\x1e\\x8b\\x17\\xb3\\xbd~\\xa2\\x8eI\\xb6\\xe3\\x87!-\\xa9\\xc6\\xd2\\xb5\\x9aIZw\\x8b\\x8e\\xfa\\xd2z\\x11n\\x1a\\x08\\xf9\\x9f9{~\\xd1x\\r\\x05-E\\x9d\\x85\\xb4\\xc8\\xad\\xdbw\\xbe\\x87\\x11u\\x13}5\\xd4\\xb4\\xbd\\xc7\\x17\\xe8\\x9d\\xcf~\\x94$\\xfd\\xebR\\t<\\xc8\\xf5\\xd0\\xcbV\\xdf\\xfbw5^8n\\xa3\\xc2\\xe1t\\xe8\\xf2\\x0e\\x1b\\xa8\\xf0\\xb8]:<\\x85\\n\\xeb\\xb4\\xc6\\xcd(ihm\\xa4e,\\xbf\\x06\\xf1\\xb7\\x1e\\xaf\\\\\\x08\\xcf\\xcc[\\xcd\\xb6dN\\xafq\\x94-IJ\\x0c\\xf4Q\\xa8\\x88\\x92|\\x8fC\\x16\\xaa\\x9d\\xa3\\xe3\\xb7\\x977UPl{\\xf9\\xf4\\xd1cM\\x9c\\xd7r\\xe2{\\x96d%ke[\\xc6\\x92%o%\\xa5\\x9e\\x8932\\xd3\\x99\\x16\\xa5\\xad\\x8ci\\x9d\"\\xae\\xe5\\xe5\\'\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90p\\xddG\\x85\\xc2\\xe9\\xd1\\xe43\\x0c\\x7foLe\\xdbV\\xa4\\xac\\xa5r<\\xfc.\\xd7\\x0bw)b\\xc11\\x9eL\\x97\\r2\\x9ai:$\\xf4=\\xd3C\\x86{\\xa6\\x8d\\xedt\\xfe\\xa1\\x17\\xb2\\xfe\\xd7x\\x86s\\xb3\\x9b\\x8c\\xba\\xe0\\xe4\\xe2\\xf0\\xeadHD\\xb5M\\x81-,\\xa1\\xb4\\xcb\\\\vM.\\xad\\x94\\xa5\\xd7\\x17\\xba\\x8d[oyIR\\xf7\\x0c\\xb5-\\x06w\\x8dm\\x9b\\xba\\xea\\xd8\\xf8n\\xa3\\xc2\\xe1t\\xe8\\xf2\\x0e\\x1b\\xa8\\xf0\\xb8]:<\\x85-}\\xa1p\\x06p\\xb6\\xb2\\xc9\\x17\\xaa\\x87D\\xe5\\x83uE\"d\\x19,,\\xa5-D\\x94\\xb4\\xa6\\x96\\xd98\\x933Qs4\\x91\\x11s3\"\\xe6.\\xd9\\x04\\xf7*\\xa8l\\xa6\\xb2\\x94\\xa9\\xd8\\xd1\\x9cy\\x04\\xb23I\\x9aRfZ\\xe9\\xf1r\\x1b\\x8cY\\x9e\\x15wK\\xcb\\xe7\\x86\\xea<.\\x17N\\x8f 
\\xe1\\xba\\x8f\\x0b\\x85\\xd3\\xa3\\xc8c\\x14\\x1d\\xa1%\\xd9\\xe0\\xdb\\x0f\\x9f:MMe\\xf6y\\xe8nHa\\xe8\\x13W\\x1d\\xc4\\xae9\\xb8\\xebQ\\xdcl\\x96\\x96\\x9d\\xde4\\x9a\\t\\xe5\\x91\\x1aR\\xbeg\\xa7+]\\x7fi\\x0c\\x02\\xeeM\\xf4Z\\x9b\\x97\\xad\\xa4\\xd2\\xc6\\x93*JbWJq\\x0bDs\\xdd{\\xb9q-\\x1a^4\\xa8\\xc9&M\\x1a\\xcfS\"\\xd3Q\\x88\\xc7\\xbf\\xfc\\xbb\\xae\\xab\\xe7\\r\\xd4x\\\\.\\x9d\\x1eA\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90\\xc6\\xb0\\xce\\xd3\\'\\xb4^\\xcf\\x13v\\x8bGL\\xfa,\\xebkSi6\\xa2\\\\Y-\\xb6iIw\\xae\\xb2\\xcb\\xce6\\xda^Q\\xb4\\x85\\xa5.#To\\x1aL\\xf9r=\\xa2\\x86\\xee\\x1eKG]o\\\\\\xf1H\\xaf\\xb0\\x8c\\xdc\\xb8\\xcf\\'\\xdc\\xe3N$\\x94\\x85\\x17\\xf5\\x91\\x91\\x8dS\\x8d5p\\xa9//\\x8e\\x1b\\xa8\\xf0\\xb8]:<\\x83\\x86\\xea<.\\x17N\\x8f!\\x1b\\x85\\xed\\x1b\\x1c\\xda\\x19\\xde\\x16;h\\xdd\\x99\\xd2Y=Oa\\xb8\\x85\\xa7\\xb8\\x96\\xd6\\x9d\\xe3G\\xbcE\\xae\\x9b\\xc5\\xcc\\xb5#\\xd7\\x91\\x98\\xabM\\xda\\x9c\\xca\\xed\\xbcO\\xc3\\xe42\\xc1PD\\xc4K![\\xed\\xb0\\xe3\\x92\\xbb\\xdfJ[JI\\x12L\\xf7\\x93\\xb8\\x8dI$\\x83Q\\x9f\\xb8\\xcf\\xdc\\x1bi\\xe3\\x98\\xbc\\xaf|7Q\\xe1p\\xbaty\\x07\\r\\xd4x\\\\.\\x9d\\x1eC!\\xc5\\xfbQ\\xe2ll\\xee\\x83#\\xcar8\\n]\\xec\\xb9\\xed\\xd7\\x15-\\\\\\xf5\\x1b\\xedG\\x90\\xe3j\\xd2:\\x9a7\\xd2m\\xa5)\\'\\x14\\xa4\\x12IZ\\x99\\x1e\\xe9\\xa4z&\\xf6\\x9f\\xa0\\x87\\xb6\\xda\\x8c\\x0b\\xd1\\'\\xbd\\x1e\\xca\\x95\\xabF-#WL}+q\\xd7\\x9bC(\\xd1\\x0c\\x99%\\xb3J\\xf7\\x94\\xea\\x94HI\\xe8\\x95\\x1aLgx\\xd2\\xf9\\xbb\\xae\\xab\\xc6}\\x8f\\xd5\\xb3\\x82dn7[\\x11\\x0e\"\\xb6J\\x92\\xa4\\xb0\\x9222iZ\\x19\\x1e\\x83Zg\\xf9$\\x7fd\\x86i\\xb4?\\xc0\\x0c\\x9b\\xf4d\\x9f\\xdd(il\\xff\\x00$\\x8f\\xec\\x90\\xc7\\x8a\\xaaj\\xc2\\xa2\\xf3\\xf1\\x9f\\xfeZ\\x8e\\x0f\\xb0\\x00\\x1f-@\\x00\\x00\\x19>/I]1\\x8bW_\\x81\\x15\\xf7Uqe\\xaa\\xdce*Q\\xff\\x00\\xb6<^\\xf3!\\xac\\x0c\\xd3\\x0f\\xfeeg\\xfab\\xcb\\xff\\x005\\xe1\\xf4\\xbc,\\xccQ]\\xb9\\xc7\\xdc\\x9f\\xda\\xf6p\\xddG\\x85\\xc2\\xe9\\xd1\\xe4\\x1c7Q\\xe1p\\xbaty\\x0f\\x8bL\\xa2\\xb2\\x9a\\xe2\\x9a\\xaed\\x9e\\
xe6}\\xc3\\xae3\\x05\\xae\\xedJ\\xef\\x96\\xdbJue\\xa9\\x11\\x92tB\\x14~\\xd1\\x97\\xbbB\\xe7\\xc8U\\xeev\\xeb\\x83\\xd0g\\xac\\xe1s.\\xcc\\xb2GV\\xc3g\\r\\x98o\\xbc\\x96\\x96\\xf1\\xe8\\xca]u\\x086\\xda5\\xf2\\xdd%\\xa9&z\\x90\\xf4N,\\xc7\\x1a\\xbb\\xb9\\xdeV\\xbe\\x1b\\xa8\\xf0\\xb8]:<\\x83\\x86\\xea<.\\x17N\\x8f!Y\\x95\\xb6\\xac2\\x1e!}\\x94\\xbds\\xb9EE=\\xea\\xcb\\x19~\\x8a\\xf1\\xf7\\x12Zx\\x98q\\x1b\\x84\\x8d\\xe5h\\xe1\\x92uI\\x19\\x1f\\xbc\\x8c\\xcb\\x98\\x84\\xca;O\\xec\\xcf\\x0c\\xb9\\xb5\\xab\\xb8\\xc9}\\x12eL\\x94E\\xb1\\xd2\\x04\\xa7\\x1b\\x82\\xb5\\xa1\\x0bA\\xbe\\xe2\\x1a44\\x85\\x13\\x89\\xd1k2I\\x9e\\xf1\\x11\\xea\\x95\\x11I\\xc6\\xb7\\x1a\\xbb\\x9a\\xb4\\x1e\\x1b\\xa8\\xf0\\xb8]:<\\x83\\x86\\xea<.\\x17N\\x8f!G\\xa8\\xed\\x19\\xb3\\xcb\\xd8y\\x0c\\xa8\\x99\\x01\\x9bT\\x15\\xca\\xb7\\x9eOA\\x92\\xca\\xd3\\t)R\\x8eKh[d\\xa7\\x9a\\xd1\\n\\xd1m\\x12\\x88\\xf9\\x11\\x19\\x99\\x96\\xb1)\\xedm\\xb2\\xa7&\\x14F\\xf2W\\x9e\\x94\\xe3>\\x91\\x19\\x96j&\\xadsZ\\xff\\x00\\x9e)%\\x939)\\xf7\\x99\\xa9\\x9d\\xf2\"#3\\xe4Fbm\\xff\\x00\\xbfsV\\x9d\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90p\\xddG\\x85\\xc2\\xe9\\xd1\\xe4)V\\x9d\\xa26}S\\x85\\xd2\\xe5\\x8e\\xdf\\x9c\\x8a\\x1b\\xa5)\\x10$A\\x85\"R\\xdfRu\\xdeI4\\xd3jp\\x8d;\\xaa%\\x11\\xa4\\xb7M&G\\xa1\\x90\\xfe\\xd9v\\x87\\xd9\\xddN\\x19\\x8f\\xe5\\x92r\\x88\\xc5\\x8e\\xdf\\xc9\\xf4:\\xd9\\xed\\xb4\\xeb\\x89\\x90\\xfe\\xeb\\x8a\\xee\\xf4JL\\xd2\\xaf\\xb8\\xb8Z(\\x88\\xf7\\x93\\xbb\\xf7\\xc6Dwm\\xfd\\xbb\\x9a\\xae\\x9c7Q\\xe1p\\xbaty\\x07\\r\\xd4x\\\\.\\x9d\\x1eC+\\x8b\\xda6\\x92\\xcfh\\n\\xaf\\x89a\\xdd\\xd2\\xc4\\xc6\\xe5^\\xcd\\x892\\x82\\xd1\\x8b=\\xd6\\xdem\\x04\\xe3I[\\x04\\x95\\xa0\\x89J%6Dn\\x9a\\x8d\\x1a\\'MD\\x86!\\xda\\x83g\\x19\\xceLx\\xfdU\\xc4\\xe2\\xb6LG\\'\\xa9\\x9b\\n9\\xf0R\\x88\\xed\\xfd\\xfb\\xaaq\\xf6\\x10\\x84\\xa0\\xb5\"\\xd4\\xcc\\x8bS\"\\xf7\\x8c\\xed\\xff\\x00\\xb7sV\\x89\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90p\\xddG\\x85\\xc2\\xe9\\xd1\\xe4)xGhm\\x9fm\\x1a\\xf9\\xbaZ\\x0c\\x852\\xec^moGi\\xe8\\x8f\\xc7)m\\xa7
\\xef\\x97\\x1dn\\xa1)}$FG\\xabf\\xa2\\xd3\\x9f\\xb8h\\xa3q\\x8b5k\\x15w/(\\xee\\x1b\\xa8\\xf0\\xb8]:<\\x83\\x86\\xea<.\\x17N\\x8f!C\\xc7\\xbbJl\\xe3*js\\xf5\\xb9\\x1f}\\x0e\\x03\\x12$L\\x9c\\xe4\\x19-E\\x8a\\x86\\x17\\xb8\\xe9:\\xfa\\xdb&\\xdbROOaJ%\\x19\\x19(\\x88\\xd2dg\\xfd\\xc7\\xfbI\\xec\\xdf%\\x8bp\\xfcL\\x99\\xb6\\x11S\\tVS\\x13c\\x15\\xf8+n*}\\xef\\x92\\x1fm\\n[\\x7f\\x16\\xfaH\\xcbS\"\\xd7\\x99\\x0c\\xed\\xff\\x00\\xbfsU\\xef\\x86\\xea<.\\x17N\\x8f \\xe1\\xba\\x8f\\x0b\\x85\\xd3\\xa3\\xc8R){El\\xfe\\xfe\\xa25\\x9c;\\xa7\\xbd\\x0eM\\xa4Zf\\x95\"\\xb6[\\x0bT\\xb9&D\\xc27\\x1ci*\"Y\\xa9:,\\xcbwC\\xd4\\xcc\\x88C\\xed\\xf3oQ\\xf6e\\x84\\xe7NP\\xbd\\x16fc\\x8d\\xd5G\\xb5U|\\xd8\\xee\\xa9\\x92i\\xe7\\x8d\\xa6\\xd4\\xa3N\\xe9(\\x8c\\xd0\\xe7$\\xafR\\xdd\\xe7\\xa1\\x19j\\x9c{E\\xf3w5i\\xfc7Q\\xe1p\\xbaty\\x07\\r\\xd4x\\\\.\\x9d\\x1eC2\\xcb\\xfbF\\xd4a{s\\xa6\\xd9\\xdc\\xe8\\x16\\n\\xf4\\xfa\\xa7\\'\\x9c\\xf8\\xb5\\xf2\\xe4\\xee;\\xdf4\\xd3m\\xee\\xb4\\xca\\x8bt\\xc9\\xc5\\x9a\\x9c\\xde\\xddF\\xeaIZo\\x10\\xb1Qm\\xd3\\x07\\xc9\\xb3\\x99x}]\\xd9\\xcd\\xbe\\x8a\\xf3\\xb1\\x9di\\xa8o\\xf7$\\xf3E\\xab\\xad\\x13\\xfb\\x9d\\xd1\\xad\\x1c\\xf7\\x92K3-9\\x90m\\xf5\\xb6n\\xe6\\xab_\\r\\xd4x\\\\.\\x9d\\x1eA\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90\\xce+\\xbbA\\xd11\\x8e\\xe6\\x19~A2=&\\x13MxtQ\\xecV\\x85\\xb8n\\xad\\x0e\\xa2;\\x8e\\xac\\xd2G\\xa2\\x0eJ\\xd4\\xd9hZ\\x117\\xbej\\xd1^\\xcc\\x84\\xce\\xd1X\\rv3\\x1e\\xfaU\\xc4\\x98\\xd0$\\xcbT(\\xa8z\\xaabd\\xcau)%\\x991\\x1c\\xda\\xef]N\\xe9\\x92\\xb7\\xd0\\x85\\'Nz\\x86\\xdf\\xfbw5]\\xf8n\\xa3\\xc2\\xe1t\\xe8\\xf2\\x0e\\x1b\\xa8\\xf0\\xb8]:<\\x85\\x0egim\\x9a\\xc2\\xc5k\\xb2%d\\xed\\xbd[a-p#&,W\\xdf\\x92\\xe4\\x84\\x11\\x9b\\x8dz2\\x1b7\\x89h\"3RM\\x04i-\\x0c\\xf4\\xd4\\x85\\x8f\\x1f\\xda\\x96/\\x94]@\\xa9\\xab\\xb3\\xf4\\xab\\t\\xd5\\x08\\xbe\\x8e\\xcf\\xa3\\xba\\x8e\\xf2\\n\\xd7\\xb8\\x97\\xb7\\x94\\x92\"\\xd5\\\\\\xb7L\\xf7\\xbe=4\\x161\\xa6xU\\xdc\\xd53\\xc3u\\x1e\\x17\\x0b\\xa7G\\x90p\\xddG\\x85\\xc2\\xe9\\xd1\\xe43\\x03\\xdb\\xd3\
\x17\\xdbE\\xd9\\x9dn.\\xf4[\\xc18JI\\xa0\\xd5\\xa6\\xf1(\\xb4\\xd7Q\\xe1\\xb5\\xc22\\x9aK{l\\xab\\x84\\xae.j\\xe8v\\xbd\\' z\\x9a<5)\\xf9\\x90\\xd7^\\xd3)\\x97\\x19\\xa5\\x11w\\xdd\\xdb\\xab5\\x96\\xee\\xba\\x9aW\\xa721\\xd9\\xe01\\xb1\\x8bZ\\xff\\x00\\x96\\x88\\xfb\\x17p\\xc6\\xd0q\\x1c\\xabk\\xd7\\xbbQ\\xc9\\xa9\\xf1\\x0c\\xb2%z-\\xb1[v\\xab\\xa5\\xc7z\\xa6m\\xacxd\\xf7\\xa47\\x19FhR]N\\xa4\\xa4\\xe8d\\xa2RS\\xee3N\\xba>0\\x9cb\\x92\\xaf0\\xce\\xea6{\\xb4\\xfb\\xbb\\xc8\\xb4\\xc5R\\xd4L\\xc3\\xe1\\x19O\\xd9!\\xf5\\xeb\\xe8\\x8d5)\\xc7\\x14H\\'\\x10\\x8d\\xf5\\xee\\x12\\x12\\x95\\x99\\xeae\\xbc:\\x80\\x020m7\\xb9w\\x1c\\xec\\x9bd\\x99^\\xcc6=\\xb5\\x1d\\x93YQ\\xa9\\xcb\\xbb|vL\\xe8\\x19%k/9\\x16R\\x9d\\x86l\\x145<\\xb3V\\xeb\\x91\\x94Ii\\xb4\\x9a\\x8by\\xa2B\\x92E\\xa2\\x87\\xb1\\x92\\x9e\\xd6\\xcf{.\\xed\\x12\\xbf\\x1f\\xb1\\xca\\xa81\\xaa\\x94&\\xc6\\xb2\\x9a9\\xc9\\x96\\xc9\\xbfZ\\x86Q!\\x0c\\x177\\r\\xa5\\xa5I2N\\xaa\"Y\\x99\\x11\\xe8c\\xae\\x84f=\\x8dWb\\x95\\xea\\x83U\\x18\\xa1\\xc27\\xdd\\x90L%FhB\\xdcY\\xb8\\xbd\\xd23\\xf6H\\xd4\\xa5\\x1e\\xe9r-y\\x11\\x10lm\\xa4O\\xe5\\xee]\\xc8\\xd2gg\\x92\\x1a\\xda\\xb6C\\x8f\\xe3\\xf9\\x8e=]{\\x9aU9-l\\xd4\\xad\\xabs\\xa8(1\\xdb\\x90\\xecF\\x94\\x93Q\\xafy\\x04^\\xc9\\x1a\\xd2F\\xaeD\\xa4\\xe8Pnl\\xdb\"\\xbe\\xc0\\xfbGW\\xd1\\xe3\\x19\\x81\"\\xf9\\x8a\\xbb\\nB\\xca{\\xf5\\xcc\\xb1K-\\xa5.\\x17x\\xf2\\x94\\xae\\xf0\\xd4\\xc1\\x916\\xb3%\\x92T\\xde\\xa9\"2!\\xdd\\x00&\\xc6\\'\\x8c\\xfeM\\xfd\\xcb\\xb9\\xafj\\x9bA\\xbf\\xce8\\x12t*}\\xa2\\xd1\\xe0R\\x1f\\x98\\x8b\\xe6)\\xea$\\xc5\\xba\\xef\\x92\\xdbg\\x15\\nB\\x0b\\xbfm\\x85)N\\xef8\\xde\\x9c\\xd0\\x925\\x11\\x1e\\xa7\\x1f\\xb0I\\xd6\\xdb,\\xd9\\x02\\xe1\\xd9l\\xef+\\xb1}\\xcc\\xeeS-V\\xc9\\x8b\\xdf\\xcb\\x8a\\xc4\\x99F\\xf3S\\x1cZ\\xd4d\\xb4!\\x0e$\\xd6\\xeaT\\xa2%\\xefj\\xadIF]H\\x03[9\\xcd\\x9a\\xfa\\x97T6\\xc5\\x8cX\\xe6\\xdb%\\xcd1\\xea\\x87\\x8a=\\xad\\xad,\\xc81\\x1d5n\\x92^q\\x85\\xa1\\x06g\\xf1\\x16\\xf2\\x8b\\x9f\\xc49\\xcf\\x18\\xba\\xb5\\xcbr\\xbe\\xce\\xd5L\\
xec\\xe3,\\xc6xJC\\xec\\xdbz}+\\x8c\\xc2\\x84i\\xaa}\\x82$\\xbd\\xf7\\x8bA\\xafBJ\\xd3\\xec\\x9e\\xa9\\xd4\\xc8\\xcc\\x88\\xfa\\xec\\x06\\xaa\\xa34\\xc4\\xdf\\xf3\\x89\\x13g\\x17C\\xc2\\xb2\\x9cV\\xe6\\xb37{\\x11\\xb9\\xb9\\xac\\xc7v\\x9d\\x93\\xd9K\\xa6\\x8d\\rJ\\x96\\xf4Yf\\xf3lMa\\x95ho\\x12\\rD\\xa2\\xdd\\xd4\\xcd*3N\\xbaj4m\\xb3dV[K\\xd9\\xde3\\x94Sa\\x99JZ\\xc6\\xb3:\\xcbI\\x15s*\\x96\\xc5\\x8c\\x98\\xac8\\x93u\\xd6#\\x1f\\xdd\\x15\\xa18z$\\xc8\\x94{\\x8a\\xe5\\xa6\\x86}\\x16\\x031\\x85h\\x98\\x89\\xe2]\\xce\\xdbu\\xb1^\\xd1\\'\\xf6\\x7f*\\xd8s\\xe0\\xca\\x93\\x9dG\\xb4(\\xb61W\\x1aKq\\xe3E\\x96\\xb7\\x8d\\xc6\\x96D\\xa4{:r2\\xff\\x00\\x88\\xbf\\x19\\x0e\\x89\\x11\\x8fcu\\xd2r(\\xb7\\xae\\xc6\\'m\"\\xc6r$w\\xd6\\xa3>\\xe5\\xb7\\x14\\x958IN\\xba\\x11\\xa8\\xd0\\x8dOML\\x92E\\xae\\x82Lt\\xa6\\x9bL\\xcc\\xfcP\\x00\\x01\\xb0\\x00\\x00\\x00\\x00\\x01\\x1a\\xcf\\xf4\\x8b\\x8f\\xfeg7\\xfc\\xd9\\x1a\\x00\\xcf\\xd9\\xfe\\x91q\\xff\\x00\\xcc\\xe6\\xff\\x00\\x9b#@\\x1e_\\x17\\xc6\\x8f\\x97\\xde]c\\x84\\x00\\x00< \\x00\\x00*[S\\xfc\\n\\x93\\xf9\\xccO\\xfc\\x96\\x87\\x9a\\xca\\n-+\\xa5Cq\\xc7\\x9anCKeNFyL\\xba\\x92Q\\x19\\x19\\xa1i2R\\x14Z\\xf2RL\\x8c\\x8f\\x99\\x1e\\xa3\\xd3\\xb5?\\xc0\\xa9?\\x9c\\xc4\\xff\\x00\\xc9h\\x7f\\x07\\xd5\\xc1\\xfe\\x08\\xf9\\xcf\\xd2\\x94\\xab\\x841}\\x98vvcf\\xd9nc}\\xf0\\xe5\\xfc\\xf3\\xb1\\xb09PX\\x93\\x91O\\x96\\x83d\\xe0\\xc7`\\xfd%\\xb7\\\\4\\xba\\xee\\xfbN\\x19-D\\xa3$\\xf7dF[\\xa4I\\xcfq\\n\\x9c\\xef\\x05\\xec!\\x87Wc\\x95v\\xd5\\x19_t\\xdeq\\x06D\\xe1\\x194\\xb2J\\xb7IJ$\\x92KQ\\xb0\\xf6\\x88\\xc4/n\\xb6\\x8d\\x9bJ\\xad\\xa4\\xb1\\x9f\\x1eV\\xc8/+\\x1bz,G\\x1cC\\xb2\\xd6\\xf3f\\xd4t\\x9aH\\xc9N\\xa8\\xb7\\x8d(/h\\xf9\\xe8C\\xa7@f0b\"b\\xff\\x00\\x9a\\xfb\\x97s\\x1ff|\\x0b\"\\xd8fN\\xc5%\\x93\\x16\\xb7\\xd5y\\x9dLkw/%C3z\\x05\\x9b1\\xdbm\\xf8\\x92T\\x94\\xfd\\xcd\\xa3A 
\\xd9%\\xe8I48\\x82\\xe7\\xa6\\xbd\\x0f\\x962\\xe4\\x9cV\\xe5\\x96[S\\xae\\xb9\\t\\xe4!\\xb4\\x11\\x9a\\x94f\\x83\"\"\"\\xf7\\x98\\xfe\\xe4\\xf8\\xad6kI\"\\x9e\\xfe\\xae\\x1d\\xd5L\\x8d\\xd3z\\x0c\\xf6R\\xf3.n\\xa8\\x94\\x9d\\xe4(\\x8c\\x8fE$\\x8c\\xbf)\\x10\\xaaPv}\\xd9\\x96+q\\x16\\xda\\x9bg\\xf8\\xddU\\x9cUo\\xb12\\x1d[-:\\xd2\\xb4\\xd3T\\xa9)##\\xd0\\xcf\\xdc7M3De\\x8e\\x05\\xd8%\\xe6\\xca2[N\\xc4\\x9b)\\xa9\\x81GfV\\xb8\\xeah\\xedlq\\xf8\\xae.\\x04\\xf9\\x08\\x8eh\\\\\\x86Pz\\xa1M\\xbf\\xa9\\xa9e\\xcc\\x94KAi\\xedh=Q\\xb0=\\x9e\\xe5\\xd8\\xbeiu3\\x00\\xda\\xc5\\x99|\\x14\\xcd[\\xc9\\xc8\\x9d\\xb1z\\xc2C+\\x90\\x97M\\x10\\xda\\x94\\xf9\\xafy\\x97\\x1am\\xd3RH\\xb9\\x91n\\x9a\\xb9\\x90\\xeb\\x00\\x19\\xd8\\xc7k\\x17q\\xa2W\\xb4k\\x8cW\\x1a\\xba\\xb8\\xa8\\xca\\xb2*,;hQl+\\xde\\xb4\\xa96\\xaf\\xa5T\\x947Y[\\x8e\\xc5JIn)\\xb7d\\x19jH%\\xad)5n\\xeb\\xef\\xf1\\xe74\\x99N\\xd0\\x8bo\\x17\\xd5\\x98FH\\xd32\\xe7b\\x93\\xebbN\\xaf\\\\y6,\\xc2y\\x0e>l\\xb6\\xbd\\x0c\\xd4ImZ \\xf4W\\xde\\x91\\x91\\x19\\xe8;\\\\\\x06v7\\x8bL\\xfeZ\\xc5\\xdc\\xf8\\x9b\\x99\\xf8\\xe7i\\xa4e/b\\xb9$\\x9a\\x0c\\xbb\\x16\\xac\\x81\\x1a\\\\J\\xa7]\\xf4)\\x08\\x92\\xfa\\xd4\\xdc\\xb4\\xa4\\xb5\\x8f\\xa2$!Fk\"\"\\xddQk\\xa9\\x19\\n\\xf6\\xc8Y\\xbd\\xa2\\xda\\xeb\\xd8\\xee\\x15K\\x98\\xd1l\\xe6SVO\\xda\\xd7e5\\xa6\\xc4J\\xa9\\x8ap\\x94\\xd2\\xeb\\x9fW5\\xa5\\xd5\\xad\\xc5\\x1biR\\xd2D{\\xc5\\xbag\\xa1u 
\\r\\xec\\xf5\\xbd\\xcb\\xb8\\xaff\\xd8\\xa5\\x95\\xaff^\\xce\\x15\\xadTI\\x9d\"\\x875\\x8a\\x9b6\\x18\\x8e\\xa7}\\x0c\\xe2\\xbd1\\xb7\\x96\\xe1\\x11\\x1e\\xe2P\\xe2y\\xa8\\xf4\"3/\\xc6-\\xbbq\\xc3e\\xd5v\\x8ac3\\xb6\\xa9\\xcem\\xf1+\\x0cm\\xba\\x94H\\xc0\\xe6MnT\\x19MHq\\xdd\\xd7Z\\x88\\xe2\\x1cSN%\\xddI^\\xd1\\x12\\x91\\xcc\\x8b]GJR\\xe3U\\xd8\\xeb\\xb6K\\xae\\x8cQ~\\x11\\x96\\xa9\\xd2R\\x95\\x1e\\xea\\x9eRRKY$\\xcfD\\x9a\\xb7H\\xcfM\\x08\\xd4f\\xa3\\xe6\\xa33\\x93\\x19\\x8c\\x18\\xcbo\\x97b\\xee=\\xcf\\xb0\\xaa\\x8c\\x17\\x0b\\xc6n0\\xd7\\x17\\xban#\\xdb;V9)\\xc4\\xc2^\\xb5\\xb3\\t\\xf6\\x1f$\\xcaI\\xe8\\xa6RimE\\xbc\\xa2\\xfb\\xed\\x0b\\xe3\\x1fX\\x95|x[\\x0f\\xcc6e\\x9c\\xe0\\x19\\xb4\\xd3\\xad\\xb7\\x9f5k\\xa6\\xaby^\\x90\\xdb\\x96\\xca~<\\x88O\\xa4\\xc9+Z;\\xe6\\xdd\\xddI\\x9a\\x88\\x9a_\\xb2zn\\x9f]\\x00\\x91\\x83\\x11\\x16\\xbf\\xe7\\xe4\\x17s\\x96\\xcd\\xb6\\x85\\x9bSl\\xb2|\\xdc\\xb3\\x12\\xcasfZ\\xc9[\\x81H\\x99u(b\\xdeEy\\xa9\\xae\\xee\\\\\\xa8\\xfa$\\x92m\\xac\\xd7\\xaa\\xcd)3$%F\\x94\\xeaj\\x1b\\xeeE\\x05\\xcb<~\\xce\\x1b:w\\xd2\"\\xba\\xca7\\x8fB\\xdeR\\x0c\\x8b_\\xfb\\x98\\x90\\x01\\xd6\\x9am\\x16\\x99G#c\\x14\\x19\\x14\\xdd\\x8b\\xf6k\\xaa{\\x15\\xbc\\xaf\\xb3\\xc5r\\x9a\\xe8\\x96\\xd1eAY*1G\\x81)\\xa7\\x1f3-K\\xb85\\x1atw]\\xd3\\xdeO=LI\\xec\\x92=\\xe5.\\xd6\\xa4c\\x98e\\x16_M\\xb3\\x89l\\xd9=kY\\x95\\xd6\\x9b\\x10\\xea\\xe5\\xa9\\xc2Sk\\xafy\\\\\\xd6\\x87V\\xb7\\x14m\\xa5KI\\x11\\xef\\x16\\xe9\\x9e\\x85\\xd4\\xc09\\xc6\\x15\\xad\\xaa\\xdd\\xcc\\x1b\\x1c\\xc8$\\xe3=\\x91-q\\x9c\\x8f\\x1d\\xbb\\xc6\\xec\\xf0\\xdcY\\xf8\\x96\\'o\\x01lGuM\\xb2\\xe9)L<~\\xc3\\xc9\\xd1\\x1b\\xdb\\xc83-\\x14\\x9f\\xc65~\\xcdt3\\xb1\\x8e\\xcf{7\\xaa\\xb2J\\x9b\\x9f\\x13\\x1f\\x82\\xdb\\xcd\\xaf\\xef\\x9bWp\\x9dP\\x7f\\x95?{\\xff\\x00au\\xc91\\xba\\xec\\xba\\x99\\xfa\\xabh\\xc52\\xb9\\xf5 
\\xde\\x8e\\xa5\\x19%\\xc2J\\xd2\\xb2J\\xb42\\xd5&i\"4\\x9f%\\x16\\xa4ddfBLj\\x9a2\\xccyE\\x8b\\xaa;;\\xca\\xd7\\x94\\x9eK\\xbf\\x8aY\\xe2\\xbf\\x07\\xdc\\xc8\\x82_\\tGK?\\x08\\x927\\x7f\\xda\\xda\\xd0\\xfd\\xb6\\xd7\\xaf%\\x1f\\xbft\\xc5\\x12\\xce\\xbe\\xd6\\x9f\\xb5\\xedU\\xda\\xa9leQ\\\\b*\\xa6M\\x9cF\\r\\xd6\"\\xc9jR\\xe4\\x1a_Q\\x7f$JA\\xe8\\x95\\x1f#W\\xb3\\xef1\\xb4\\x80\\xd4\\xd3x\\x8b\\xca8\\x86&+7\\x17\\xd8\\x96\\x1b)\\xdcw?\\xa8\\xda\\r\\\\\\xfc\\x95\\xeak\\\\j\\x99r]\\x86\\xa7l\\x9fZZ\\x94\\xc2\\x92d\\xa6d$\\xdaQo\\xa3t\\xc9:\\xef#\\x91\\x9e\\x88\\xc5\\xaea\\x8bm\\x7fg\\xb9\\xd6_\\x88\\xdb\\xcb~\\xd7\\x05*kD\\xe3\\x90\\x174\\xa0\\xd9\\x9c\\x86_Z\\x1cC{\\xc6\\x86\\xcf\\xdb\"Y\\xea\\x924\\xe9\\xaf\\xc6:h\\x07(\\xc2\\xb7\\t\\xe5\\xd9n\\xaf\\xed\\x0f\\xf0\\x03&\\xfd\\x19\\'\\xf7J\\x1a[?\\xc9#\\xfb$3M\\xa1\\xfe\\x00d\\xdf\\xa3$\\xfe\\xe9CKg\\xf9$\\x7fd\\x86\\xbcO\\xf1Q\\xf3\\x9f\\xa5-\\xc7\\x07\\xd8\\x00\\x0f\\x9a\\xa0\\x00\\x00\\x0c\\xd3\\x0f\\xfeeg\\xfab\\xcb\\xff\\x005\\xe1\\xa5\\x8c\\xd3\\x0f\\xfeeg\\xfab\\xcb\\xff\\x005\\xe1\\xf4|7\\xf1\\xd7\\xf3\\x8f\\xb9?\\xb5\\x9bm3\\xb3\\x94m\\xa1m\\'\\x1b\\xc9\\xbe\\x1e\\xc8a5\\x12S\\xafNb.Ka\\x15)I\\xc4[(\\xf4d4\\xe9%\\x95o\\x1aMF\\x8d\\xcd\\xe2\\xdf\\xd4\\xcfx\\xc8\\xe8\\x1bI\\x8dq\\x86\\xed\\xb2,\\xcd\\x9bQ\\xe6\\x8d\\xe5S%U\\xc3\\xb7q\\xd8\\n\\x91Au\\x05;\\xa8[\\xaf\\xc9^\\xa4\\xdb\\xac\\xb4j.\\xf3y+3F\\x9a,\\x8fQ\\xd4\\xa05V\\x1cO\\r\\x1c\\xee\\xe2\\r\\xa1B\\xc9i\\xb65\\xb6=\\x9d\\xb7\\x82\\xe5V\\x97\\xd6\\xd9t\\xdbXOW\\xd4\\xba\\xf4G\\xa1\\xc8\\xb0D\\x94:\\x97\\xd2[\\x86d\\x8326\\xc8\\xcddd~\\xce\\x9a\\x99Y\\xf6\\x89\\x83\\xe4S\\xb6\\x7f\\xda\\xca\\xe2\\x8d\\x19\\x1a]qF\\x84\\x91\\x99\\xa4\\x9a=\\xf3=4\\xdd\\xe7\\xee\\xe64N\\xd2\\xf8]\\x82\\xb6\\xb7\\x81\\xe6\\xb2+2\\xcb\\xacR\\x05|\\xea\\xb9\\xcdar\\xe53c\\t\\xc7\\x94\\xd2\\xdb}(\\x8c\\xb4:\\xe3j\\xee\\x8d\\x0bJu\\xd3\\xd93#\\xd0\\x87AVcU\\xd4\\xd6v\\xd3\\xe1F(\\xf2m]D\\x89\\x86\\x85\\x1e\\xeb\\xce\\xa5\\tl\\x9c4\\xeb\\xa1+q\\x08I\\x99\\x16\\xa6HN\\xba\\xe8BL
f0c-\\xa7\\xcb\\xb1w$\\xde\\xe0X}N\\x03S\\x7fQ\\x8am^\\x8e\\xeaU\\xd4\\x9bX\\x96\\xf1#\\xc9\\xb2\\xbb\\x833\\xb9\\xf4s}\\xf6\\xdd[\\xab6\\xdei\\xb4$\\xd0\\xb224\\xe8J$\\xfb\\xcb\\xd3\\x88\\xdd\\xe7\\x98\\xd6\\xd0\\xb0}\\xa0\\xed\\x07\\x10\\xba\\x95>\\xd7\\x05\\xf8\\x1e\\xc1\\x18\\xfdb\\xe5\\xae<\\xf4\\xcc\\'\\x92\\x97Zkx\\xda\\xdfmDz\\x9f\\xb0\\x95\\x12\\x88\\xcc\\x88\\x87V\\x80\\xbb+M\\xe2K\\xb8\\xbfdx\\xceU\\x8dF\\xd8\\x1d\\xf5\\x96\\x1b}\\x1d\\xa8W\\x99D[(e\\tJ\\x93]\\xe9\\xf3]\\xf4w^l\\xb9\\x93^\\xe3S\\x85\\xaaI&J\\xd7t\\xc8\\xc5\\xb7\\x11\\x8a\\x8a\\x8am\\xb5`9\\x96\\x03\\x94\\\\B\\xb4\\xbb\\xba\\xb8$\\xc0\\xacq\\xc8\\xd6Pd\\xa8\\x9cB\\x18\\x90FH7\\x8c\\x94i$o\\x12\\x89I\\xf8\\xb4\\xd4u\\x18\\t\\x189xO\\xe5\\xac]\\xcc;.\\xcd\\xb3\\x0ccg\\xdbD~\\xd3\\x16\\xcc\\xf3|>\\x98\\xe2\\xa3\\x1b\\x83{RM^\\xcfmI\"}\\x85\\xb4\\xa4\\xa5N\\xa5\\xa5\\x1aw\\\\Rw\\x94[\\xdf}\\xbaC\\xa6b>rb2\\xf1\\xb4\\xe4sq\\tY\\xb4\\xe9\\x11-\\x1a\\x96\\xbb\\xaa\\xd3R\\xd4\\xbd\\xc6?P\\x1di\\xa6i\\x8b]%\\xc6\\xb8\\xc6%\\x93\\xb5\\xd9\\xc7f\\x18\\xac\\x9cV\\xee-\\xde-\\xb4\\x1a\\xa6g\\xb0\\xe4\\x15\\x99\\x1bMY\\x93\\xcb\\x94\\xda\\x92FK\\x8eM\\xac\\x95\\xde\\x97\\xb2Z+S-\\xd3\\x16\\xbao\\x86\\xb1\\x8e\\xd1\\xc5\\x1b\\x02\\xa1\\xcc+i-\\xee\\xe5\\xbd\\x96\\xc2\\xb9\\xad4\\xd2,\\xbb\\xa5\\x1f\\xc2\\x11$\\xab\\xdc\\xe3\\x8e%\\xbfa\\n2^\\xf1\\x9a\\x90\\x93-GO\\x80\\xe7\\x18V\\xb5\\xa7\\x85\\xbb-\\xd8\\'d\\xd9\\xd3\\xb1\\x1d\\x9f+\\x04\\xbf\\xc7\\xee\\xa8\\xedq\\xd9\\x16\\n\\x936t\\x05\\xb7\\x01\\xf6\\xd55\\xe7\\x10\\xb6$\\x99n:F\\x87\\x12\\xafd\\xf9s\\xd7\\xdc=\\x1d\\x88k%U\\xf6]\\xc2\\x13)\\x0bmR\\x91*{HYhd\\xcc\\x89o>\\xcf\\xec\\xdcA\\xff\\x00\\xdcl\\xb7\\xb4\\xb12JY\\xd5S\\xd0\\xa7`\\xcdeq\\xdfm\\x0e)\\xb3[j#J\\x93\\xbc\\x93#-H\\xcc\\xb5##\\xe6=1b\\xb3\\x06+1\\xa34\\x86#\\xb2\\x82m\\xb6\\x9bI%(I\\x16\\x84\\x92\"\\xe4DDZh5M\\x19f<\\xa2\\xdfOb\\xea\\xb6%\\x95.\\xef2\\xcck\\x15\\x8aY\\xd1\\xa6\\xa9\\xf8\\xed\\x95\\xbc\\xc8\\xe9m\\x8bm\\xf6\\xb7\\xb7\\xd9Y\\x1e\\xab$hH3?w\"\\xfcdX\\xb7j6\\xeci\\xb2\\x
a8\\x99\\x1e\\x11K\\x99\\xa7i\\xb1\\xeaN=e\\xa5\\rY\\xcc\\xac\\x9c\\x83x\\xd4P\\'k\\xaa\\x12\\x8d\\xe25o+sp\\x97\\xbcK\\xd7\\x90\\xe9p\\x16\\xaa3Sk\\xa5\\xdc\\xfdO.\\xeb\\x06\\xed7{cs\\x8b\\xdc\\xc9\\x87\\x98\\xd2R\\xc5b\\xc6\\x9e\\x0b\\x92\\xe1\\xc5\\x92\\xc2\\xe4%\\xe6\\xdfq$}\\xd2K\\xbfJ\\x89k\\xd0\\x8d$|\\xf9h\\'\\xbb(\\xe3\\x93\\xb1\\x8d\\x96L\\x8deY\"\\xa6k\\xd9\\x15\\xd4\\x95\\xb3-\\x852\\xe2\\xd2\\xbb\\x17\\xcd\\xb7\\r*\"3%7\\xb8d\\x7f\\x1awL\\xb9h60\\x12\\x9a-7\\xbf>\\xebtS\\xff\\x00\\x87\\x18\\xa7\\xf7\\xb2\\x7fp\\xa1\\xa1\\x8c\\xf1\\xff\\x00\\xc3\\x8cS\\xfb\\xd9?\\xb8P\\xd0\\xc7?\\x17\\xff\\x00\\x0f\\x97\\xde]#\\x84\"\\xf2\\xaf\\xc1\\x8b\\x8f\\xcc\\xde\\xff\\x00A\\x8a\\xa69\\xf8=W\\xf9\\xab_\\xe8!k\\xca\\xbf\\x06.?3{\\xfd\\x06*\\x98\\xe7\\xe0\\xf5_\\xe6\\xad\\x7f\\xa0\\x87O\\x0f\\xfc3\\xf3\\xfb%_\\xb5\\xe1g4\\x84\\xfe{3\\x12KR\\n\\xca-c\\x16\\xabt\\xd2\\x9e\\xe4\\xdau\\xd7ZJH\\xf5\\xd7x\\x94\\xca\\x8c\\xcbM42\\xe6|\\xc8\\xb2\\xbc\\xc7\\xb5\\xee+\\x88\\x9eQ!\\x18\\xf6U{G\\x8d<\\xb8\\x96\\x97\\xd5\\x15\\xc8r\\x0b\\x12Q\\xa1)\\x82Z\\xdcI\\xa9d\\xa3JL\\xd2\\x93JL\\xfd\\xa5\\x173\\x12U\\xbd\\x94voU\\xb5G3h\\xf8\\x9d\\x13RI\\x88\\xfe\\x8f\\x1d\\x15m$\\xe3\\xcbi\\xf7\\x1e\\xf4\\xb4\\xaf\\xdeN\\xa8\\xd6\\x82\\xd7M~\\xe6\\x93\\xd7\\xf1s\\xbe\\xda\\xe8s\\xdd\\x91vu\\xda\\xde\\x15#\\rM\\xa6)\"|\\xbb8Y\\\\{6\\x10\\x84G\\x930\\x9f\\xdcy\\x85\\x1fzn\\xa1K4\\xea\\x94\\x9aU\\xcb\\x99h9bW\\x89M7\\xf9\\xf9\\xb3\\x11\\x0e\\xa6\\x7fn4Q\\xea6\\x99b\\xa8\\x96&\\xc6\\xcf\\xd4\\xf2m\\x12M\\xb7\\xbc\\xf7u\\x11\\x12\\x95\\xdc{z+\\xd8Y\\x11o\\x1a=\\xadK\\x91s\\x117=\\xa3\\xaa!I\\x8b\\x16\\xa3\\x18\\xca2\\xe9\\xcb\\xaeb\\xd6\\\\\\\\~\\x02\\x1f]{\\x0f$\\xd4\\xd7~jq)%\\xa8\\x89FM\\xa0\\xd4\\xb3$\\x99\\x91i\\xa1\\x9eg\\xb4-\\x9f\\xed\\x1a\\xa9\\x8d\\xbd\\xd0\\xe3\\xb8ad\\xb16\\x82\\xcb\\xb2+\\xad\\x13i\\x1e3q\\xd6\\xe5r\"\\xb8\\xd3\\xc8qD\\xbd\\xe26\\xf5A\\xa5&\\x95o\\x11)H-L\\xabs;9\\xd9\\xe3y\\xdc\\xac\\x82\\xcfc\\xd4\\xdb[\\x89{KT\\xca\\x98\\x99\"\\x1bri\\xe6E\\x8a\\x98\\xebF\\xb2\\
x0fuL\\xac\\x90\\x83\\xdel\\xcc\\xc9I?d\\xf5#\\x12k\\xc4\\xe1\\x11\\xdb\\xe6Z\\x1b\\x14\\xbe\\xd5x\\xbb\\xb6x\\xe5~?O\\x90f\\x12\\xf2\\x1aU_V\\xb7G\\x11\\xb5w\\xb1\\xd2\\xe1!IWz\\xe3}\\xda\\xc8\\xcc\\xf5%\\xee\\x91i\\xbb\\xae\\xf1\\x92NRWh\\xccr\\x0e\\xcf\\xf2\\xcc\\xaeD\\x0bfQ\\x8c\\xda9K:\\xa9M4sNY:\\x86\\xd0\\xdbi\\'\\r\\n7\\r\\xd6\\x8d\\x07\\xbeDd\\xe2O\\x97\\xc5\\t\\x87\\xec\\xaev7\\xb7Lj\\xf2\\x061\\x0b\\x1d\\xc5\\xe1\\xe0\\xafT\\xaa5s\\x8d\\x13\\x10\\xe695\\x97\\xcd\\x84%;\\xaa2\\xd1.\\x1e\\xf9 \\x92z|Fz\\x0c\\xf3*\\xc2\\xcb(\\xed\\xa7\\x06\\xa6\\xa2\\xc6,\\x9cq\\xf6\\xa2e\\x99<\\x06VK[3\\xe0w\\x8cD\\xdf\\xd0\\xfd\\x83p\\xdd\\x8e\\xad\\xd3-U\\xe8z\\x97\\xb8\\xc5\\x9a\\xb1\"/?#GX$\\xcdI#4\\x9aL\\xcb]\\xd3\\xf7\\x90\\xc1\\xec\\xbbD\\xc3\\xc3s\\r\\xaeK\\xc8\\x15~\\xdd&\\x1b\\x1a\\xa8\\xdd\\xab]dm\\x12R\\x1cy\\x05&;\\x88t\\xdcy.n\\x91\\x9a\\\\$\\x1aI\\x05\\xbaFj2\\x17\\x89\\x1d\\xa2vS\\x12C\\xac?\\xb4\\xdc9\\x97\\xdaQ\\xa1\\xc6\\xdc\\xbf\\x88\\x95!Dz\\x19\\x19\\x1b\\x9a\\x91\\x91\\xfcC\\x13\\xda\\xd6\\xc9\\xb2\\x8d\\xa2\\'m\\xf6X\\xdc\\x06n+\\xb3\\n\\\\m\\x142\\xa3\\xce\\x8em\\xcf\\xee\\x1dy\\xc7M\\n5\\xe8I$8\\x85\\x12\\x95\\xa1(\\x95\\xec\\x99\\x8d\\xe2U6\\xff\\x00\\r\\x7f\\xf2R#\\x9b^\\xc7{A\\xd0\\xda\\xdc\\xdd\\xd5\\xdcU\\xddas*j\\x8e\\xf1\\xc4\\xe4q\\xdb`\\x9d\\xaf#2T\\x94\\x1a\\x1c^\\x89I\\x96\\x8aJ\\xf7V\\x9dKT\\x90\\xcf!\\xf6\\x98\\x9f\\x9cm\\x93e\\x15\\x94\\x94\\xd9&;\\x8a\\xdf\\x9d\\x93\\xcfH\\xbc\\xaci\\x86mXn\\x1a\\x9ce\\xc6\\x94jR\\xd0D\\xa2%\\xe8}\\xda\\x8c\\x8c\\xb9\\x19\\x0f^\\xdc\\xf6\\x17\\x7f\\xb5}\\xa3\\xdf\\xfa*\\x13\\x0e\\x9a\\xd7gV8\\xeam\\x16\\xeawZ\\x9a\\xec\\xa6\\\\i\\nA\\x1e\\xf9\\xa7D(\\xcc\\xc9:hFZ\\xeadB\\t\\xbc[i\\xbbM\\xcd\\xf6W\\xc5\\x1b=V%]\\x8e\\xc5\\xb3\\x87kb\\xcd\\xc4I\\r\\xa9O\\xd7\\xaa:V\\xc2\\x10\\xbd\\xfd\\xc3W\\xbbR%\\x16\\xa5\\xa9hFc\\x9dUb^\\xde~\\xba\\xae\\x8d\\x06\\xa7\\xb5&3mgREI\\x92E\\xc7.&\\xa2\\xbe\\xb3+\\x93^I\\xab\\x9a\\xf3\\x8a\\xdch\\x90\\xbd\\xf3p\\x92\\xe2\\xb4J\\x16\\xb6\\xd2\\x95\\x19\\x96\\x86z\\x90\\xf4
\\xd7\\xf6\\x97\\xc6,\\xec\\xab\\xe9\\xd9\\xaf\\xb7\\xe2yW\\xaf\\xd0/\\x1e6Z\\xf4\\xd8\\xce\\xb2[\\xee\\xbe\\xea{\\xcd\\xd2a-\\x1a\\x1d\\xef\\tFF\\x97\\x11\\xa6\\xaaQ$c\\xfb\\x07\\xec\\xf8\\xac\\x11\\xec[\\x1c\\xc96\\t\\x8b\\xca\\x99F\\xe16\\xe6\\xd0\\x19z\\x11\\xa6A2Fl\\xcaCfF\\xff\\x00|f\\x96\\xf5%\\x11h{\\xca\\xde\\xf7\\x10\\x98\\xc7\\xb6k\\xb4\\x8a\\xbd\\xb6/l\\xefPG;+\\xc9j\\xa3\\xb1\\xc5R\\xe4R~\\x1d*M)\\x8f \\xa4\\x12\\xb7U!*o\\xbdq$\\xe2\\x89Hp\\x90\\\\\\xdbHEx\\x96\\x89\\x9f\\xa1hhq6\\xdf\\x11\\xac\\x9bjv\\xd6\\xf6-V\\xe0\\xd8!5\\x01\\xf7\\xcd\\x1b\\xdd\\xe4\\xae\\xe5/\\xc9Y\\x99\\x11\\xa8\\xf7R\\xeb-\\xa5\\t\\xe6j\\xdf\\xfb\\xe34\\xe9\\xe5\\xa7\\xedMMak\\x1a\\xbe~\\x1f\\x98c\\x92e\\xd5\\xcc\\xb8\\x88WU\\xcd0R#FBV\\xe2\\x93\\xa3\\xa7\\xa1\\xe8\\xb4\\xe8\\x85h\\xa2\\xd4\\xb7\\x89:\\x91\\x8c\\xc5\\xed\\x8f\\xd9\\xe6\\x18\\xf7i\\x9d\\x9b\\xc6y\\xa8\\xb7v\\xb9\\x13Y\\x1dk\\x92\\xb5\\xee\\x9dK\\xec\\xc6u\\x83W\\xc7\\xdd\\x9b\\xd1\\x1dh\\xcf\\x9e\\x9b\\x8b\\xe4zh\\x17VY\\x86\\xd36\\xfd\\xb3\\xea\\xcc\\xb7\\x0bs\\x03\\x99\\'\\x14\\xc8\\xe1\\xa4\\x9c\\xb2\\x8f9.\\xad\\xc4CJ\\xd4\\x83eG\\xa2\\x12{\\xbaoh\\xa3\\xd7\\xefKNs=q\\xf9\\xe7b\\xd0\\xdc\\xd8\\xdb\\x95\\x0c\\x8am\\x99Y\\xa6%\\x891\\xb4\\x050\\x9a\\xb4\\x9bm\\xef3\\xde\\xc4\\\\\\xa4\\xf7\\xe5\\xbf\\xa2tB\\x0c\\x8ft\\xd7\\xedh\\\\\\xcb\\x98\\xaf\\xc6\\xedM\\x8cJ\\xb0\\x88\\xb2\\xa5\\xc9\\x1b\\xc5\\xe6OMl\\\\\\xc1u\\xe4U\\x0f>\\xa7;\\xa4\\x12\\\\\\xdf\\xef7\\x14\\xe6\\x88K\\xa6\\xd96fe\\xa2\\xb421\\x97\\xe3\\xdb>\\xdam\\x9c\\r\\x82\\xe36\\xd8!\\xd4W`o\\xb7\\x1e\\xd6\\xd8\\xad\\xe2\\xb8\\x97\\x90\\xd5k\\xd1\\x12\\xf3\\x08J\\xcdf\\x85\\x1a\\x88\\xcfx\\x92\\xb25$\\xb7\\x0c\\xb7\\x94^\\x1d\\x88vp=\\x9e\\xab\\x1f\\xc52-\\x83\\xe3\\x17n\\xd4J6\\xcfh)v\\x11&C\\x08Z\\x94\\xd4\\x83iDo\\xf7\\xe4[\\x84i2\\xd3R3%\\x8b\\x9f\\x12mh\\xed>_\\xec\\xb45W\\xbbVS\\xa8\\xb37k\\xf0\\xcc\\xbe\\xe2\\x16!*|;\\x89\\xd0\\xa1\\xc7\\xeeYr\"\\x14\\xb7\\t&\\xb7\\xd2no\\x12}\\x92A\\x19\\xfbI\\xde$\\xef\\x10\\xbaK\\xdb\\x1d\\x0bY\\x06%Q\\x11\\x
13-\\x9f\\xc9\\xab\\xa4\\xdb\\xc3v\\xbd\\xa4\\xb8\\x84De\\xb4,\\xdds\\xda%\\x11(\\xddm\\t\\xdd%\\x19\\xa9dZ\\x11je_\\xd8\\x86\\x13a\\x84\\xd5\\xed!9,&\\xa1F\\xb5\\xcb\\xad\\xad\\x9a\\xef\\x9dmhv\\x1b\\xcb#C\\x8a\\xd1FI#I\\x1e\\xa4\\xad\\x0c\\x8b\\xdeD2\\x9e\\xc6\\x98t\\xc8\\xb33k\\xf2\\x98\\xc5\\xf5U\\x03\\x8fa\\x98\\x84\\x96\\xde\\xd5\\x0e\\xd6F\\x90\\xeb\\xa4d\\xe1jFF\\xa7[h\\xd4Z\\xff\\x005/~\\x9c\\xf5\\x15Wx\\x89\\xf8\\xa3\\xa2\\xf6q\\x9dF\\xda^\\x13U\\x93D\\xad\\xb3\\xa8\\x8f`\\xda\\x9cD+\\x98\\xde\\x8d-\\xa2%\\xa9:8\\xde\\xa7\\xbb\\xae\\xee\\xa5\\xcc\\xf5##\\xf8\\xc5Z\\x97oU\\xb9.\\xd0\\xed1Z\\x8cs#\\xb3Ed\\xe7+&\\xde\\xc7\\x84\\x83\\xadbZ\\x19\\'V\\xca\\x9c7\\tdd\\x93Ionn\\xef)%\\xbd\\xa9\\x90\\xb4\\xec\\xe2\\xd7%\\xbc\\xc2j\\xa7f\\x14L\\xe3Y#\\xcd\\xa8\\xe6UG\\x96\\x99H\\x8e\\xa2Z\\x88\\x88\\x9cO%j\\x92I\\xf2\\xf7k\\xa6\\xa7\\xa6\\xa3\\x13\\xb7\\xc0\\xb3)\\x1d\\xa2\\xebr\\x0cc\\x0b{\\x0e`\\xadIW\\xb9#7m*\\x15\\xedrYRw]\\x84\\x93\\xde9\\x1b\\xdb\\x84\\x95\\xa9\\x1a\\xa7w\\xef\\xcc\\xb9\\x16\\xea\\xaa\\xa8\\x8af>\\x83\\xfb\\xb3^\\xd5\\xd3\\xec\\xb6e\\x9efY\\x8e\\x17yU]\\x8c\\xca\\xb3R\\xdfa\\x98\\xdb\\x8e3\\x1eB\\xdbLt\\x11IQ\\x9c\\x84\\xa5$K\\xd7u\\x06\\xa2V\\xea\\x8c\\xb4\\x16,\\xdfn\\xd3\\xe3l\\x8a\\xd7h5T\\x175\\x15\\x94N\\xc7\\xb0Z.b\\xb4\\xdf\\xc2\\xb5\\xc6\\xa4\\x9b\\xeai$\\xb58\\x8f\\xb9)KN\\xf9!D\\xb4\\'T\\xe9\\xbcG\\x9cZ\\xec\\xbfh\\xcdlsm;2g\\rT\\xc6\\xee^\\xba\\xb0\\xa5\\xbcf\\xce134\\xe5\\xc87\\x9a`\\xdbZ\\xd2\\xb6\\xd6]\\xea\\x88\\xcdDH-\\xcf\\xbe=Hi\\xdd\\xa9\\x19}\\xbe\\xcb\\xd9\\x8d[,\\x1b\\xb665I\\xa7\\x8b\\x19\\x1a\\x1a\\x9c\\x93 \\xd1\\x1d\\xa6\\xd3\\xf8\\xcc\\xd6\\xe2H\\x87(\\x9a\\xf2\\xcd\\xe7\\x84w].\\xd8\\xa3Ijdf\\xa40\\xe2]a\\xd4\\x13\\x8d\\xb8\\x93\\xd4\\x94\\x93-H\\xcb\\xfa\\xc8V\\xb6k\\xb4\\x18\\xdbN\\xc5[\\xbd\\x89UoL\\xc2\\xdfy\\x82\\x8bw\\x0c\\xe2\\xc9#m\\xc5 
\\xd4h3?d\\xcd&dz\\xfb\\x8f\\xe2=H\\xa5\\xb1zs\\xc7q\\x9a\\x8a\\xa3s\\xbd80\\xd9\\x8an\\x7f\\xcd\\xb8\\x82N\\xbf\\xf7\\xd0D\\xec\\xd2\\xe3*\\xbd\\xc5[\\x97\\x99\\xe3\\xd1\\xf1{\\xc3}\\xe4.\\xbe4\\xd4\\xcbB[K\\x8a&\\xd7\\xde\\'\\x97\\xb4\\x82J\\xb4\\xf8\\xb5\\xe7\\xa7\\xb8\\xbd7\\x9b\\xc3,\\xfe\\x1e\\xd3\\xefb\\xedSm\\x15\\xef\\x94\\xcbz\\xacf\\xb2\\xa6Uue|v\\x0eBV\\xf32\\x14\\xf6\\xe1\\xac\\xd0KR\\x8d\\xb4\\x1e\\x8e/B\\xdd\\xe5\\xa6\\xa6!0\\xce\\xd40Z\\xc4\\xb0Hg\\x03,\\xda\\x16Gq\\x8e\\xb3~\\xfb\\xb5t\\xec7!1T\\xad\\xc2\\x90\\xfb)x\\x90\\x83R\\xf5.\\xed\\xa5,\\xf5#\\xd0\\x84\\xe4<#(\\xa4\\xdb\\xde\\xd1m\\x9b\\xa8D\\xdco-\\xa3\\x84\\x86\\xec\\x9b\\x94\\xdaU\\x16LT<\\xdfr\\xb6\\x94d\\xa3\\xdf\\'\\x89D\\xb4\\xeaE\\xbad~\\xf1\\x8c\\xdb\\xecc?\\x89\\xb1=\\x9a\\xd0Ul\\xf2K{B\\xa3\\xc6\\x98\\x89\\x0f.\\xae\\xc8c\\xc3z\\x92ihKi\\xed\\x17\\xf7h\\xfe\\xc9)IOx\\x95je\\xbb\\xaf\\xb4<\\xd35\\xc7\\x0f?\\xaf\\xb3Z6*\\x1d\\xb9\\xdf\\xd9v\\x91\\xcbv~\\xee!h\\xed%\\\\Z\\xf5\\xb3g\\x1d\\xb8\\xe4\\x86\\x14\\xf1H5\\xbc\\xfa\\x95#x\\xdbWv\\x94\\xa3q\\x06\\xadP\\xbd\\xe2\"23\\xdb\\x06\\x19_\\x8d\\xe78Gh[\\x1c\\x85\\xbcp\\xb2z\\\\\\xa6\\xa2\\xa6\\x04\\xfb8sX\\x8f\\xf0k\\xf1V\\xf98\\xe2\\xdaqD\\xa5\\xb6\\xa4\\xbf\\xbc]\\xde\\xf1\\xfb&Z|cs\\x1d\\xb0\\xefi\\xbf4\\x90\\x00\\x07TF\\xb3\\xfd\"\\xe3\\xff\\x00\\x99\\xcd\\xff\\x006F\\x803\\xf6\\x7f\\xa4\\\\\\x7f\\xf39\\xbf\\xe6\\xc8\\xd0\\x07\\x97\\xc5\\xf1\\xa3\\xe5\\xf7\\x97X\\xe1\\x00\\x00\\x0f\\x08\\x00\\x00\\x08\\xec\\x86\\x866KP\\xfdt\\xb5:\\x96\\x1d4(\\xd4\\xca\\xf7VF\\x95\\x12\\x92d\\x7f\\x16\\x86\\x92\\x15\\xff\\x00V\\x91\\xbcn\\xf3\\xad\\xfb\"\\xe2\\x03\\xbd\\x18\\xf8\\x98q\\x96\\x9a\\xad\\x0bu;\\xd5\\xa4o\\x1b\\xbc\\xeb~\\xc8z\\xb4\\x8d\\xe3w\\x9do\\xd9\\x17\\x10\\x1b\\xde\\xb1\\xba\\x8b\\xa9\\xde\\xad#x\\xdd\\xe7[\\xf6C\\xd5\\xa4o\\x1b\\xbc\\xeb~\\xc8\\xb8\\x80oX\\xddE\\xd4\\xefV\\x91\\xbcn\\xf3\\xad\\xfb!\\xea\\xd27\\x8d\\xdeu\\xbfd\\\\@7\\xacn\\xa2\\xeaw\\xabH\\xde7y\\xd6\\xfd\\x90\\xf5i\\x1b\\xc6\\xef:\\xdf\\xb2. 
\\x1b\\xd67Qu;\\xd5\\xa4o\\x1b\\xbc\\xeb~\\xc8z\\xb4\\x8d\\xe3w\\x9do\\xd9\\x17\\x10\\r\\xeb\\x1b\\xa8\\xba\\x9d\\xea\\xd27\\x8d\\xdeu\\xbfd=ZF\\xf1\\xbb\\xce\\xb7\\xec\\x8b\\x88\\x06\\xf5\\x8d\\xd4]N\\xf5i\\x1b\\xc6\\xef:\\xdf\\xb2\\x1e\\xad#x\\xdd\\xe7[\\xf6E\\xc4\\x03z\\xc6\\xea.\\xa7z\\xb4\\x8d\\xe3w\\x9do\\xd9\\x0fV\\x91\\xbcn\\xf3\\xad\\xfb\"\\xe2\\x01\\xbdcu\\x17S\\xbdZF\\xf1\\xbb\\xce\\xb7\\xec\\x87\\xabH\\xde7y\\xd6\\xfd\\x91q\\x00\\xde\\xb1\\xba\\x8b\\xa9\\xde\\xad#x\\xdd\\xe7[\\xf6C\\xd5\\xa4o\\x1b\\xbc\\xeb~\\xc8\\xb8\\x80oX\\xddE\\xd4\\xefV\\x91\\xbcn\\xf3\\xad\\xfb!\\xea\\xd27\\x8d\\xdeu\\xbfd\\\\@7\\xacn\\xa2\\xeaw\\xabH\\xde7y\\xd6\\xfd\\x90\\xf5i\\x1b\\xc6\\xef:\\xdf\\xb2. \\x1b\\xd67Qu;\\xd5\\xa4o\\x1b\\xbc\\xeb~\\xc8z\\xb4\\x8d\\xe3w\\x9do\\xd9\\x17\\x10\\r\\xeb\\x1b\\xa8\\xba\\x9d\\xea\\xd27\\x8d\\xdeu\\xbfd=ZF\\xf1\\xbb\\xce\\xb7\\xec\\x8b\\x88\\x06\\xf5\\x8d\\xd4]H\\x9b\\xb2\\x88\\x160\\xdf\\x8b&\\xde\\xe9\\xe8\\xef\\xb6\\xa6\\x9cmSy)*-\\x0c\\x8f\\x97\\xc6F.\\xc9I%$E\\xee\"\\xd0\\x7f@s\\xc4\\xc6\\xc4\\xc5\\xb4W7\\xb1p\\x00\\x07\\x14\\x00\\x00\\x00S\\x95\\xb3\\x08\\x05\"K\\x8c\\xd9\\xdb\\xc5L\\x87\\xdd\\x92\\xa6\\x98\\x97\\xba\\x82[\\x8b5\\xafB\\xd3\\x96\\xaaQ\\x9f\\xfd\\xc5\\xc4\\x07Z1k\\xc2\\xbeI\\xb2\\xddN\\xf5i\\x1b\\xc6\\xef:\\xdf\\xb2\\x1e\\xad#x\\xdd\\xe7[\\xf6E\\xc4\\x07]\\xeb\\x1b\\xa8\\xba\\x9d\\xea\\xd27\\x8d\\xdeu\\xbfd=ZF\\xf1\\xbb\\xce\\xb7\\xec\\x8b\\x88\\x06\\xf5\\x8d\\xd4]N\\xf5i\\x1b\\xc6\\xef:\\xdf\\xb2\\x1e\\xad#x\\xdd\\xe7[\\xf6E\\xc4\\x03z\\xc6\\xea.\\xa7z\\xb4\\x8d\\xe3w\\x9do\\xd9\\x0fV\\x91\\xbcn\\xf3\\xad\\xfb\"\\xe2\\x01\\xbdcu\\x17S\\xbdZF\\xf1\\xbb\\xce\\xb7\\xec\\x87\\xabH\\xde7y\\xd6\\xfd\\x91q\\x00\\xde\\xb1\\xba\\x8b\\xa9\\xde\\xad#x\\xdd\\xe7[\\xf6C\\xd5\\xa4o\\x1b\\xbc\\xeb~\\xc8\\xb8\\x80oX\\xddE\\xd4\\xefV\\x91\\xbcn\\xf3\\xad\\xfb!\\xea\\xd27\\x8d\\xdeu\\xbfd\\\\@7\\xacn\\xa2\\xeaw\\xabH\\xde7y\\xd6\\xfd\\x90\\xf5i\\x1b\\xc6\\xef:\\xdf\\xb2. 
\\x1b\\xd67Qu;\\xd5\\xa4o\\x1b\\xbc\\xeb~\\xc8z\\xb4\\x8d\\xe3w\\x9do\\xd9\\x17\\x10\\r\\xeb\\x1b\\xa8\\xba\\x9d\\xea\\xd27\\x8d\\xdeu\\xbfd=ZF\\xf1\\xbb\\xce\\xb7\\xec\\x8b\\x88\\x06\\xf5\\x8d\\xd4]N\\xf5i\\x1b\\xc6\\xef:\\xdf\\xb2\\x1e\\xad#x\\xdd\\xe7[\\xf6E\\xc4\\x03z\\xc6\\xea.\\xa7z\\xb4\\x8d\\xe3w\\x9do\\xd9\\x0fV\\x91\\xbcn\\xf3\\xad\\xfb\"\\xe2\\x01\\xbdcu\\x17S\\xbdZF\\xf1\\xbb\\xce\\xb7\\xec\\x87\\xabH\\xde7y\\xd6\\xfd\\x91q\\x00\\xde\\xb1\\xba\\x8b\\xa9\\xde\\xad#x\\xdd\\xe7[\\xf6C\\xd5\\xa4o\\x1b\\xbc\\xeb~\\xc8\\xb8\\x80oX\\xddE\\xd4\\xefV\\x91\\xbcn\\xf3\\xad\\xfb!\\xea\\xd27\\x8d\\xdeu\\xbfd\\\\@7\\xacn\\xa2\\xeaw\\xabH\\xde7y\\xd6\\xfd\\x90\\xf5i\\x1b\\xc6\\xef:\\xdf\\xb2. \\x1b\\xd67Qu;\\xd5\\xa4o\\x1b\\xbc\\xeb~\\xc8z\\xb4\\x8d\\xe3w\\x9do\\xd9\\x17\\x10\\r\\xeb\\x1b\\xa8\\xba\\xadW\\xb3\\xd8U\\x96\\xf1lN}\\x94\\xc7\\xe2\\xef\\xf7I\\x97\\'}\\t5$\\xd2g\\xa6\\x9f\\x88\\xccZ@\\x07\\x1a\\xf1+\\xc5\\x9b\\xd77\\x11yW\\xe0\\xc5\\xc7\\xe6o\\x7f\\xa0\\xc5S\\x1c\\xfc\\x1e\\xab\\xfc\\xd5\\xaf\\xf4\\x10\\xbd\\xc8a\\xb9L8\\xcb\\xa8\\'\\x1aq&\\x85\\xa0\\xfd\\xca#-\\x0c\\x85];*\\xc5P\\x92JjP\\x94\\x91hDN\\xb8DE\\xff\\x00\\xf2\\x1e\\xac\\x0c\\\\:(\\x9ak\\xbf\\x1f\\x84_\\xef\\t\\xa4\\xc5\\x9f#\\xc5wE[\\x92\\xd5H\\xac\\xb7\\xaf\\x8bk[%;\\x8f\\xc3\\x9a\\xca^e\\xd2\\xd7]\\x14\\x85\\x11\\x92\\x8bR#\\xe6_\\x10\\x90\\xf5[\\x8b\\xf8R\\x7f\\xc6s\\xeb\\x07\\xaa\\xdc_\\xc2\\x93\\xfe3\\x9fXv\\xdb`s\\x9fH\\xf7L\\xb1\\xcd\\xf0\\x03\\xef\\xd5n/\\xe1I\\xff\\x00\\x19\\xcf\\xac\\x1e\\xabq\\x7f\\nO\\xf8\\xce}av\\xd8\\x1c\\xe7\\xd2=\\xcc\\xb1\\xcd\\xf0#\\xeb1\\xda\\x9aIS\\xe5WVC\\x81&\\xc1\\xde\\xfec\\xd1c\\xa1\\xb5\\xc9sM7\\xdc4\\x91\\x1a\\xd5\\xf9OS\\x12~\\xabq\\x7f\\nO\\xf8\\xce}`\\xf5[\\x8b\\xf8R\\x7f\\xc6s\\xeb\\t\\xb6\\xc0\\xe7>\\x91\\xeee\\x8eo!\\xd6\\xc3338\\xac\\x19\\x9f\\xc6m\\x97\\x90\\xfd\\xd2\\x92BI)\"JH\\xb4\"\"\\xe4D?OU\\xb8\\xbf\\x85\\'\\xfcg>\\xb0z\\xad\\xc5\\xfc)?\\xe39\\xf5\\x83m\\x81\\xce}#\\xdc\\xcb\\x1c\\xdf\\x00>\\xfdV\\xe2\\xfe\\x14\\x9f\\xf1\\x9c\\xfa\\xc1\\
xea\\xb7\\x17\\xf0\\xa4\\xff\\x00\\x8c\\xe7\\xd6\\x17m\\x81\\xce}#\\xdc\\xcb\\x1c\\xdf\\x00>\\xfdV\\xe2\\xfe\\x14\\x9f\\xf1\\x9c\\xfa\\xc1\\xea\\xb7\\x17\\xf0\\xa4\\xff\\x00\\x8c\\xe7\\xd6\\r\\xb6\\x079\\xf4\\x8fs,sx\\xfe\\r\\x89\\xf0\\x91Xz+>\\x9eMw\\x1e\\x93\\xb8]\\xe7w\\xae\\xf6\\xe6\\xf7\\xbfw^z{\\xb5\\x1f\\x9c\\x8a*\\xd9v\\xf0\\xed_\\xaf\\x8a\\xf5\\xa46\\xdcj4\\xd7\\x19J\\x9ea\\x0en\\xf7\\x89B\\xcc\\xb7\\x92J\\xdcF\\xf1\\x11\\xf3\\xdd-}\\xc4$=V\\xe2\\xfe\\x14\\x9f\\xf1\\x9c\\xfa\\xc1\\xea\\xb7\\x17\\xf0\\xa4\\xff\\x00\\x8c\\xe7\\xd6\\x13m\\x81\\xce}#\\xdc\\xcb\\x1c\\xdf\\x00)\\x9b\\\\\\xc1))q\\xea\\x97\\xa0D\\xf47]\\xc8)\\xe3-i}\\xc2\\xdei\\xdb\\x06\\x1bq\\x1f}\\xeeR\\x14\\xa4\\xff\\x00\\xdc]\\xbdV\\xe2\\xfe\\x14\\x9f\\xf1\\x9c\\xfa\\xc2\\xed\\xb09\\xcf\\xa4{\\x99c\\x9b\\xcb:\\x0ckHR!\\xcc\\x8e\\xd4\\xb8r\\x1bS/G}\\x04\\xb6\\xddB\\x8bE%I>FFFdd|\\x8c\\x8c|\\xd6U\\xc2\\xa4\\xaf\\x8f\\x02\\xba#\\x10 \\xc7A6\\xcch\\xad%\\xb6\\x9bI{\\x92\\x94\\xa4\\x88\\x88\\xbf!\\x0fg\\xaa\\xdc_\\xc2\\x93\\xfe3\\x9fX=V\\xe2\\xfe\\x14\\x9f\\xf1\\x9c\\xfa\\xc2m\\xb09\\xcf\\xa4{\\x99c\\x9b\\xe0\\x07\\xdf\\xaa\\xdc_\\xc2\\x93\\xfe3\\x9fX=V\\xe2\\xfe\\x14\\x9f\\xf1\\x9c\\xfa\\xc2\\xed\\xb09\\xcf\\xa4{\\x99c\\x9b\\xe0y\\xe6V\\xc4\\xb1\\\\eJ\\x8c\\xcc\\x95Ft\\x9f`\\xddA+\\xbap\\x88\\xc8\\x96\\x9d}\\xca\"R\\x8bR\\xe7\\xcc\\xc7\\xaf\\xd5n/\\xe1I\\xff\\x00\\x19\\xcf\\xac\\x1e\\xabq\\x7f\\nO\\xf8\\xce}`\\xdb`s\\x9fH\\xf72\\xc77\\xc0\\x0f\\xbfU\\xb8\\xbf\\x85\\'\\xfcg>\\xb0z\\xad\\xc5\\xfc)?\\xe39\\xf5\\x83m\\x81\\xce}#\\xdc\\xcb\\x1c\\xdf\\x00>\\xfdV\\xe2\\xfe\\x14\\x9f\\xf1\\x9c\\xfa\\xc1\\xea\\xb7\\x17\\xf0\\xa4\\xff\\x00\\x8c\\xe7\\xd6\\r\\xb6\\x079\\xf4\\x8fs,s|\\x00\\xfb\\xf5[\\x8b\\xf8R\\x7f\\xc6s\\xeb\\x07\\xaa\\xdc_\\xc2\\x93\\xfe3\\x9fX6\\xd8\\x1c\\xe7\\xd2=\\xcc\\xb1\\xcd\\xf0\\x03\\xef\\xd5n/\\xe1I\\xff\\x00\\x19\\xcf\\xac\\x1e\\xabq\\x7f\\nO\\xf8\\xce}`\\xdb`s\\x9fH\\xf72\\xc74S?\\xd2.?\\xf9\\x9c\\xdf\\xf3dh\\x02\\x02\\x9f\\x04\\xa2\\xa0\\x9eS`W\\xa1\\x89IB\\x9b\\'w\\xd4\\xa3$\\x9
e\\x9a\\x91jg\\xef\\xd0\\xbf\\xf8\\x13\\xe3\\xc9\\xe21)\\xc4\\x9arp\\x88\\xb6\\xbf9\\x9f>my\\x00\\x00<\\xa8\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xcfv\\xe1\\xbb\\xc2\\xf4\\x9b\\xfa\\xe9\\xc5\\x14^\\xe3\\xd3\\x9f\\xc2q\\xb4\\xf8\\x8f\\xe3\\xff\\x00\\xfe/x\\xd0\\x86\\x7f\\xb6\\xc5\\x1aq\\x8aC#Q\\x7f\\xf8\\x9a\\x8c\\xbd\\x95\\xee\\xff\\x00\\xf9\\x9co\\x8f\\xff\\x00\\xf5\\xf1\\xfb\\xbe1\\xa0\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x0c\\xf7m\\xe9%b\\xf4\\x84e\\xa9q=\\x11\\xfd\\xf1\\'\\xff\\x00\\xcc\\xe3|g\\xfe_\\x1f\\xb8hC=\\xdbz\\x94\\x9c^\\x90\\xd2\\xad\\xd3\\xe2z\"\\xd7]9|\\'\\x1bR\\xff\\x00\\xe3\\x90\\xd0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00
\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00S\\xf3\\xbbkX\\x96Tpk&\\xa2\\x01\\xcc[\\xdd\\xeb\\xaa`\\x9d=\\x10\\x8d\\xe2\"#\\xfc\\xa27w)\\xf9\\xce\\x9f\\xd5\\xcd\\xf9\\x8fe\\x1e\\x16k\\xa6+\\x9a\\xa2/\\xf3\\xf9|!t\\xf8\\xcbB\\x01\\x9e\\xee\\xe5?9\\xd3\\xfa\\xb9\\xbf0\\xdd\\xca~s\\xa7\\xf5s~c{\\xa7\\xf7\\x8e\\xfe\\xc9\\xa76\\x84\\x03=\\xdd\\xca~s\\xa7\\xf5s~a\\xbb\\x94\\xfc\\xe7O\\xea\\xe6\\xfc\\xc3t\\xfe\\xf1\\xdf\\xd8\\xd3\\x9bB\\x01\\x9e\\xee\\xe5?9\\xd3\\xfa\\xb9\\xbf0\\xdd\\xca~s\\xa7\\xf5s~a\\xba\\x7fx\\xef\\xeci\\xcd\\xa1\\x00\\xcfwr\\x9f\\x9c\\xe9\\xfd\\\\\\xdf\\x98n\\xe5?9\\xd3\\xfa\\xb9\\xbf0\\xdd?\\xbcw\\xf64\\xe6\\xd0\\x80g\\xbb\\xb9O\\xcet\\xfe\\xaeo\\xcc7r\\x9f\\x9c\\xe9\\xfd\\\\\\xdf\\x98n\\x9f\\xde;\\xfb\\x1ash@3\\xdd\\xdc\\xa7\\xe7:\\x7fW7\\xe6\\x1b\\xb9O\\xcet\\xfe\\xaeo\\xcc7O\\xef\\x1d\\xfd\\x8d9\\xb9gn\\xbbc\\xc9{.\\xdd\\xb7\\x87\\xe4\\xe8\\x95\\x90\\xe0\\xf2,\\xa1\\xdec7N<\\xb7%\\xb4\\x88\\xd2\\xda}\\xea\\xe7\\x96\\xa2Q\\xb9\\xa1$\\xd0\\x85\\xa8\\xcdDKF\\xf1\\x99+\\xee{\\xf7f\\xfe3\\xcdk$m\\';u\\xe83\\xf2\\x06\\x93\\xf0V6\\xd3\\xa6Q\\xaak\\xf5\\xdel\\xb7=\\xcay\\xdeK[\\x8a\\xd5Zn$\\xb7\\x08\\x8d\\x04\\xda\\x0e\\xcb\\xbdj\\xd5D\\xad\\xcbfD\\xbc\\x83\\x12[s\\x99bMr7R\\xf25\\xddW%\\x16\\xa5\\xcc\\xc8\\xd2|\\x94FdddfB\\xd1\\xbb\\x94\\xfc\\xe7O\\xea\\xe6\\xfc\\xc3t\\xfe\\xf1\\xdf\\xd8\\xd3\\x9bB\\x01\\x9e\\xee\\xe5?9\\xd3\\xfa\\xb9\\xbf0\\xdd\\xca~s\\xa7\\xf5s~a\\xba\\x7fx\\xef\\xeci\\xcd\\xa1\\x00\\xcfwr\\x9f\\x9c\\xe9\\xfd\\\\\\xdf\\x98n\\xe5?9\\xd3\\xfa\\xb9\\xbf0\\xdd?\\xbcw\\xf64\\xe6\\xd0\\x80g\\xbb\\xb9O\\xcet\\xfe\\xaeo\\xcc7r\\x9f\\x9c\\xe9\\xfd\\\\\\xdf\\x98n\\x9f\\xde;\\xfb\\x1ash@
3\\xdd\\xdc\\xa7\\xe7:\\x7fW7\\xe6\\x1b\\xb9O\\xcet\\xfe\\xaeo\\xcc7O\\xef\\x1d\\xfd\\x8d9\\xb4 \\x19\\xee\\xeeS\\xf3\\x9d?\\xab\\x9b\\xf3\\r\\xdc\\xa7\\xe7:\\x7fW7\\xe6\\x1b\\xa7\\xf7\\x8e\\xfe\\xc6\\x9c\\xda\\x10\\x0c\\xf7w)\\xf9\\xce\\x9f\\xd5\\xcd\\xf9\\x86\\xeeS\\xf3\\x9d?\\xab\\x9b\\xf3\\r\\xd3\\xfb\\xc7\\x7fcNm\\x08\\x06_}?+\\xa8\\xa3\\xb1\\x9e\\x8c\\x91\\x0e.,g\\x1f$*\\xbd\\xb2%\\x1aRj\\xd0\\xf9\\xfeA\\xa4\\xc1yR \\xc7u\\x7f~\\xb6\\xd2\\xa3\\xd3\\xf1\\x99j8\\xe2\\xe0N\\x151U\\xe2by_\\xef\\x10\\xbf\\'\\xee\\x00\\x03\\xcc\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\nFs\\xf8U\\x8a\\x7fjW\\xee\\x88zG\\x9b9\\xfc*\\xc5?\\xb5+\\xf7D=#\\xecS\\xfcX\\x7f)\\xfa\\xcb5|\\x00\\x00\\x06\\x00\\x00\\x00\\x15\\xad\\xa3\\xed#\\x1c\\xd9&\\x1f;)\\xcb,~\\n\\xa1\\x84m\\xa6D\\xbe\\xe1\\xc7\\xb7\\r\\xc7\\x12\\xda=\\x86\\xd2\\xa5\\x1e\\xaaZK\\x91\\x1f\\xbf\\x9f!e\\x1c\\xd1\\xff\\x00\\xd4x\\xcd=\\x8f3\\x83\"\\xd4\\xc9\\xda\\xed\\x0b\\xf1\\xff\\x00\\xb7\\xc7\\x1c\\xf1*\\x9a(\\x9a\\xa3\\xe1\\x04k-\\xb6^\\xd4\\xb1x{G\\x85\\x819g\\xbd\\x96\\xcc\\x84\\xbb\\x16\\xebY\\x8e\\xeb\\x86\\x98\\xe9=\\r\\xd7\\x16\\x94\\x9a\\x1aN\\xa5\\xa1\\x1a\\xd4\\x9dO\\x91jf-C\\x88{8\\xdd\\xdccy^\\xdc+\\xb6\\x91\\x0c\\xea6\\xdbi\\x00\\xef\\xfd1\\x0f\\xef%\\xea\\xe3\\x8f\\xf7\\x16\\xa2\\xa8\\xbe\\xf51\\xd7\\xaa\\x0c\\x92g\\xcfNg\\xbb\\xa9W\\xb6su\\x99a\\x98\\x17f\\xed\\xa2/h\\xd9m\\xfc\\xfc\\xcb 
\\x85Aq[sdr`\\xbb\\x1eA:\\x924\\xb4e\\xec\\xad\\x04\\xd2O\\x7fSQ\\x99\\x99\\xa8\\xcc\\xccp\\x8c\\x7f\\x8c\\xc7\\xfa\\xd6\\xcdY\\xdf\\xe0?\\xcf\\\\\\x935\\xcdo\\xf6M\\xb5\\xbd\\xb6\\xabi\\xf9\\x0e?\\x91by4\\x9a\\xfa\\xdcr4\\xc4\\xb7U\\x1d\\xa6\\x1fm\\xb4F~)\\x96\\x8e\\xadd\\xb3\\xd5J\\xe6fi\\xfc\\xba\\xc8mCi\\xdbQ\\xc7\\xb3\\x9c\\xabf\\xd8\\xfc\\xfb~%\\xda\\x84z\\xablMo\\xcbuEJN!_\\x08\\xa1+Q\\x9ft\\x84w*2Jt\\xdd%jD\\x1b\\xc4G\\xc3\\xf3_c+\\xb8\\xb3<\\xb2\\xbb\\x02\\xc4n\\xb2[u\\xad\\xba\\xba\\x88n\\xce\\x94\\xb6\\x90kRZm\\x06\\xb5\\x99$\\xbd\\xe7\\xa1\\x1f!\\xfab\\x99$<\\xcb\\x16\\xa6\\xbf\\xaf\\xef=\\x02\\xd6\\x133\\xa3\\xf7\\xa9\\xdd_v\\xea\\ti\\xde.z\\x1e\\x8a-Hp\\xaav\\xb3\\x90\\xf6\\x8e\\xd9\\xc6\\xdc3Y/\\xd9U\\xe3\\xb4\\x1b;r\\x85t\\xa6\\xeb\\x8d\\xc5r\\xe1qV\\xf4\\xe7\\x14\\xd6\\xa4F\\xb6\\x8fu\\xa25\\x16\\xbb\\xa7\\xcb\\xdef^O\\xf4@\\x07\\x17bY.]\\xb2N\\xd06Lm6\\xdf1\\xb1\\xb4\\xbb\\x95h\\xf6.\\xa8\\x96\\xa9{\\x1d\\xb2a\\xb6\\xd4\\xb4E8\\xa4Z\\xc7y\\t-\\x0bR\\xe6\\xaf\\x8c\\xf9\\x19\\xe6\\xbb\\x15\\xc8\\xf6\\xff\\x00\\xb4\\xd8X~\\xd2j\\x0f.\\xb4\\x99ir\\x97f\\xba\\xe6OZX\\xf3\\x90\\xbb\\xf5%\\xe8\\xed\\xd7\\x1a\\xc9\\xc6\\xcd-\\xa4\\xc8\\x8f\\xef\\xf5I\\x9f\\xc6Zt\\xdb\\xc5\\xedi\\xb9g\\xfa6\\x00\\x03\\xd4\\xc8\\x00\\x00\\x00\\x00\\x02\\x1b5\\xfc\\r\\xbe\\xfc\\xc2G\\xee\\xd4/\\xb5_\\xee\\xb8\\x7f\\xdc\\xa3\\xfd$(Y\\xaf\\xe0m\\xf7\\xe6\\x12?v\\xa1}\\xaa\\xff\\x00u\\xc3\\xfe\\xe5\\x1f\\xe9!\\xcb\\xc4\\xff\\x00\\r?9\\xfaC\\xa5<\\x1e\\xa0\\x00\\x1f1@\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x14\\x8c\\xe7\\xf0\\xab\\
x14\\xfe\\xd4\\xaf\\xdd\\x10\\xf4\\x88\\xcd\\xa6W\\xfc\\'\\x90b\\xaczL\\x88\\x9a\\xaeI\\xf7\\x91\\x9c\\xdc_\\xf2e\\xcbQ\\xe0\\xe0\\xdf\\xa7n\\xba\\xbf\\xb2>\\xe6\\x1d4\\xce\\r\\x17\\xaa\\xdaO\\xd6R\\xabh\\xb1\\x00\\xae\\xf0o\\xd3\\xb7]_\\xd9\\x0e\\r\\xfav\\xeb\\xab\\xfb#y0\\xfa\\xbb1h\\xe6\\xb1\\x00\\xae\\xf0o\\xd3\\xb7]_\\xd9\\x0e\\r\\xfav\\xeb\\xab\\xfb!\\x93\\x0f\\xab\\xb1h\\xe6\\xb1\\n\\xd6\\xd1\\xf6o\\x8emo\\x0f\\x9d\\x8be\\x95\\xdf\\n\\xd0\\xcd6\\xd5\"\\'~\\xe3;\\xe6\\xdb\\x89q\\x1e\\xdbjJ\\x8bE!\\'\\xc8\\xcb\\xdd\\xcf\\x90\\xfb\\xe0\\xdf\\xa7n\\xba\\xbf\\xb2\\x1c\\x1b\\xf4\\xed\\xd7W\\xf6D\\x9a0\\xe6-5v[G4ng\\xb1\\xac;h9-6C{O\\xe9Wt\\xed>\\xc4)\\xadJz;\\x8d\\xb6\\xf2w\\x1dmF\\xd2\\xd3\\xbe\\x95$\\xcc\\xb7W\\xa9s=\\x08\\xb51\\xe3o`\\x98#X\\xbe\\x1b\\x8e\\xa6\\x8bJl>{\\x16tq\\xbd1\\xff\\x00\\xf6I,\\xefwk\\xde\\xef7\\x97\\xa6\\xfa\\xb9,\\xd4G\\xaf2>B{\\x83~\\x9d\\xba\\xea\\xfe\\xc8po\\xd3\\xb7]_\\xd9\\x19\\xd9aq\\xbfcNjE\\xff\\x00e\\r\\x92\\xe5\\x19\\xe2\\xb3+L&\\x0c\\xbc\\x81o\\xa2S\\x8f\\xa9\\xc7I\\xa7\\x9eO\\xde\\xb8\\xe3\\x04\\xb2i\\xc5\\x7f\\xd4\\xa4\\x19\\x9f\\xc6/\\xd3\\xf0\\x9a;L\\xb6\\xa7\\'\\x95Z\\xd3\\xd7\\xd51\\xdf\\x8b\\nr\\xb5\\xdfa\\xb7\\xb7;\\xd4\\x91k\\xa7>\\xed<\\xcc\\x8c\\xcb\\x99\\x16\\x9b\\xca\\xd7\\xf0\\xe0\\xdf\\xa7n\\xba\\xbf\\xb2\\x1c\\x1b\\xf4\\xed\\xd7W\\xf6B0\\xb0\\xa3\\x84\\xf64\\xe6\\xf3F\\xd9N\\'\\x0f\\x1c\\xc9h\\x18\\xa4\\x8e\\xd5>H\\xfc\\xb96\\xd1\\x12j\\xdd\\x96\\xe4\\x92\\xd2B\\x95\\xcfR5\\x91\\xe8z\\x19i\\xf1h*YOe]\\x95\\xe6\\x98\\xbe9\\x8f\\\\\\xe2L\\xcc\\xad\\xc7X(\\xd5_\\xedr\\x10\\xfcF\\x88\\x88\\x89\\xb4\\xbe\\x97\\t\\xdd\\xdd\\x08\\xb9\\x1a\\x8c\\xb9\\x10\\xbbpo\\xd3\\xb7]_\\xd9\\x0e\\r\\xfav\\xeb\\xab\\xfb!883\\xa4\\xcfcNj\\xd5\\xd7g=\\x9cd[1\\x81\\xb3\\xdb\\x1cV,\\xac>\\xbc\\xc9Q+\\x96\\xe3\\x85\\xe8\\xea#Q\\x92\\x90\\xe1+\\xbcJ\\xbd\\xb5\\xfbD\\xad}\\xa3\\xe7\\xcc\\xc6m\\x9cv1\\xc4 
l\\xbf\\'\\xc7\\xf6m\\x8b\\xd1WX\\xde?\\nD\\x94_\\xca\\x9e\\xf4g\\x95\\x19Fm\\x99\\xa9\\x0fw\\xad/E/\\xdbl\\xc8\\xcfx\\xcc\\xf5>cn\\xe0\\xdf\\xa7n\\xba\\xbf\\xb2\\x1c\\x1b\\xf4\\xed\\xd7W\\xf6Fg\\x03\\x06\\xae3\\xd8\\xff\\x00\\xb7>\\xec\\x07\\xb2E\\xa6)\\x136\\x89\\xb4gil\\xf1\\xfc\\x95\\x86c\\xab\\x11\\xac\\x95>usF\\x835\\x1b\\xfd\\xe4\\xd7\\x16\\xefx\\xa32\\xe6Zi\\xbaFG\\xa9\\x16\\x9a\\x8e\\r\\xd9{f;6\\xa3\\xc8\\xea1\\xcc`\\xab\\xe0d1\\x0e\\r\\x9a\\x0et\\x97U!\\x83J\\xd3\\xdd\\xef\\xb8\\xe2\\x94\\x92$\\xb8\\xb2-\\xd3-7\\xb9h.<\\x1b\\xf4\\xed\\xd7W\\xf6C\\x83~\\x9d\\xba\\xea\\xfe\\xc8\\x94\\xe0`\\xd3\\x11i\\xe1\\xe4\\x7f\\xda\\x85\\xb3~\\xc8\\xdb#\\xd9&C\\x1e\\xf7\\x17\\xc3X\\x85o\\x19\\x06\\xdcyre\\xc8\\x98\\xb8\\xe4e\\xa1\\xf7]\\xfb\\x8b\\xee\\xf9\\x19\\x96\\xa9\\xd0\\xf43/\\x8c\\xc7\\x99\\xae\\xc6\\xbb\\x1ab\\xf5W,\\xe0\\xf1\\x98\\xb1\\xf4\\xd4X\\xb6\\xf3R\\xe4\\xa0\\xa3\\xc8K\\x84\\xe18\\xcaI\\xc2K\\'\\xbe\\x923&\\xc9$zhde\\xc8h\\xdc\\x1b\\xf4\\xed\\xd7W\\xf6C\\x83~\\x9d\\xba\\xea\\xfe\\xc8\\xbb\\x0c\\x0bZ\\xf1\\xe8\\x7f\\xda\\xa7\\x8d\\xf6g\\xd9\\xa6#\\xb4\\x17\\xb3z\\xbcY\\x96r\\x87^~G\\xa7\\xbb%\\xf7\\xbb\\xb7^37\\x96\\xdbkZ\\x90\\xda\\x97\\xa9\\xeahI\\x19\\xea\\x7f\\x8cx\\xa9{&l\\x97\\x1c\\xcf\\xd1\\x9a\\xd5\\xe1\\x91k\\xf2&\\xe4\\xaaco\\xc7\\x90\\xfa\\x1am\\xe5\\x11\\x91\\xb8\\x96\\t\\xce\\xe9*\\xd0\\xcf\\x99#\\xe3\\x17\\x9e\\r\\xfav\\xeb\\xab\\xfb!\\xc1\\xbfN\\xddu\\x7fd]\\x8e\\x0f8\\xf4?\\xedb\\x01]\\xe0\\xdf\\xa7n\\xba\\xbf\\xb2\\x1c\\x1b\\xf4\\xed\\xd7W\\xf6GL\\x98}]\\x92\\xd1\\xcdb\\x01]\\xe0\\xdf\\xa7n\\xba\\xbf\\xb2\\x1c\\x1b\\xf4\\xed\\xd7W\\xf6C&\\x1fWb\\xd1\\xcdb\\x01]\\xe0\\xdf\\xa7n\\xba\\xbf\\xb2\\x1c\\x1b\\xf4\\xed\\xd7W\\xf6C&\\x1fWb\\xd1\\xcd\\xe9\\xcd\\x7f\\x03o\\xbf0\\x91\\xfb\\xb5\\x0b\\xedW\\xfb\\xae\\x1f\\xf7(\\xff\\x00I\\x0c\\x8f.\\xc4\\xbb\\x8cR\\xe9\\xcf\\x86\\xad\\xdc\\xdc\\x84\\xfa\\xb7\\x1c\\x95\\xaaU\\xa3j\\xe4e\\xa72\\x1a\\xe5W\\xfb\\xae\\x1f\\xf7(\\xff\\x00I\\x0f7\\x8b\\x8ac\\n\\x9c\\xb3}g\\xe9\\x0e\\x91\\xc1\\xea\\x00\\x01\\x
f2@\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x05#9\\xfc*\\xc5?\\xb5+\\xf7D=\"3i\\x95\\xff\\x00\\t\\xe4\\x18\\xab\\x1e\\x93\"&\\xab\\x92}\\xe4g7\\x17\\xfc\\x99r\\xd4x87\\xe9\\xdb\\xae\\xaf\\xec\\x8f\\xb9\\x87M3\\x83E\\xea\\xb6\\x93\\xf5\\x94\\xaa\\xda,@+\\xbc\\x1b\\xf4\\xed\\xd7W\\xf6C\\x83~\\x9d\\xba\\xea\\xfe\\xc8\\xdeL>\\xae\\xccZ9\\xac@+\\xbc\\x1b\\xf4\\xed\\xd7W\\xf6C\\x83~\\x9d\\xba\\xea\\xfe\\xc8d\\xc3\\xea\\xecZ9\\xac@+\\xbc\\x1b\\xf4\\xed\\xd7W\\xf6C\\x83~\\x9d\\xba\\xea\\xfe\\xc8d\\xc3\\xea\\xecZ9\\xac@+\\xbc\\x1b\\xf4\\xed\\xd7W\\xf6C\\x83~\\x9d\\xba\\xea\\xfe\\xc8d\\xc3\\xea\\xecZ9\\xac@+\\xbc\\x1b\\xf4\\xed\\xd7W\\xf6C\\x83~\\x9d\\xba\\xea\\xfe\\xc8d\\xc3\\xea\\xecZ9\\xac@+\\xbc\\x1b\\xf4\\xed\\xd7W\\xf6C\\x83~\\x9d\\xba\\xea\\xfe\\xc8d\\xc3\\xea\\xecZ9\\xac@+\\xbc\\x1b\\xf4\\xed\\xd7W\\xf6C\\x83~\\x9d\\xba\\xea\\xfe\\xc8d\\xc3\\xea\\xecZ9\\xac@+\\xbc\\x1b\\xf4\\xed\\xd7W\\xf6C\\x83~\\x9d\\xba\\xea\\xfe\\xc8d\\xc3\\xea\\xecZ9\\xac@+\\xbc\\x1b\\xf4\\xed\\xd7W\\xf6C\\x83~\\x9d\\xba\\xea\\xfe\\xc8d\\xc3\\xea\\xecZ9\\xac@+\\xbc\\x1b\\xf4\\xed\\xd7W\\xf6C\\x83~\\x9d\\xba\\xea\\xfe\\xc8d\\xc3\\xea\\xecZ9\\xac@+\\xbc\\x1b\\xf4\\xed\\xd7W\\xf6C\\x83~\\x9d\\xba\\xea\\xfe\\xc8d\\xc3\\xea\\xecZ9\\xac@+\\xbc\\x1b\\xf4\\xed\\xd7W\\xf6C\\x83~\\x9d\\xba\\xea\\xfe\\xc8d\\xc3\\xea\\xecZ9\\xac@+\\xbc\\x1b\\xf4\\xed\\xd7W\\xf6C\\x83~\\x9d\\xba\\xea\\xfe\\xc8d\\xc3\\xea\\xecZ9\\xbd9\\xaf\\xe0m\\xf7\\xe6\\x12?v\\xa1}\\xaa\\xff\\x00u\\xc3\\xfe\\xe5\\x1f\\xe9!\\x91\\xe5\\xd8\\x97q\\x8a]9\\xf0\\xd5\\xbb\\x9b\\x90\\x9fV\\xe3\\x92\\x
b5J\\xb4m\\\\\\x8c\\xb4\\xe6C\\\\\\xaa\\xff\\x00u\\xc3\\xfe\\xe5\\x1f\\xe9!\\xe6\\xf1qLaS\\x96o\\xac\\xfd!\\xd28=@\\x00>H\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xa4g?\\x85X\\xa7\\xf6\\xa5~\\xe8\\x87\\xa4y\\xb3\\x9f\\xc2\\xacS\\xfbR\\xbftB70\\x8f\\x92I\\xa6R1Y\\xf5U\\xb6\\xdd\\xe2L\\x9f\\xb8\\x84\\xe4\\xb67?\\xe2.\\xed\\xb7\\x9aV\\xbe\\xed\\x0f\\x7f\\x97\\xe21\\xf5\\xe9\\xfe\\x1a>S\\xf5\\x96j\\xf8&\\xc0q^CA\\x9bb]\\x85\\xd8\\x8d\\x9a?Z\\xd3\\x10O\\x1e[0\\xd8\\xadz\\x1c\\x98-\\xb7g\\x19OzR\\x9cyd\\xb3$\\x91\\x1e\\xa9KdD\\x95jG\\xaf\\'j;\\xea\\xdb\\xad\\xa0\\xed]\\xba\\xfb\\x08\\xd3\\x97\\x1bcS\\xd2\\xf1Fy.wf\\xa9[\\xc9%hg\\xa1\\x9aL\\x8fC\\xf8\\x8c\\x8f\\xe3!\\xe7\\x9ckE\\xe6?5\\xf6K;PU\\xb3=\\xa2\\xd6\\xe0\\xd6\\xd8\\x9dt\\xf6%<\\xfeKi\\xf0L5FBT\\x96\\xdd\\xee]{y\\xcdTFI\\xddeE\\xa9\\x11\\x9e\\xa6\\\\\\xbd\\xe6\\\\\\x99t\\xdd\\x16\\xces|\\x01\\xfd\\x8a\\xfa\\x11\\xe5\\x13\\xf1{\\x89W\\x10i\\x9f\\xf4\\x84Lm\\xba\\xe3\\\\W\\xa4\\xa1*Q\\x1a\\xfd/\\xb9JV\\xa2\\xdeQ\\xadE\\xa9\\xf3\\xd2#\\r\\xc6vp\\xd2\\xfb3e\\x18\\xe4\\xf8\\xb6\\xf9\\x8d\\xd5\\xdbR.-\\x15`o\\xce\\x98\\xea\\xebd\\xaeB\\x9f#Y\\x9e\\xf2]\\xd0\\xb9\\x97\\xb1\\xae\\xe9i\\xae\\x87\\'\\x1axD~i\\xeeY\\xdeb\\x0b\\x19\\xcd\\xe9\\xf2\\xf9\\xd9\\x04J\\xa9\\'%\\xfa)\\xe7Y<\\x8d\\xb5 
\\x9b\\x90M6\\xe9\\xa0\\x8c\\xc8\\xb7\\xb4K\\xa8\\xe6Z\\x97=5\\xe48Z\\x9a\\xd6\\x04\\x8d\\xa6l\\xc7i\\x94,\\xe3X\\x83\\xb9Fr\\xe4\\x05WD\\x97!\\xdb\\xb9Q\\xd6\\xa9\\r\\xbd\\xe9\\x8b[\\xdb\\x86\\x93ZR}\\xd14}\\xd9\\xa9\\xa2%\\x16\\x9a\\x1d\\xcf\\x17\\xc7\\xe8\\xb0G\\xbbO[b\\x94\\xf5P\\xf6\\xa1]cd\\xaa^\\xe5\\x96\\xcaz[]c\\x0f \\x9aO\\xdf)*p\\x9cY\\x11\\x16\\x8aQ\\x1f\\xe5\\x08\\xc6\\x99\\xf8~Z\\xe5\\x9d\\xa6 \\xe8\\xb3*\\xec\\x8e\\xf6\\xfe\\xae\\x01\\xb8\\xfb\\x94o7\\x16c\\xe4\\x92\\xee\\x92\\xfa\\xdb\\'\\r\\x92V\\xbc\\xd6\\x94-\\xb5+\\x96\\x85\\xde$\\xb53\\xde\"\\xe4\\x9e\\xce[.\\x87\"\\xff\\x00gY}\\x06\\xd0\\xf0\\xb4K~?\\xa7Kf\\x924\\x94\\xd9\\xde4\\xa6\\x0c\\x9enZ\\x9d\\x9e\\xefz\\xa4\\xadiZ\\x94\\xa6\\xf7\\x92\\xb4\\x17\\xde\\xf3!\\x14\\x8c\\x82\\xc6\\x17e\\xdc\\x86T\\xdb\\t5\\x11\\xed\\xb6\\x9d&\\x1eYg\\x1d\\xd54\\xec8.\\xdb\\x1bRU\\xbe\\\\\\xdb\"l\\x90\\x83W-\\x12a\\xb6\\x9b^`\\xb3\\xaf\\x91\\xb4Z\\xd76\\xa2\\xfe\\x06LJ\\xf8a\\x9af\\xef\\x14\\xf9\\xa1>\\x8el-\\xf5\\xb2I%ooo\\xef6g\\xa6\\xee\\x9ai\\xcf^B\\xce\\xf3\\xa8a\\xa5\\xba\\xe1\\xee\\xa1\\t5(\\xff\\x00\\x11\\x17\\xbcp\\xf5\\xdavo\\xb0\\xfd\\xa7\\xed:n!M\\x12\\xc3\\x19\\x8b\\xb3\\x04?.\\xaa\\x96z\\xb7\\x1eZ\\xe6<\\x83.\\xf5+3ky\\x06\\x83R\\xd2ddFk\\xe6|\\xcf\\xf6\\xd8\\x9e\\x1b\\x0b\\x1e\\xdb\\xaeI\\x81\\x9a\\xb1\\x07\\xe9r\\x0c\\x05\\xc9\\xd6\\x18\\xde*\\xa7\\x9c\\x81\\xde\\xfaJ\\x1bOx\\x97^s\\xbcY\\xb6\\xea\\xc8\\xd6D\\x8d\\xf4\\x99\\x19\\xa7\\xe34cM\\xed0Y\\xd8xVeU\\xb4,N\\xab%\\xa2\\x90\\xa9t\\xf6\\x8c&LG\\xd4\\xda\\x9b7\\x1bW\\xde\\xabuDFZ\\x97\\xc4dF&\\x87\\x08\\xec\\xf4\\xf0\\x1co\\xb1F\\r\\x06\\xb3\\x1c\\xc7\\xad,\\xf2\\xd7\\xaa\\xaa\\xac\\xdazQ\\xc5\\x8f\\xe9\\xabZ\\x89/X-\\xa3%\\xee!M,\\x8d\\'\\xcdf[\\x9f\\xf1\\x18\\xb2l\\x1e\\xaf\\x1c\\xae\\xa4\\xed\\x0b\\x81\\xe4\\xf7\\xd4\\xa7\\x80\\xd5\\xb9\\x1c\\xa4\\xfc\\x03!\\xd8\\xf5\\xf0Z~\\x11\\x1c\\x94\\xb2ky\\xc5\\xb4[\\xc9V\\xa9%\\x99o\\x92\\xf4\"\\xd7t)\\xc6\\x99\\xb5\\xe3\\x8c\\x16vP\\xabm\\'h\\xb5\\xbb-\\xc6Syj\\xc4\\xa9\\x11\\x156$\\r\\xc8hJ\\x9c
\\xef$Hm\\x86\\xcfE)%\\xbaJq&|\\xf5\\xd0\\x8fB3\\xe4$0\\xb8\\xf510\\xea&(\\x1fL\\x9a&\\xa00\\x8a\\xf7\\xd2\\xf1\\xbcNG&\\xd2M(\\x96ff\\xbdQ\\xba{\\xc6g\\xaf\\xbcp\\x1c\\xca\\x9c7)\\xd8l<\\xe6\\xfaLi\\xbbe\\x91\\x9b\\xc3f\\xd5\\xc9SO\\xd3c>\\x9b\\xa46q\\t\\xa3V\\xa8m\\x0c\\xa4\\xb7[\\xdd\\xd3D\\x92\\xb4\\xd7\\x99o\\x13\\x12i\\x8d8\\xa4E\\xdd\\xf9Y\\x92\\x9d\\x96KuO\\xf0M\\x9cB\\xacK\\n\\xf8BL}\\xc8\\xb2\\xfb\\xd4\\xa9Z0\\xe6\\xbe\\xd9\\xa3wE\\xf2-\\x0c\\xcb\\xdf\\xa8\\xf9\\xcd\\xf3J\\x8d\\x9db6\\xd95\\xf4\\x93\\x87MW\\x1dR\\xa5>\\x96\\xd4\\xe1\\xa1\\t\\xf7\\x99%$f\\x7f\\xd4D8\\xcbj\\xc9,\\x13\\'\\xed\\x11\\xf0*\\xdc\\xc7jg\\\\\\xe2\\xa7}>\\xb0\\xcd\\xa7c\\xc3\\x92g\\xe9\\xd2\\tI\\xe6\\x95)+^\\xf2\\x8b\\x99\\x12\\x94|\\xbd\\xe2\\xd9\\xb7\\x9d\\x96\\xec\\x8e\\xa3\\xb2\\xf6\\xd6\\xa2`5\\xf4o:T\\x88\\xb0\\x91\\x1e\\xb2YI\\xdd\\xee\\xf7\\xd4\\xcc\\x83N\\xfa\\xb4>N\\x199\\xa6\\xaa\\xdd=L\\xf7yck6\\x9bG\\x0b\\xfd\\xfd\\x96\\xce\\x9e\\xc8\\xf3Z\\xdcQtgb\\xa7\\x1ab\\xdesu\\xccH$\\x91\\xb6\\x87\\x9cJ\\x8d\\xa4\\xac\\xf5\\xf6w\\xd4D\\x82=\\x0f\\xdaRK\\xe3!<9k\\xb4}~+\\x8b\\xf6)\\xc9\\x8bglU\\xc3\\xaf\\x88pf@E\\x1e\\xe7t\\x89^\\x9b\\x19\\xd6\\xd4\\x9d\\xceD\\xa3Y\\xa0\\xff\\x00\\x1f2\\xfcc\\xa9K]\\x0b_\\x7f\\xe4\\x1d\\xa9\\xaaff\\'\\xc9\\x90\\x00\\x07@\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x10\\xd9\\xaf\\xe0m\\xf7\\xe6\\x12?v\\xa1}\\xaa\\xff\\x00u\\xc3\\xfe\\xe5\\x1f\\xe9!B\\xcd\\x7f\\x03o\\xbf0\\x91\\xfb\\xb5\\x0b\\xedW\\xfb\\xae\\x1f\\xf7(\\xff\\x00I\\x0e^\\'\\xf8i\\xf9\\xcf\\xd2\\x1d)\\xe0\\xf5\\x00\\x00\\xf9\\x8a\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x
00\\x00\\x00\\x00\\x00\\x00\\xa4g?\\x85X\\xa7\\xf6\\xa5~\\xe8\\x87\\xa4y\\xb3\\x9f\\xc2\\xacS\\xfbR\\xbftC\\xd2>\\xc5?\\xc5\\x87\\xf2\\x9f\\xac\\xb3W\\xc1\\xf8O\\x81\\x1a\\xd2\\x13\\xf0\\xe6\\xc7j\\\\I\\x086\\x9e\\x8e\\xfa\\tm\\xb8\\x83-\\r*I\\xf222\\xe4dc7\\xcd;=b7\\xbb8\\xc9\\xf1lz\\x92\\x97\\x0fz\\xe6\\x9eM:l+*YB\\xa3\\xb4\\xf1\\x1e\\xa4IF\\xe6\\xf2w\\xb4Q\\xa3x\\x88\\xcc\\xb5\\xf7\\xf3\\x139\\xfe\\xd8\\xf1M\\x9dl\\xc6\\xc3<\\xb0\\xb7\\x86\\xf6?\\x1a\"\\xa53\"<\\x96\\xd4\\x99\\x87\\xbaf\\x86\\xd8V\\xf6\\xea\\xd6\\xb3-\\x12D|\\xcc\\xc4F\\t\\xb6\\xfa\\xdb\\xad\\x98\\xc3\\xcd\\xb2\\xc9\\xb8\\xd6%U5\\xd3Ly(\\xc8\\xd8\\x99\\ri\\xf7\\x16\\xb2w[o\\xbc\\xde%\\xa4\\xd0\\x9d\\xed\\r\\x06[\\xc7\\xf1s\\xaah\\x99\\xcb,\\xea\\xb1\\xe0\\xbb0\\xc5\\xf6u\\x18\\xca\\x87\\x1e\\xa8\\xa9\\x96\\xf3HD\\xb9u\\xd5\\xedFrR\\x92Zo8h\"5\\x19\\x9f>f~\\xf1\\xf3\\x0bdx-m\\xdf\\xc310\\xbcz-\\xc7\\xa4z_\\xc2\\x0cU0\\x89\\x1d\\xfe\\x8a.\\xf7\\xbc$oo\\xe8\\xb5\\x96\\xf6\\xba\\xe8\\xa3\\xe7\\xcc\\xc4\\x06\\xd1\\xfbD\\xe0\\xbb0\\x81\\x88\\xcf\\xb6\\xbe\\xaf:\\xfc\\x9e\\xc1\\x10`\\xcdD\\xe6\\x12\\xc6\\xe1\\xa4\\xd4\\xb9\\nZ\\x96E\\xdc\\xa0\\x88\\x89KN\\xba\\x1a\\xd0G\\xf7\\xda\\x8b=\\xde\\xd30\\xfcj\\x92\\x05\\xcd\\xbeWIUQ`\\x94\\xae\\x1d\\x84\\xdb\\x16Y\\x8f%*I)&\\xdb\\x8aQ%dddddg\\xa9\\x19\\x18^\\x8e\\x1c\\x93W\\xe2[(\\xc2\\n\\xc6m\\x81a\\xd4\\x05>k\\xc8\\x93*W\\xc1lw\\xb2\\x1dB\\xc9hq\\xc5n\\xea\\xa5%dJ%\\x1e\\xa6FDe\\xcc{\\xdc\\xc1\\xb1\\xb7\\xb2\\xa6\\xb2w1\\xfa\\xa5\\xe4\\x8d6l\\xb7r\\xa8M\\x9c\\xc4#C-\\xd2{w|\\x8bC2\\xd3]43\\x14I\\x1d\\xa31j\\xbd\\xa5\\xd9\\xe37\\x16u\\x14\\xd51\\xaa 
\\xda\\xc7\\xbe\\x9dl\\xd3LK9.>\\x84\\xb4\\x8d\\xed\\x13\\xc8\\x98\\xde#%\\x9e\\xf6\\xf7\\xb8\\xb4\\xd4\\xf5F\\x9dC\\xcd\\xa1\\xc6\\xd6\\x97\\x1bY\\x12\\x92\\xb4\\x9e\\xa4\\xa2?q\\x91\\x8b\\x19g\\x81\\xaa\\xbdG\\xb3\\\\C\\x18\\xbb\\x97sO\\x8a\\xd2T\\xdb\\xcb\\xd4\\xe4XA\\xaee\\x99\\x0fjz\\x9e\\xfb\\x89I)Z\\x9f\\xe31\\x1d\\x8dl\\xbe\\xbb\\x1b\\x99\\x996D\\xcc\\xca,\\x96i\\xd9?U%\\x82[h}\\xc6\\xc9\\x12}\\xfa\\x92\\x90\\xe6\\xe2Vi2\\xe4\\xa58z\\x99(\\x896\\x1c\\x93)\\xa5\\xc3j\\x9c\\xb4\\xbf\\xb7\\x81GZ\\xd9\\x92W2\\xcaJ#\\xb2\\x93?q\\x1a\\xd6dE\\xaf\\xf5\\x8c\\xf3\\x05\\xed\\x17\\x8beTY]\\xed\\x95\\x9dF=AI\\x90?F\\xd5\\xb4\\xbbf\\xbd\\x16a!\\xb6\\xd6\\x97\\x92\\xea\\xb7RD\\xb2s\\x92H\\xd5\\xf7\\xba\\xeaz\\xf2\\x93\\x92&\"MV\\xda\\r\\x94\\xe18\\xa2_M&\\x1fAN\\x97\\xe3\\x9c7\\x8a\\x05c\\x0cw\\x8c\\x19\\x99\\x9bJ\\xdcIj\\x8333\\xdd>Z\\x99\\xf2\\x1f\\xa6?\\xb3,;\\x12\\\\E\\xd1\\xe2tt\\xcb\\x88n\\x1cuW\\xd6\\xb2\\xc1\\xb3\\xde\\x11\\x13\\x9b\\x9b\\x89-\\xdd\\xe2Jw\\xb4\\xf7\\xe8Z\\xfb\\x85wi]\\xa00\\xbd\\x98\\xe0\\x10s)\\xf70\\xa6Q\\xcf\\x99\\x1e\\x1491&2\\xa6\\xe4)\\xd7I\\x1b\\xc8Y\\xac\\x92\\xa4\\xa0\\xb7\\x96\\xa3#=\\x10\\xda\\xcfC\\xdd\\x13\\x13\\xb6\\xc5\\x81U\\xd3V\\xdb\\xcd\\xcd\\xf1\\xc8\\x956z\\xfa\\x0c\\xf7\\xed\\xa3\\xa1\\x89z\\x1e\\x87\\xdd8k\\xdd_>^\\xc9\\x98^\\x88\\x9bi\\xa1\\xab\\xf5-\\x93`\\xe4\\xc5\\xcb\\x05\\x86\\xe3\\xe4\\xcd\\xd2\\x89v\\x8d\\xfc\\x16\\xc6\\xec\\xf5\\x12\\x8dDo\\x96\\xe6\\x8e\\x99\\x19\\x99\\xea\\xady\\x99\\x98\\xfd\\xa2l\\xd3\\x10\\x81\\x06T(\\xd8\\xa5$hr\\xa2\\x14\\t\\x11\\xda\\xaee-\\xbd\\x18\\xb7\\xb4ai$\\xe8\\xa6\\xcb}~\\xc1\\xf2\\xf6\\x8f\\x973\\x1e}\\xa0m\\x12\\x16\\r\\x8eC\\xb2Keg\"\\xcadj\\xea\\xc8\\xac\\xb8DR\\xe4\\xc8Y!\\xa4\\x92\\xf9\\x91\\'\\x99\\xadJ\\xd0\\xf4BT\\xad\\x0fM\\x0f\\xd9\\x91m\\x0f\\x15\\xc4,!@\\xbd\\xc9\\xa9\\xe9gM\\xfek\\x1a\\xc6{Q\\xdd\\x7f\\x9e\\x9e\\xc2V\\xa25s\\xfcZ\\x8b\\xfe0&\\xe1\\xc3b\\xbe#\\x11b\\xb0\\xdch\\xac!-4\\xc3($!\\xb4$\\xb4JR\\x92\\xe4DDDDD+\\x16\\x1b#\\xc1m\\xaf\\x1e\\xb9\\x9d\\x85\\xe3\\xd3
.\\x1eR\\x16\\xed\\x84\\x8a\\xa6\\x1c\\x90\\xe2\\x90\\xa2R\\rN\\x1a7\\x8c\\xd2\\xa4\\xa4\\xc8\\xcc\\xf9\\x1aH\\xcb\\xdc%\\xaa2\\xea,\\x82A1Wu]d\\xf9\\xc5fq7\\x0eSn\\xab\\xd1\\xdd#6^\\xd1&g\\xdd\\xafu[\\xab\\xf7+C\\xd0\\xcfA\\x1f;j\\x18m^3\\x13#\\x99\\x96\\xd1D\\xc7\\xa6\\x19\\x14kg\\xec\\x99DG\\xcc\\xf5\\xd0\\x90\\xe9\\xabqZ\\xe8~\\xe3\\xf8\\x8cY\\xcb1\\xa8\\x94N3N\\x99v\\x92\\x8a\\xaa\\tI\\xb5B\\x1b\\xb0x\\xa3#~bR\\x93JR\\xea\\xb4\\xd5\\xc2$\\x99\\xa4\\x89Z\\xe8Fd#\\xb1\\xad\\x9bb8dI\\x91q\\xfcZ\\x96\\x8a4\\xc22\\x92\\xcdms1\\xd0\\xff\\x00\\xbc\\xbd\\xb4\\xa1$J\\xf7\\x9f\\xbf\\xf1\\x98\\xfa\\xb0\\xda.)S\\x02\\x04\\xe9\\xd9=4(S\\xdb[\\xd1$\\xc8\\xb0i\\xb6\\xe4\\xa1\\x085\\xadM\\xa8\\xd5\\xa2\\xc9(#Q\\x99jDE\\xa9\\xf2\\x1f\\xad\\x06y\\x8c\\xe5p$\\xce\\xa4\\xc8\\xaan!E$\\xa9\\xf90\\'4\\xfbl\\x92\\x9b\\'\\x12kR\\x14d\\x924)+-}\\xe9Q\\x1f\\xb8\\xc3\\xfcn*\\xf7{\\x0e\\xc6gP\\xd5c\\xb5Uu\\xb8\\xee3\\x1e\\xdd\\x8b\\x89\\xb5UP[\\x8e\\xd4\\xd7\\x19Q8\\xdaT\\x94\\x12R_um\\x95\\xa8\\xf43Q4I\\xf8\\xf5-\\x10B\\xd6\\xe6\\xd8\\xed\\xcc\\xa8\\x11\\xab\\xef\\xab\\'H\\xb0\\x86V0\\xd9\\x8d1\\xb7\\x17&)\\x99\\x11>\\xd9\\x12\\x8c\\xd6\\xdf2\\xf6\\xcbT\\xf3.|\\xc7\\xeb\\x93e\\xb4xUZ\\xac\\xf2\\x1b\\x9a\\xfa\\x1a\\xd4\\xa8\\x90\\xa9\\x96r\\x91\\x19\\x92Q\\xfb\\x88\\xd6\\xb3\"\\xd4\\xff\\x00\\x16\\xa1\\x19cX\\x12\\xa02\\xcc\\xbf\\xb4\\xc6\\xcf0\\x8c\\xbb\\x12\\xa2\\xb4\\xc9\\xea\\xa3\\xf1$W\\xe6\\xc6\\xb1r\\xca:\"\\xb6\\xcbd[\\xabZ\\xd4\\xe1rqFil\\xcbRQ\\xa1|\\xfd\\x91o\\xb1\\xda^!Qt\\x9ay\\xf9U$+eHj\\x19@\\x91b\\xcbo\\x9b\\xee\\x91\\x1bMwf\\xa2V\\xfa\\xc8\\xc8\\xd2\\x9d5Q\\x19hF&zg\\xe2Yd\\x01V\\xd9\\xfe{\\x1f<\\x89o\\xbb\\x1dP\\xac)\\xec\\xdf\\xa9\\xb0\\x86\\xa5\\xef\\x9b/\\xb6ddd\\xad\\x0bT\\xad\\xb5\\xb6\\xe2OB\\xf6\\\\N\\xa4G\\xa9\\x14\\xd3\\xd9\\x05\\\\k\\xa8\\xd4\\xefYCj\\xdeSK}\\x8a\\xf5\\xbe\\x82\\x90\\xebh2%\\xad\\r\\x99\\xef))3-L\\x8bB\\xd4\\xb5\\x1a\\x89\\x89\\x8b\\x8fx\\x0f\\xc6t\\xe8\\xd5\\x90\\xdf\\x972CQ\"0\\x83q\\xd7\\xdfY!\\xb6\\xd0E\\xa9\\xa9J>DD_\\x19\\x8cp\\xfbR\\xe2\\
xd2\\xb2<\\xba\\x1de\\x8d\\r\\x8d-\\r\\x04{\\x94\\xdf\\xa6\\xfe:!>\\xf3\\xcf<\\xcac)\\xd3\\xfb\\x9bG\\xbe\\xd2\\x0b|\\xd6|\\xdd\"\\xd0\\xb4\\xe7&\\xa8\\xa7\\x8c\\x8d\\xa4\\x058\\xb6\\xab\\x8fSat\\x19\\x06YuI\\x89\\xb7k\\x15\\x97\\xd2S\\xad\\xd8\\xee\\t\\xc5\\xb6\\x95\\x9bm\\xbej$:E\\xbd\\xa1)<\\x94Z\\x19r1\\x03s\\xdaG\\x00\\xa1\\xdae.\\x13;#\\xac\\x8d>\\xda\\xb5VQ\\xe5\\xbda\\x1d\\x11\\xd4\\x93q\\xb6\\xdah\\x8c\\xd7\\xaa\\x9cw\\xbc5!$^\\xd1!FZ\\xe8$\\xd7Lq\\x92\\xcb\\xb6k\\xf8\\x1b}\\xf9\\x84\\x8f\\xdd\\xa8_j\\xbf\\xddp\\xff\\x00\\xb9G\\xfaHP\\xb3_\\xc0\\xdb\\xef\\xcc$~\\xedB\\xfbU\\xfe\\xeb\\x87\\xfd\\xca?\\xd2C>\\'\\xf8i\\xf9\\xcf\\xd2\\x1d)\\xe0\\xf5\\x00\\x00\\xf9\\x8a\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xa4g?\\x85X\\xa7\\xf6\\xa5~\\xe8\\x84naEe\\x91S*\\x1dVG;\\x15\\x96n%ea\\\\\\xc4w\\x9d\"/zwd4\\xe24?\\xec\\xeb\\xf8\\x8c\\x84\\x96s\\xf8U\\x8a\\x7fjW\\xee\\x88zG\\xd7\\xa7\\\\\\x1a>S\\xf5\\x96j\\xf89\\x13\\'\\xd9nE\\xb3n\\xc1\\xd9\\xb5NY{2\\xe6[\\x18\\x9a\\xd0\\x8a\\xc9\\xac\\xc3\\xee\\xab\\x1cm\\xa5\\xea\\x96V\\xc3I5\\xeb\\xa9{KR\\xcf\\xd8-\\x0c\\xb5=|\\x9bx\\xae\\x8bA\\xb5]\\x98\\xdbdW\\xd2p\\xad\\x9f\\xb3\\x8d\\xc8\\x89\\x16\\xe2-dIqaY\\xad\\xc6\\x96d\\xead0\\xf3l\\xf7\\x8d\\x11\\x919\\xbaG\\xaaM$\\xad\\x14\\xa2>\\xc6\\x01\\xc2pb\\xd6\\x89\\xe5\\xd9\\x9b\\xb8\\xaf\"\\xc5\\xf1\\x0c\\x03d\\x9b:\\xca*n\\xe6\\xe4\\xb8R6\\x90\\xd6A>\\xe2\\xc6\\xbd\\xb6[\\x8a\\xc3\\x88}\\x87\\x96\\x96Za\\xa4\\xb7\\x1f\\xbd\\xddW\\xb2\\x82I\\xf7\\x9b\\xc5\\xa9\\x19\\x18\\xb0]\\xe7;?\\xa6\\xed\\x16\\xeee\\x9d=\\x05\\xec&\\xdf\\x
15\\x86\\xc6!w6)\\xbb\\\\\\x8d\\x1dx\\xe52\\x85\\x1aM(uz\\xb2\\xafq\\x1a\\x92DDg\\xee\\x1dh\\x01\\xb2\\xb7\\t.\\xe5\\xfcC\\x1d\\xc2\\xf6\\x83\\xdaS&\\x90\\xdd\\x1dm\\x8d\\x0b\\x9b=\\xa7E{R+\\xc9\\tj+\\xafL-\\xc46\\xb4\\x91\\xb6F\\x92I\\x1at/q\\x11\\x97!{\\xeci-\\xe9\\xbd\\x96vb\\xeb\\xee\\xa9\\xe7\\n\\x91\\x84o,\\xf5=\\xd4\\x91\\xa5%\\xff\\x00b\"/\\xfb\\r\\x94R2\\xbd\\x91Tf7\\x0b\\xb2\\x99m\\x94\\xc3}HJ\\r\\xaa\\x9c\\x9e\\xc2\\x03\\x04DZ\\x11\\x93L>\\x84\\x11\\xfe3\"\\xd4\\xfe1c\\x0ei\\x9b\\xc7\\x9fr\\xf7d\\x9d\\xa3li1\\xad\\xb8\\xec\\xb3 \\xcf\\xdbB\\xb6w\\x0e5\\x8b~\\x93-\\x83v\\x14;U\\x93>\\x8e\\xe3\\xe5\\xa1\\x91j\\xd9<\\x94)E\\xa2Tg\\xcc\\xb5\\xd4b\\x94y\\xb5=~5x\\x8a\\xb5\\xd2\\xd0\\xe0\\xf7\\x1bU\\xb372\\x99\\xd4\\xe8\\x91\\x1a\\x9d\\xa2\\x84\\xda\\xdaq\\xa6\\x9dF\\xe3kuZ\\xb6\\x97\\x16\\x9d\\xd2%\\x9f#\\xde!\\xdc8v\\x1d\\x0f\\x08\\xabr\\x04)\\x96\\xd3Y[\\xa6\\xf1\\xb9qk&\\xc5\\xe23\"-\\t\\xc7\\xd6\\xb5\\x12}\\x92\\xf6H\\xf4#3=53\\x13\\x833\\x855M\\xee]\\xfe}\\xd3\\xb7\\x11=\\x996\\xa4\\xc4gd\\xdc\\xd7\\xd0\\xed\"-\\xb9)\\xda\\xd2a\\xdf\\x83\\xbd*\\x04\\x85ILd6\\x82KjG|\\xe1n!)4\\xef\\x19\\x16\\x9a\\x8d+l;S\\xc4s\\\\\\xbf\\x13\\x88\\xd6CGA\\xb3\\xf9\\x94\\x92\\xe4D\\xca\\x93F\\xc5\\x8b\\xb3\\xdf\\'\\xc9\\xa7+\\xe2\\xf7\\xcd8\\x84\\x9e\\x89%\\xa9$\\x85)~\\xc1$\\xb9j:\\xe8\\x020f\"\\xd7\\xfc\\xf5.\\xe1\\x8d\\x9c=)\\xdd\\x92\\xf6J\\xf4\\x95\\xbc\\xa8\\xb5\\xb9K\\xb5\\xd3\\x19\\x90\\x93J\\xd8\\x90\\xdb\\x13Ze\\xb7\\x12|\\xd2\\xa4\\x1aM:\\x19r\\xe4_\\x88H\\xe7W\\xd8N\\x13\\xb4\\x1e\\xd0qv\\x96\\xccV\\xb2K\\xe8\\xa9^=*\\xd6\\x11\\xbc\\x99\\x95\\xe5\\x01(m\\x98\\xaa4\\xa8\\xb5C\\xc4\\xe6\\xf2\\x13\\xa1\\x9a\\x8c\\x8fC\\xf7\\x97[\\xe6\\xf8U~{FU\\xb6\\x06\\xe3]\\xd4\\x96f\\xc6\\x94\\xc1\\x91;\\x1aC.%\\xc6\\x9dl\\xcc\\x8c\\x89IZH\\xf9\\x91\\x91\\x96\\xa4ddfG\\xef\\xc8i\\x18\\xc9h,\\xea%-\\xc6\\xe3XEv#\\xabd\\xc8\\x96\\x948\\x83I\\x9aL\\xc8\\xcb]\\x0c\\xf4\\xd4\\x8cM\\x8c\\xc4Z\\xff\\x00\\x96\\xb2\\xdd\\xc8;\\n\\xcei6O\\x95c\\x969T\\xcf\\x82a\\xde\\xec\\xb7\\x18E[\\xabi
j\\xf4\\xd7XC\\xdd\\xeb,\\x92H\\xcdn\\x97z\\xd9\\xf7i\\xd5FJ-\\x08\\xc6O\\xb3\\xd6\\xd9\\xab\\xa3\\xd8}\\xe6]\\x93\\xcd\\xc30\\x93\\xc1\\xdd\\x87\\x02\\xf15\\xb1&F\\x8fbs\\x16\\xa7[s\\xd2c\\xbc\\x86T\\xe3=\\xde\\x8b\\xd1&\\xae\\xec\\xd3\\xbd\\xef!\\xfe\\x86\\xe1x\\xacL\\x17\\x0e\\xa1\\xc6\\xa08\\xf3\\xd0i\\xa01]\\x1d\\xc9*%:\\xa6\\xd9m-\\xa4\\xd6dDF\\xa3$\\x96\\xa6DE\\xaf\\xc4BdM\\x84\\xda\"\\xfc\\x0b\\xb8\\xc6\\x97f\\x98\\x84\\x0c\\xbf\\xb3\\xc4*\\x9b\\x199n3a\\x90\\xdf\\xdc\\xc7v\\xda\\x1bl\\'\\xbc8n\\xb9\\xf76\\x10\\xd3Hm\\xb2u=\\xe2\\x12H\"\\xd4\\xf5.Z\\t.\\xd356\\xf8f\\xd1\\x97O\\x8bG[1\\xb6\\xc3X\\xce&\\xf2\\xa2\\xa4\\x890\\xa52\\xe2Q\\xe9&_\\x11z\\x0b\\xd2K\\x97\\xc9\\x90:\\xf0Sd\\xec\\xae\\xb2\\xc3i\\x90\\xf3i\\xf3\\xac\\xe7\\xcf\\xafeMW@\\x91\\'XP\\x14\\xb4\\x1a\\x1cu\\xa6\\x88\\x8b\\xee\\x8bA\\x9aMJ5\\x1e\\x8a2-51\\xa9\\xc1\\xff\\x00\\x1bG\\xe7\\xc1.\\x83\\x8a{;\\xc76\\xc5\\x8e\\xe3\\x0cS\\xb4\\xcem\\x17\\x1aRkd\\xa6\\n\\x8c\\xd9\\xacm\\xc4\\xa0\\xda\\'\\xf4\\xd0\\x88\\x95\\xa7\\xb3\\xae\\xbf\\xff\\x00.un\\xd6\\xb0qUc\\xf8\\xa5\\xa6C\\x96F\\xc3l*m\\xfd.\\x9e\\xc6\\xca\\t\\xcd\\x82r{\\x97\\x12m\\xc9h\\xcbt\\xdbR\\re\\xa9\\xa9&G\\xbb\\xba\\xa2=\\x08\\xf7p\\x1df\\x8b\\xc4\\xc2]\\xc6\\x07\\xb4J\\xc5\\xca\\xec\\xe1\\xb4\\x8c\\xd2\\x82\\x0e\\x15B\\xa87p\\xa6i\\rH\\x83\\x15\\xc7\\x10\\xd9F\\xd0\\x8d:\\xb6\\xdb\\xa4\\xca\\xd6\\xd9(\\xbd\\xcb\"\\xf7\\xf3=3d\\x14\\xd4\\xf6\\xfd\\xa86\\xe3zp\\xe3K\\x97\\xff\\x00\\xa0\\xa6<\\xc5\\xb6JR\\x1b8\\tYn\\x19\\x96\\xa9#=\\xd3\\xe5\\xef\\xddO\\xe2!\\xd0`1\\x18v\\x9b\\xcc\\xfeZ\\xcbv\\x0f\\xb0\\xe6\\xdfWh\\x8e\\xd0\\xf2\\x1b\\xd7\\xe0\\xa5[T4\\xd1\\x97\\xdezBj\\xd9\\xef\\xf4\\xfc\\xbe\\xd3z\\xff\\x00\\xd8]\\xae\\xed\\xf0fv\\xe1\\x8c\\xd6\\xd8W!\\xdd\\xa0?W)\\xda\\xc9\\xc7\\x05KSQR\\xa4\\x93\\xc9\\'\\xb4\\xd1:\\x99\\x97#?\\xc7\\xee\\xde\\xe7d\\xc3p\\xba\\xfc\"\\x0c\\xd6 
\\x9b\\x8e\\xbb>s\\xf6S%>dnH\\x90\\xea\\xb7\\x96\\xb5\\x19\\x11\\x17\\xbbu$DZ\\x12P\\x94\\x97\"\\x13\\xc3t\\xd31M\\xbc\\xef\\xde\\xe5\\xd8?l\\xa8\\xeasf\\x942&\\xc4~~%\\x0f&\\xad\\x95\\x93\\xc5a\\xa5=\\xdeU\\xa1\\xd37w\\xdbI\\x19\\xad\\xb4\\xaf\\xbaR\\xd2D~\\xcaU\\xcbMFB\\xbb\\xdc\\x1b5\\xcf6\\xef?\\x08\\xf82e\\x0b\\xbb2cy\\xda\\xf8\\xc4\\xdb\\x0e\\xba\\x93\\x9d\\xed\\x17\\xb2D\\xa3\"&\\xcbx\\xb5\\xd3t\\x8b]S\\xa1v\\xb0\\x0cU\\x87\\x9a\\xab\\xdc\\x89q&\\x13{\\x89\\xe09\\xd6!}\\xb5V\\xe3\\xb1A7g\\x14\\x911\\xabKH\\xc6\\xec&\\\\Kj9\\x8c\\x12\\x8d&\\x94<\\xbd\\xe6U\\xcfCRH\\x88\\x8c\\xfd\\xc2\\xed\\x96\\xe6\\x18F\\x19\\xb7\\xcd\\x98\\xe6s\\xfd\\x16\\x9b\\x04\\xb1\\xc3f\\xd7\\xd6\\xcfz\\n\\x99\\x8e\\x87M\\xf8\\x8e\\xb0\\xce\\xe9\\xa0\\x8d\\xa5whV\\xeaTI>FDZ\\x96\\x83\\xa9@H\\xc2\\x98\\x8bD\\xf6.\\x86\\xcd\\x7f\\x03o\\xbf0\\x91\\xfb\\xb5\\x0b\\xedW\\xfb\\xae\\x1f\\xf7(\\xff\\x00I\\n\\x16k\\xf8\\x1b}\\xf9\\x84\\x8f\\xdd\\xa8_j\\xbf\\xddp\\xff\\x00\\xb9G\\xfaHo\\xc4\\xff\\x00\\r?9\\xfaCt\\xf0z\\x80\\x00|\\xc5\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00W2\\xccM\\xdc\\x8eEl\\x98\\xd6K\\xad\\x93\\x05N)\\x0bK)p\\x94KN\\xe9\\x91\\x91\\x88\\xce\\x06\\xbc\\xf9\\xd4\\xaf\\xd5\\xed\\xf9\\x8b\\xb0\\x0fM>\\'\\x12\\x8ab\\x98\\x98\\xb4yD\\xfdan\\xa4\\xf05\\xe7\\xce\\xa5~\\xafo\\xcc8\\x1a\\xf3\\xe7R\\xbfW\\xb7\\xe6.\\xc07\\xbd\\xe2\\xf9zG\\xb0\\xa4\\xf05\\xe7\\xce\\xa5~\\xafo\\xcc8\\x1a\\xf3\\xe7R\\xbfW\\xb7\\xe6.\\xc0\\x1b\\xde/\\x97\\xa4{\\nO\\x03^|\\xeaW\\xea\\xf6\\xfc\\xc3\\x81\\xaf>u+\\xf5{~b\\xec\\x01\\xbd\\xe2\\xf9zG\\xb0\\xa4\\xf05
\\xe7\\xce\\xa5~\\xafo\\xcc8\\x1a\\xf3\\xe7R\\xbfW\\xb7\\xe6.\\xc0\\x1b\\xde/\\x97\\xa4{\\nO\\x03^|\\xeaW\\xea\\xf6\\xfc\\xc3\\x81\\xaf>u+\\xf5{~b\\xec\\x01\\xbd\\xe2\\xf9zG\\xb0\\xa4\\xf05\\xe7\\xce\\xa5~\\xafo\\xcc8\\x1a\\xf3\\xe7R\\xbfW\\xb7\\xe6.\\xc0\\x1b\\xde/\\x97\\xa4{\\nO\\x03^|\\xeaW\\xea\\xf6\\xfc\\xc3\\x81\\xaf>u+\\xf5{~b\\xec\\x01\\xbd\\xe2\\xf9zG\\xb0\\xa4\\xf05\\xe7\\xce\\xa5~\\xafo\\xcc8\\x1a\\xf3\\xe7R\\xbfW\\xb7\\xe6.\\xc0\\x1b\\xde/\\x97\\xa4{\\nO\\x03^|\\xeaW\\xea\\xf6\\xfc\\xc3\\x81\\xaf>u+\\xf5{~b\\xec\\x01\\xbd\\xe2\\xf9zG\\xb0\\xa4\\xf05\\xe7\\xce\\xa5~\\xafo\\xcc8\\x1a\\xf3\\xe7R\\xbfW\\xb7\\xe6.\\xc0\\x1b\\xde/\\x97\\xa4{\\nO\\x03^|\\xeaW\\xea\\xf6\\xfc\\xc3\\x81\\xaf>u+\\xf5{~b\\xec\\x01\\xbd\\xe2\\xf9zG\\xb0\\xa4\\xf05\\xe7\\xce\\xa5~\\xafo\\xcc8\\x1a\\xf3\\xe7R\\xbfW\\xb7\\xe6.\\xc0\\x1b\\xde/\\x97\\xa4{\\nO\\x03^|\\xeaW\\xea\\xf6\\xfc\\xc3\\x81\\xaf>u+\\xf5{~b\\xec\\x01\\xbd\\xe2\\xf9zG\\xb0\\xa0\\xd8\\xec\\xe2\\xde\\xd2\\xbeL7\\xf2\\x95\\x9b\\x12\\x1aS.\\x12`6G\\xba\\xa22=\\x0f_\\xc4b\\xf3\\x19\\x82\\x8d\\x19\\xa6H\\xf7\\x89\\xb4\\x12\\x08\\xcf\\xe3\\xd0\\xb4\\x1f\\xa8\\x0eX\\x98\\xf5\\xe2\\xc4ES\\xc3\\xca#\\xe8\\\\\\x00\\x01\\xc1\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\xff\\xd9'"
+}
\ No newline at end of file
diff --git a/tests/metagpt/utils/test_mermaid.py b/tests/metagpt/utils/test_mermaid.py
index 367223332..89847994d 100644
--- a/tests/metagpt/utils/test_mermaid.py
+++ b/tests/metagpt/utils/test_mermaid.py
@@ -15,7 +15,7 @@ from metagpt.utils.mermaid import MMC1, mermaid_to_file
@pytest.mark.asyncio
@pytest.mark.parametrize("engine", ["nodejs", "ink"]) # TODO: playwright and pyppeteer
-async def test_mermaid(engine):
+async def test_mermaid(engine, mermaid_mocker):
# nodejs prerequisites: npm install -g @mermaid-js/mermaid-cli
# ink prerequisites: connected to internet
# playwright prerequisites: playwright install --with-deps chromium
diff --git a/tests/mock/mock_aiohttp.py b/tests/mock/mock_aiohttp.py
index 4690bf4b5..805c84b0c 100644
--- a/tests/mock/mock_aiohttp.py
+++ b/tests/mock/mock_aiohttp.py
@@ -10,6 +10,7 @@ class MockAioResponse:
check_funcs: dict[tuple[str, str], Callable[[dict], str]] = {}
rsp_cache: dict[str, str] = {}
name = "aiohttp"
+ status = 200
def __init__(self, session, method, url, **kwargs) -> None:
fn = self.check_funcs.get((method, url))
@@ -21,6 +22,7 @@ class MockAioResponse:
async def __aenter__(self):
if self.response:
await self.response.__aenter__()
+ self.status = self.response.status
elif self.mng:
self.response = await self.mng.__aenter__()
return self
@@ -39,3 +41,14 @@ class MockAioResponse:
data = await self.response.json(*args, **kwargs)
self.rsp_cache[self.key] = data
return data
+
+ @property
+ def content(self):
+ return self
+
+ async def read(self):
+ if self.key in self.rsp_cache:
+ return eval(self.rsp_cache[self.key])
+ data = await self.response.content.read()
+ self.rsp_cache[self.key] = str(data)
+ return data
From 5a2084cda87ba1753345e72889a6c18574092606 Mon Sep 17 00:00:00 2001
From: better629
Date: Thu, 8 Feb 2024 07:32:34 +0800
Subject: [PATCH 035/104] Revert "Feat add qianfan api support"
---
.github/workflows/fulltest.yaml | 1 +
.github/workflows/unittest.yaml | 2 +-
examples/llm_hello_world.py | 13 +-
metagpt/configs/llm_config.py | 8 +-
metagpt/provider/__init__.py | 2 -
metagpt/provider/base_llm.py | 25 +--
metagpt/provider/fireworks_api.py | 15 +-
metagpt/provider/google_gemini_api.py | 10 ++
metagpt/provider/ollama_api.py | 10 ++
metagpt/provider/open_llm_api.py | 13 +-
metagpt/provider/openai_api.py | 17 +-
metagpt/provider/qianfan_api.py | 152 ------------------
metagpt/provider/zhipuai_api.py | 10 ++
metagpt/utils/cost_manager.py | 4 +-
metagpt/utils/token_counter.py | 53 ------
requirements.txt | 1 -
tests/metagpt/provider/mock_llm_config.py | 12 --
tests/metagpt/provider/req_resp_const.py | 117 --------------
tests/metagpt/provider/test_anthropic_api.py | 12 +-
tests/metagpt/provider/test_base_llm.py | 53 +++---
tests/metagpt/provider/test_fireworks_llm.py | 76 ++++++---
.../provider/test_google_gemini_api.py | 36 +++--
tests/metagpt/provider/test_ollama_api.py | 30 ++--
tests/metagpt/provider/test_open_llm_api.py | 76 ++++++---
tests/metagpt/provider/test_qianfan_api.py | 56 -------
tests/metagpt/provider/test_spark_api.py | 44 ++---
tests/metagpt/provider/test_zhipuai_api.py | 38 +++--
tests/spark.yaml | 7 +
28 files changed, 319 insertions(+), 574 deletions(-)
delete mode 100644 metagpt/provider/qianfan_api.py
delete mode 100644 tests/metagpt/provider/req_resp_const.py
delete mode 100644 tests/metagpt/provider/test_qianfan_api.py
create mode 100644 tests/spark.yaml
diff --git a/.github/workflows/fulltest.yaml b/.github/workflows/fulltest.yaml
index 70c800481..f5c6049e1 100644
--- a/.github/workflows/fulltest.yaml
+++ b/.github/workflows/fulltest.yaml
@@ -54,6 +54,7 @@ jobs:
export ALLOW_OPENAI_API_CALL=0
echo "${{ secrets.METAGPT_KEY_YAML }}" | base64 -d > config/key.yaml
mkdir -p ~/.metagpt && echo "${{ secrets.METAGPT_CONFIG2_YAML }}" | base64 -d > ~/.metagpt/config2.yaml
+ echo "${{ secrets.SPARK_YAML }}" | base64 -d > ~/.metagpt/spark.yaml
pytest tests/ --doctest-modules --cov=./metagpt/ --cov-report=xml:cov.xml --cov-report=html:htmlcov --durations=20 | tee unittest.txt
- name: Show coverage report
run: |
diff --git a/.github/workflows/unittest.yaml b/.github/workflows/unittest.yaml
index afa9faba7..2e7e3ce2b 100644
--- a/.github/workflows/unittest.yaml
+++ b/.github/workflows/unittest.yaml
@@ -31,7 +31,7 @@ jobs:
- name: Test with pytest
run: |
export ALLOW_OPENAI_API_CALL=0
- mkdir -p ~/.metagpt && cp tests/config2.yaml ~/.metagpt/config2.yaml
+ mkdir -p ~/.metagpt && cp tests/config2.yaml ~/.metagpt/config2.yaml && cp tests/spark.yaml ~/.metagpt/spark.yaml
pytest tests/ --doctest-modules --cov=./metagpt/ --cov-report=xml:cov.xml --cov-report=html:htmlcov --durations=20 | tee unittest.txt
- name: Show coverage report
run: |
diff --git a/examples/llm_hello_world.py b/examples/llm_hello_world.py
index e22edbdf2..219a303c8 100644
--- a/examples/llm_hello_world.py
+++ b/examples/llm_hello_world.py
@@ -13,18 +13,7 @@ from metagpt.logs import logger
async def main():
llm = LLM()
- # llm type check
- id_ques = "what's your name"
- logger.info(f"{id_ques}: ")
- logger.info(await llm.aask(id_ques))
- logger.info("\n\n")
-
- logger.info(
- await llm.aask(
- "who are you", system_msgs=["act as a robot, answer 'I'am robot' if the question is 'who are you'"]
- )
- )
-
+ logger.info(await llm.aask("hello world"))
logger.info(await llm.aask_batch(["hi", "write python hello world."]))
hello_msg = [{"role": "user", "content": "count from 1 to 10. split by newline."}]
diff --git a/metagpt/configs/llm_config.py b/metagpt/configs/llm_config.py
index 1b05b5270..fb923d3e4 100644
--- a/metagpt/configs/llm_config.py
+++ b/metagpt/configs/llm_config.py
@@ -24,7 +24,6 @@ class LLMType(Enum):
METAGPT = "metagpt"
AZURE = "azure"
OLLAMA = "ollama"
- QIANFAN = "qianfan" # Baidu BCE
def __missing__(self, key):
return self.OPENAI
@@ -37,18 +36,13 @@ class LLMConfig(YamlModel):
Optional Fields in pydantic: https://docs.pydantic.dev/latest/migration/#required-optional-and-nullable-fields
"""
- api_key: str = "sk-"
+ api_key: str
api_type: LLMType = LLMType.OPENAI
base_url: str = "https://api.openai.com/v1"
api_version: Optional[str] = None
model: Optional[str] = None # also stands for DEPLOYMENT_NAME
- # For Cloud Service Provider like Baidu/ Alibaba
- access_key: Optional[str] = None
- secret_key: Optional[str] = None
- endpoint: Optional[str] = None # for self-deployed model on the cloud
-
# For Spark(Xunfei), maybe remove later
app_id: Optional[str] = None
api_secret: Optional[str] = None
diff --git a/metagpt/provider/__init__.py b/metagpt/provider/__init__.py
index 8c0aab836..675734811 100644
--- a/metagpt/provider/__init__.py
+++ b/metagpt/provider/__init__.py
@@ -16,7 +16,6 @@ from metagpt.provider.azure_openai_api import AzureOpenAILLM
from metagpt.provider.metagpt_api import MetaGPTLLM
from metagpt.provider.human_provider import HumanProvider
from metagpt.provider.spark_api import SparkLLM
-from metagpt.provider.qianfan_api import QianFanLLM
__all__ = [
"FireworksLLM",
@@ -29,5 +28,4 @@ __all__ = [
"OllamaLLM",
"HumanProvider",
"SparkLLM",
- "QianFanLLM",
]
diff --git a/metagpt/provider/base_llm.py b/metagpt/provider/base_llm.py
index 2f57b15aa..b144471b5 100644
--- a/metagpt/provider/base_llm.py
+++ b/metagpt/provider/base_llm.py
@@ -11,12 +11,11 @@ from abc import ABC, abstractmethod
from typing import Optional, Union
from openai import AsyncOpenAI
-from pydantic import BaseModel
from metagpt.configs.llm_config import LLMConfig
from metagpt.logs import logger
from metagpt.schema import Message
-from metagpt.utils.cost_manager import CostManager, Costs
+from metagpt.utils.cost_manager import CostManager
class BaseLLM(ABC):
@@ -68,28 +67,6 @@ class BaseLLM(ABC):
def _default_system_msg(self):
return self._system_msg(self.system_prompt)
- def _update_costs(self, usage: Union[dict, BaseModel], model: str = None, local_calc_usage: bool = True):
- """update each request's token cost
- Args:
- model (str): model name or in some scenarios called endpoint
- local_calc_usage (bool): some models don't calculate usage, it will overwrite LLMConfig.calc_usage
- """
- calc_usage = self.config.calc_usage and local_calc_usage
- model = model if model else self.model
- usage = usage.model_dump() if isinstance(usage, BaseModel) else usage
- if calc_usage and self.cost_manager:
- try:
- prompt_tokens = int(usage.get("prompt_tokens", 0))
- completion_tokens = int(usage.get("completion_tokens", 0))
- self.cost_manager.update_cost(prompt_tokens, completion_tokens, model)
- except Exception as e:
- logger.error(f"{self.__class__.__name__} updats costs failed! exp: {e}")
-
- def get_costs(self) -> Costs:
- if not self.cost_manager:
- return Costs(0, 0, 0, 0)
- return self.cost_manager.get_costs()
-
async def aask(
self,
msg: str,
diff --git a/metagpt/provider/fireworks_api.py b/metagpt/provider/fireworks_api.py
index e62a7066e..d56453a85 100644
--- a/metagpt/provider/fireworks_api.py
+++ b/metagpt/provider/fireworks_api.py
@@ -19,7 +19,7 @@ from metagpt.configs.llm_config import LLMConfig, LLMType
from metagpt.logs import logger
from metagpt.provider.llm_provider_registry import register_provider
from metagpt.provider.openai_api import OpenAILLM, log_and_reraise
-from metagpt.utils.cost_manager import CostManager
+from metagpt.utils.cost_manager import CostManager, Costs
MODEL_GRADE_TOKEN_COSTS = {
"-1": {"prompt": 0.0, "completion": 0.0}, # abnormal condition
@@ -81,6 +81,17 @@ class FireworksLLM(OpenAILLM):
kwargs = dict(api_key=self.config.api_key, base_url=self.config.base_url)
return kwargs
+ def _update_costs(self, usage: CompletionUsage):
+ if self.config.calc_usage and usage:
+ try:
+ # use FireworksCostManager not context.cost_manager
+ self.cost_manager.update_cost(usage.prompt_tokens, usage.completion_tokens, self.model)
+ except Exception as e:
+ logger.error(f"updating costs failed!, exp: {e}")
+
+ def get_costs(self) -> Costs:
+ return self.cost_manager.get_costs()
+
async def _achat_completion_stream(self, messages: list[dict], timeout=3) -> str:
response: AsyncStream[ChatCompletionChunk] = await self.aclient.chat.completions.create(
**self._cons_kwargs(messages), stream=True
@@ -102,7 +113,7 @@ class FireworksLLM(OpenAILLM):
usage = CompletionUsage(**chunk.usage)
full_content = "".join(collected_content)
- self._update_costs(usage.model_dump())
+ self._update_costs(usage)
return full_content
@retry(
diff --git a/metagpt/provider/google_gemini_api.py b/metagpt/provider/google_gemini_api.py
index 87ea81c80..2647ab16b 100644
--- a/metagpt/provider/google_gemini_api.py
+++ b/metagpt/provider/google_gemini_api.py
@@ -72,6 +72,16 @@ class GeminiLLM(BaseLLM):
kwargs = {"contents": messages, "generation_config": GenerationConfig(temperature=0.3), "stream": stream}
return kwargs
+ def _update_costs(self, usage: dict):
+ """update each request's token cost"""
+ if self.config.calc_usage:
+ try:
+ prompt_tokens = int(usage.get("prompt_tokens", 0))
+ completion_tokens = int(usage.get("completion_tokens", 0))
+ self.cost_manager.update_cost(prompt_tokens, completion_tokens, self.model)
+ except Exception as e:
+ logger.error(f"google gemini updats costs failed! exp: {e}")
+
def get_choice_text(self, resp: GenerateContentResponse) -> str:
return resp.text
diff --git a/metagpt/provider/ollama_api.py b/metagpt/provider/ollama_api.py
index 52e8dbe36..c9103b018 100644
--- a/metagpt/provider/ollama_api.py
+++ b/metagpt/provider/ollama_api.py
@@ -46,6 +46,16 @@ class OllamaLLM(BaseLLM):
kwargs = {"model": self.model, "messages": messages, "options": {"temperature": 0.3}, "stream": stream}
return kwargs
+ def _update_costs(self, usage: dict):
+ """update each request's token cost"""
+ if self.config.calc_usage:
+ try:
+ prompt_tokens = int(usage.get("prompt_tokens", 0))
+ completion_tokens = int(usage.get("completion_tokens", 0))
+ self._cost_manager.update_cost(prompt_tokens, completion_tokens, self.model)
+ except Exception as e:
+ logger.error(f"ollama updats costs failed! exp: {e}")
+
def get_choice_text(self, resp: dict) -> str:
"""get the resp content from llm response"""
assist_msg = resp.get("message", {})
diff --git a/metagpt/provider/open_llm_api.py b/metagpt/provider/open_llm_api.py
index 69371e379..a29b263a4 100644
--- a/metagpt/provider/open_llm_api.py
+++ b/metagpt/provider/open_llm_api.py
@@ -8,7 +8,7 @@ from metagpt.configs.llm_config import LLMConfig, LLMType
from metagpt.logs import logger
from metagpt.provider.llm_provider_registry import register_provider
from metagpt.provider.openai_api import OpenAILLM
-from metagpt.utils.cost_manager import TokenCostManager
+from metagpt.utils.cost_manager import Costs, TokenCostManager
from metagpt.utils.token_counter import count_message_tokens, count_string_tokens
@@ -34,3 +34,14 @@ class OpenLLM(OpenAILLM):
logger.error(f"usage calculation failed!: {e}")
return usage
+
+ def _update_costs(self, usage: CompletionUsage):
+ if self.config.calc_usage and usage:
+ try:
+ # use OpenLLMCostManager not CONFIG.cost_manager
+ self._cost_manager.update_cost(usage.prompt_tokens, usage.completion_tokens, self.model)
+ except Exception as e:
+ logger.error(f"updating costs failed!, exp: {e}")
+
+ def get_costs(self) -> Costs:
+ return self._cost_manager.get_costs()
diff --git a/metagpt/provider/openai_api.py b/metagpt/provider/openai_api.py
index 2ae14f437..fe41fb05f 100644
--- a/metagpt/provider/openai_api.py
+++ b/metagpt/provider/openai_api.py
@@ -29,7 +29,7 @@ from metagpt.provider.constant import GENERAL_FUNCTION_SCHEMA
from metagpt.provider.llm_provider_registry import register_provider
from metagpt.schema import Message
from metagpt.utils.common import CodeParser, decode_image
-from metagpt.utils.cost_manager import CostManager
+from metagpt.utils.cost_manager import CostManager, Costs
from metagpt.utils.exceptions import handle_exception
from metagpt.utils.token_counter import (
count_message_tokens,
@@ -55,13 +55,16 @@ class OpenAILLM(BaseLLM):
def __init__(self, config: LLMConfig):
self.config = config
+ self._init_model()
self._init_client()
self.auto_max_tokens = False
self.cost_manager: Optional[CostManager] = None
+ def _init_model(self):
+ self.model = self.config.model # Used in _calc_usage & _cons_kwargs
+
def _init_client(self):
"""https://github.com/openai/openai-python#async-usage"""
- self.model = self.config.model # Used in _calc_usage & _cons_kwargs
kwargs = self._make_client_kwargs()
self.aclient = AsyncOpenAI(**kwargs)
@@ -237,6 +240,16 @@ class OpenAILLM(BaseLLM):
return usage
+ @handle_exception
+ def _update_costs(self, usage: CompletionUsage):
+ if self.config.calc_usage and usage and self.cost_manager:
+ self.cost_manager.update_cost(usage.prompt_tokens, usage.completion_tokens, self.model)
+
+ def get_costs(self) -> Costs:
+ if not self.cost_manager:
+ return Costs(0, 0, 0, 0)
+ return self.cost_manager.get_costs()
+
def _get_max_tokens(self, messages: list[dict]):
if not self.auto_max_tokens:
return self.config.max_token
diff --git a/metagpt/provider/qianfan_api.py b/metagpt/provider/qianfan_api.py
deleted file mode 100644
index 6f94b9cea..000000000
--- a/metagpt/provider/qianfan_api.py
+++ /dev/null
@@ -1,152 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# @Desc : llm api of qianfan from Baidu, supports ERNIE(wen xin yi yan) and opensource models
-import copy
-import os
-
-import qianfan
-from qianfan import ChatCompletion
-from qianfan.resources.typing import JsonBody
-from tenacity import (
- after_log,
- retry,
- retry_if_exception_type,
- stop_after_attempt,
- wait_random_exponential,
-)
-
-from metagpt.configs.llm_config import LLMConfig, LLMType
-from metagpt.logs import log_llm_stream, logger
-from metagpt.provider.base_llm import BaseLLM
-from metagpt.provider.llm_provider_registry import register_provider
-from metagpt.provider.openai_api import log_and_reraise
-from metagpt.utils.cost_manager import CostManager
-from metagpt.utils.token_counter import (
- QianFan_EndPoint_TOKEN_COSTS,
- QianFan_MODEL_TOKEN_COSTS,
-)
-
-
-@register_provider(LLMType.QIANFAN)
-class QianFanLLM(BaseLLM):
- """
- Refs
- Auth: https://cloud.baidu.com/doc/WENXINWORKSHOP/s/3lmokh7n6#%E3%80%90%E6%8E%A8%E8%8D%90%E3%80%91%E4%BD%BF%E7%94%A8%E5%AE%89%E5%85%A8%E8%AE%A4%E8%AF%81aksk%E9%89%B4%E6%9D%83%E8%B0%83%E7%94%A8%E6%B5%81%E7%A8%8B
- Token Price: https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7#tokens%E5%90%8E%E4%BB%98%E8%B4%B9
- Models: https://cloud.baidu.com/doc/WENXINWORKSHOP/s/wlmhm7vuo#%E5%AF%B9%E8%AF%9Dchat
- https://cloud.baidu.com/doc/WENXINWORKSHOP/s/xlmokikxe#%E6%94%AF%E6%8C%81%E6%A8%A1%E5%9E%8B%E5%88%97%E8%A1%A8
- """
-
- def __init__(self, config: LLMConfig):
- self.config = config
- self.use_system_prompt = False # only some ERNIE-x related models support system_prompt
- self.__init_qianfan()
- self.cost_manager = CostManager(token_costs=self.token_costs)
-
- def __init_qianfan(self):
- if self.config.access_key and self.config.secret_key:
- # for system level auth, use access_key and secret_key, recommended by official
- # set environment variable due to official recommendation
- os.environ.setdefault("QIANFAN_ACCESS_KEY", self.config.access_key)
- os.environ.setdefault("QIANFAN_SECRET_KEY", self.config.secret_key)
- elif self.config.api_key and self.config.secret_key:
- # for application level auth, use api_key and secret_key
- # set environment variable due to official recommendation
- os.environ.setdefault("QIANFAN_AK", self.config.api_key)
- os.environ.setdefault("QIANFAN_SK", self.config.secret_key)
- else:
- raise ValueError("Set the `access_key`&`secret_key` or `api_key`&`secret_key` first")
-
- support_system_pairs = [
- ("ERNIE-Bot-4", "completions_pro"), # (model, corresponding-endpoint)
- ("ERNIE-Bot-8k", "ernie_bot_8k"),
- ("ERNIE-Bot", "completions"),
- ("ERNIE-Bot-turbo", "eb-instant"),
- ("ERNIE-Speed", "ernie_speed"),
- ("EB-turbo-AppBuilder", "ai_apaas"),
- ]
- if self.config.model in [pair[0] for pair in support_system_pairs]:
- # only some ERNIE models support
- self.use_system_prompt = True
- if self.config.endpoint in [pair[1] for pair in support_system_pairs]:
- self.use_system_prompt = True
-
- assert not (self.config.model and self.config.endpoint), "Only set `model` or `endpoint` in the config"
- assert self.config.model or self.config.endpoint, "Should set one of `model` or `endpoint` in the config"
-
- self.token_costs = copy.deepcopy(QianFan_MODEL_TOKEN_COSTS)
- self.token_costs.update(QianFan_EndPoint_TOKEN_COSTS)
-
- # self deployed model on the cloud not to calculate usage, it charges resource pool rental fee
- self.calc_usage = self.config.calc_usage and self.config.endpoint is None
- self.aclient: ChatCompletion = qianfan.ChatCompletion()
-
- def _const_kwargs(self, messages: list[dict], stream: bool = False) -> dict:
- kwargs = {
- "messages": messages,
- "stream": stream,
- }
- if self.config.temperature > 0:
- # different model has default temperature. only set when it's specified.
- kwargs["temperature"] = self.config.temperature
- if self.config.endpoint:
- kwargs["endpoint"] = self.config.endpoint
- elif self.config.model:
- kwargs["model"] = self.config.model
-
- if self.use_system_prompt:
- # if the model support system prompt, extract and pass it
- if messages[0]["role"] == "system":
- kwargs["messages"] = messages[1:]
- kwargs["system"] = messages[0]["content"] # set system prompt here
- return kwargs
-
- def _update_costs(self, usage: dict):
- """update each request's token cost"""
- model_or_endpoint = self.config.model if self.config.model else self.config.endpoint
- local_calc_usage = True if model_or_endpoint in self.token_costs else False
- super()._update_costs(usage, model_or_endpoint, local_calc_usage)
-
- def get_choice_text(self, resp: JsonBody) -> str:
- return resp.get("result", "")
-
- def completion(self, messages: list[dict]) -> JsonBody:
- resp = self.aclient.do(**self._const_kwargs(messages=messages, stream=False))
- self._update_costs(resp.body.get("usage", {}))
- return resp.body
-
- async def _achat_completion(self, messages: list[dict]) -> JsonBody:
- resp = await self.aclient.ado(**self._const_kwargs(messages=messages, stream=False))
- self._update_costs(resp.body.get("usage", {}))
- return resp.body
-
- async def acompletion(self, messages: list[dict], timeout=3) -> JsonBody:
- return await self._achat_completion(messages)
-
- async def _achat_completion_stream(self, messages: list[dict]) -> str:
- resp = await self.aclient.ado(**self._const_kwargs(messages=messages, stream=True))
- collected_content = []
- usage = {}
- async for chunk in resp:
- content = chunk.body.get("result", "")
- usage = chunk.body.get("usage", {})
- log_llm_stream(content)
- collected_content.append(content)
- log_llm_stream("\n")
-
- self._update_costs(usage)
- full_content = "".join(collected_content)
- return full_content
-
- @retry(
- stop=stop_after_attempt(3),
- wait=wait_random_exponential(min=1, max=60),
- after=after_log(logger, logger.level("WARNING").name),
- retry=retry_if_exception_type(ConnectionError),
- retry_error_callback=log_and_reraise,
- )
- async def acompletion_text(self, messages: list[dict], stream=False, timeout: int = 3) -> str:
- if stream:
- return await self._achat_completion_stream(messages)
- resp = await self._achat_completion(messages)
- return self.get_choice_text(resp)
diff --git a/metagpt/provider/zhipuai_api.py b/metagpt/provider/zhipuai_api.py
index 4cbee4038..9e8e5fb53 100644
--- a/metagpt/provider/zhipuai_api.py
+++ b/metagpt/provider/zhipuai_api.py
@@ -53,6 +53,16 @@ class ZhiPuAILLM(BaseLLM):
kwargs = {"model": self.model, "messages": messages, "stream": stream, "temperature": 0.3}
return kwargs
+ def _update_costs(self, usage: dict):
+ """update each request's token cost"""
+ if self.config.calc_usage:
+ try:
+ prompt_tokens = int(usage.get("prompt_tokens", 0))
+ completion_tokens = int(usage.get("completion_tokens", 0))
+ self.cost_manager.update_cost(prompt_tokens, completion_tokens, self.model)
+ except Exception as e:
+ logger.error(f"zhipuai updats costs failed! exp: {e}")
+
def completion(self, messages: list[dict], timeout=3) -> dict:
resp: Completion = self.llm.chat.completions.create(**self._const_kwargs(messages))
usage = resp.usage.model_dump()
diff --git a/metagpt/utils/cost_manager.py b/metagpt/utils/cost_manager.py
index 4e6b65b2c..c4c93f91f 100644
--- a/metagpt/utils/cost_manager.py
+++ b/metagpt/utils/cost_manager.py
@@ -29,7 +29,6 @@ class CostManager(BaseModel):
total_budget: float = 0
max_budget: float = 10.0
total_cost: float = 0
- token_costs: dict[str, dict[str, float]] = TOKEN_COSTS
def update_cost(self, prompt_tokens, completion_tokens, model):
"""
@@ -47,8 +46,7 @@ class CostManager(BaseModel):
return
cost = (
- prompt_tokens * self.token_costs[model]["prompt"]
- + completion_tokens * self.token_costs[model]["completion"]
+ prompt_tokens * TOKEN_COSTS[model]["prompt"] + completion_tokens * TOKEN_COSTS[model]["completion"]
) / 1000
self.total_cost += cost
logger.info(
diff --git a/metagpt/utils/token_counter.py b/metagpt/utils/token_counter.py
index 2ec0edc99..65f5fe76f 100644
--- a/metagpt/utils/token_counter.py
+++ b/metagpt/utils/token_counter.py
@@ -38,59 +38,6 @@ TOKEN_COSTS = {
}
-"""
-QianFan Token Price https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7#tokens%E5%90%8E%E4%BB%98%E8%B4%B9
-Due to QianFan has multi price strategies, we unify `Tokens post-payment` as a statistical method.
-"""
-QianFan_MODEL_TOKEN_COSTS = {
- "ERNIE-Bot-4": {"prompt": 0.017, "completion": 0.017},
- "ERNIE-Bot-8k": {"prompt": 0.0034, "completion": 0.0067},
- "ERNIE-Bot": {"prompt": 0.017, "completion": 0.017},
- "ERNIE-Bot-turbo": {"prompt": 0.0011, "completion": 0.0011},
- "EB-turbo-AppBuilder": {"prompt": 0.0011, "completion": 0.0011},
- "ERNIE-Speed": {"prompt": 0.00056, "completion": 0.0011},
- "BLOOMZ-7B": {"prompt": 0.00056, "completion": 0.00056},
- "Llama-2-7B-Chat": {"prompt": 0.00056, "completion": 0.00056},
- "Llama-2-13B-Chat": {"prompt": 0.00084, "completion": 0.00084},
- "Llama-2-70B-Chat": {"prompt": 0.0049, "completion": 0.0049},
- "ChatGLM2-6B-32K": {"prompt": 0.00056, "completion": 0.00056},
- "AquilaChat-7B": {"prompt": 0.00056, "completion": 0.00056},
- "Mixtral-8x7B-Instruct": {"prompt": 0.0049, "completion": 0.0049},
- "SQLCoder-7B": {"prompt": 0.00056, "completion": 0.00056},
- "CodeLlama-7B-Instruct": {"prompt": 0.00056, "completion": 0.00056},
- "XuanYuan-70B-Chat-4bit": {"prompt": 0.0049, "completion": 0.0049},
- "Qianfan-BLOOMZ-7B-compressed": {"prompt": 0.00056, "completion": 0.00056},
- "Qianfan-Chinese-Llama-2-7B": {"prompt": 0.00056, "completion": 0.00056},
- "Qianfan-Chinese-Llama-2-13B": {"prompt": 0.00084, "completion": 0.00084},
- "ChatLaw": {"prompt": 0.0011, "completion": 0.0011},
- "Yi-34B-Chat": {"prompt": 0.0, "completion": 0.0},
-}
-
-QianFan_EndPoint_TOKEN_COSTS = {
- "completions_pro": QianFan_MODEL_TOKEN_COSTS["ERNIE-Bot-4"],
- "ernie_bot_8k": QianFan_MODEL_TOKEN_COSTS["ERNIE-Bot-8k"],
- "completions": QianFan_MODEL_TOKEN_COSTS["ERNIE-Bot"],
- "eb-instant": QianFan_MODEL_TOKEN_COSTS["ERNIE-Bot-turbo"],
- "ai_apaas": QianFan_MODEL_TOKEN_COSTS["EB-turbo-AppBuilder"],
- "ernie_speed": QianFan_MODEL_TOKEN_COSTS["ERNIE-Speed"],
- "bloomz_7b1": QianFan_MODEL_TOKEN_COSTS["BLOOMZ-7B"],
- "llama_2_7b": QianFan_MODEL_TOKEN_COSTS["Llama-2-7B-Chat"],
- "llama_2_13b": QianFan_MODEL_TOKEN_COSTS["Llama-2-13B-Chat"],
- "llama_2_70b": QianFan_MODEL_TOKEN_COSTS["Llama-2-70B-Chat"],
- "chatglm2_6b_32k": QianFan_MODEL_TOKEN_COSTS["ChatGLM2-6B-32K"],
- "aquilachat_7b": QianFan_MODEL_TOKEN_COSTS["AquilaChat-7B"],
- "mixtral_8x7b_instruct": QianFan_MODEL_TOKEN_COSTS["Mixtral-8x7B-Instruct"],
- "sqlcoder_7b": QianFan_MODEL_TOKEN_COSTS["SQLCoder-7B"],
- "codellama_7b_instruct": QianFan_MODEL_TOKEN_COSTS["CodeLlama-7B-Instruct"],
- "xuanyuan_70b_chat": QianFan_MODEL_TOKEN_COSTS["XuanYuan-70B-Chat-4bit"],
- "qianfan_bloomz_7b_compressed": QianFan_MODEL_TOKEN_COSTS["Qianfan-BLOOMZ-7B-compressed"],
- "qianfan_chinese_llama_2_7b": QianFan_MODEL_TOKEN_COSTS["Qianfan-Chinese-Llama-2-7B"],
- "qianfan_chinese_llama_2_13b": QianFan_MODEL_TOKEN_COSTS["Qianfan-Chinese-Llama-2-13B"],
- "chatlaw": QianFan_MODEL_TOKEN_COSTS["ChatLaw"],
- "yi_34b_chat": QianFan_MODEL_TOKEN_COSTS["Yi-34B-Chat"],
-}
-
-
TOKEN_MAX = {
"gpt-3.5-turbo": 4096,
"gpt-3.5-turbo-0301": 4096,
diff --git a/requirements.txt b/requirements.txt
index b5d8d7d51..1426500ce 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -67,4 +67,3 @@ playwright>=1.26 # used at metagpt/tools/libs/web_scraping.py
anytree
ipywidgets==8.1.1
Pillow
-qianfan==0.3.1
diff --git a/tests/metagpt/provider/mock_llm_config.py b/tests/metagpt/provider/mock_llm_config.py
index e0afaa51e..e2f626a6a 100644
--- a/tests/metagpt/provider/mock_llm_config.py
+++ b/tests/metagpt/provider/mock_llm_config.py
@@ -42,15 +42,3 @@ mock_llm_config_zhipu = LLMConfig(
model="mock_zhipu_model",
proxy="http://localhost:8080",
)
-
-
-mock_llm_config_spark = LLMConfig(
- api_type="spark",
- app_id="xxx",
- api_key="xxx",
- api_secret="xxx",
- domain="generalv2",
- base_url="wss://spark-api.xf-yun.com/v3.1/chat",
-)
-
-mock_llm_config_qianfan = LLMConfig(api_type="qianfan", access_key="xxx", secret_key="xxx", model="ERNIE-Bot-turbo")
diff --git a/tests/metagpt/provider/req_resp_const.py b/tests/metagpt/provider/req_resp_const.py
deleted file mode 100644
index 73939e1c6..000000000
--- a/tests/metagpt/provider/req_resp_const.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# @Desc : default request & response data for provider unittest
-
-
-from openai.types.chat.chat_completion import (
- ChatCompletion,
- ChatCompletionMessage,
- Choice,
-)
-from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
-from openai.types.chat.chat_completion_chunk import Choice as AChoice
-from openai.types.chat.chat_completion_chunk import ChoiceDelta
-from openai.types.completion_usage import CompletionUsage
-from qianfan.resources.typing import QfResponse
-
-from metagpt.provider.base_llm import BaseLLM
-
-prompt = "who are you?"
-messages = [{"role": "user", "content": prompt}]
-
-resp_cont_tmpl = "I'm {name}"
-default_resp_cont = resp_cont_tmpl.format(name="GPT")
-
-
-# part of whole ChatCompletion of openai like structure
-def get_part_chat_completion(name: str) -> dict:
- part_chat_completion = {
- "choices": [
- {
- "index": 0,
- "message": {
- "role": "assistant",
- "content": resp_cont_tmpl.format(name=name),
- },
- "finish_reason": "stop",
- }
- ],
- "usage": {"completion_tokens": 22, "prompt_tokens": 19, "total_tokens": 41},
- }
- return part_chat_completion
-
-
-def get_openai_chat_completion(name: str) -> ChatCompletion:
- openai_chat_completion = ChatCompletion(
- id="cmpl-a6652c1bb181caae8dd19ad8",
- model="xx/xxx",
- object="chat.completion",
- created=1703300855,
- choices=[
- Choice(
- finish_reason="stop",
- index=0,
- message=ChatCompletionMessage(role="assistant", content=resp_cont_tmpl.format(name=name)),
- logprobs=None,
- )
- ],
- usage=CompletionUsage(completion_tokens=110, prompt_tokens=92, total_tokens=202),
- )
- return openai_chat_completion
-
-
-def get_openai_chat_completion_chunk(name: str, usage_as_dict: bool = False) -> ChatCompletionChunk:
- usage = CompletionUsage(completion_tokens=110, prompt_tokens=92, total_tokens=202)
- usage = usage if not usage_as_dict else usage.model_dump()
- openai_chat_completion_chunk = ChatCompletionChunk(
- id="cmpl-a6652c1bb181caae8dd19ad8",
- model="xx/xxx",
- object="chat.completion.chunk",
- created=1703300855,
- choices=[
- AChoice(
- delta=ChoiceDelta(role="assistant", content=resp_cont_tmpl.format(name=name)),
- finish_reason="stop",
- index=0,
- logprobs=None,
- )
- ],
- usage=usage,
- )
- return openai_chat_completion_chunk
-
-
-# For gemini
-gemini_messages = [{"role": "user", "parts": prompt}]
-
-
-# For QianFan
-qf_jsonbody_dict = {
- "id": "as-4v1h587fyv",
- "object": "chat.completion",
- "created": 1695021339,
- "result": "",
- "is_truncated": False,
- "need_clear_history": False,
- "usage": {"prompt_tokens": 7, "completion_tokens": 15, "total_tokens": 22},
-}
-
-
-def get_qianfan_response(name: str) -> QfResponse:
- qf_jsonbody_dict["result"] = resp_cont_tmpl.format(name=name)
- return QfResponse(code=200, body=qf_jsonbody_dict)
-
-
-# For llm general chat functions call
-async def llm_general_chat_funcs_test(llm: BaseLLM, prompt: str, messages: list[dict], resp_cont: str):
- resp = await llm.aask(prompt, stream=False)
- assert resp == resp_cont
-
- resp = await llm.aask(prompt)
- assert resp == resp_cont
-
- resp = await llm.acompletion_text(messages, stream=False)
- assert resp == resp_cont
-
- resp = await llm.acompletion_text(messages, stream=True)
- assert resp == resp_cont
diff --git a/tests/metagpt/provider/test_anthropic_api.py b/tests/metagpt/provider/test_anthropic_api.py
index 93cfd7dbc..6962ab064 100644
--- a/tests/metagpt/provider/test_anthropic_api.py
+++ b/tests/metagpt/provider/test_anthropic_api.py
@@ -8,25 +8,25 @@ from anthropic.resources.completions import Completion
from metagpt.provider.anthropic_api import Claude2
from tests.metagpt.provider.mock_llm_config import mock_llm_config
-from tests.metagpt.provider.req_resp_const import prompt, resp_cont_tmpl
-resp_cont = resp_cont_tmpl.format(name="Claude")
+prompt = "who are you"
+resp = "I'am Claude2"
def mock_anthropic_completions_create(self, model: str, prompt: str, max_tokens_to_sample: int) -> Completion:
- return Completion(id="xx", completion=resp_cont, model="claude-2", stop_reason="stop_sequence", type="completion")
+ return Completion(id="xx", completion=resp, model="claude-2", stop_reason="stop_sequence", type="completion")
async def mock_anthropic_acompletions_create(self, model: str, prompt: str, max_tokens_to_sample: int) -> Completion:
- return Completion(id="xx", completion=resp_cont, model="claude-2", stop_reason="stop_sequence", type="completion")
+ return Completion(id="xx", completion=resp, model="claude-2", stop_reason="stop_sequence", type="completion")
def test_claude2_ask(mocker):
mocker.patch("anthropic.resources.completions.Completions.create", mock_anthropic_completions_create)
- assert resp_cont == Claude2(mock_llm_config).ask(prompt)
+ assert resp == Claude2(mock_llm_config).ask(prompt)
@pytest.mark.asyncio
async def test_claude2_aask(mocker):
mocker.patch("anthropic.resources.completions.AsyncCompletions.create", mock_anthropic_acompletions_create)
- assert resp_cont == await Claude2(mock_llm_config).aask(prompt)
+ assert resp == await Claude2(mock_llm_config).aask(prompt)
diff --git a/tests/metagpt/provider/test_base_llm.py b/tests/metagpt/provider/test_base_llm.py
index cf44343bc..cc781f78a 100644
--- a/tests/metagpt/provider/test_base_llm.py
+++ b/tests/metagpt/provider/test_base_llm.py
@@ -11,13 +11,21 @@ import pytest
from metagpt.configs.llm_config import LLMConfig
from metagpt.provider.base_llm import BaseLLM
from metagpt.schema import Message
-from tests.metagpt.provider.req_resp_const import (
- default_resp_cont,
- get_part_chat_completion,
- prompt,
-)
-name = "GPT"
+default_chat_resp = {
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "content": "I'am GPT",
+ },
+ "finish_reason": "stop",
+ }
+ ]
+}
+prompt_msg = "who are you"
+resp_content = default_chat_resp["choices"][0]["message"]["content"]
class MockBaseLLM(BaseLLM):
@@ -25,13 +33,16 @@ class MockBaseLLM(BaseLLM):
pass
def completion(self, messages: list[dict], timeout=3):
- return get_part_chat_completion(name)
+ return default_chat_resp
async def acompletion(self, messages: list[dict], timeout=3):
- return get_part_chat_completion(name)
+ return default_chat_resp
async def acompletion_text(self, messages: list[dict], stream=False, timeout=3) -> str:
- return default_resp_cont
+ return resp_content
+
+ async def close(self):
+ return default_chat_resp
def test_base_llm():
@@ -75,25 +86,25 @@ def test_base_llm():
choice_text = base_llm.get_choice_text(openai_funccall_resp)
assert choice_text == openai_funccall_resp["choices"][0]["message"]["content"]
- # resp = base_llm.ask(prompt)
- # assert resp == default_resp_cont
+ # resp = base_llm.ask(prompt_msg)
+ # assert resp == resp_content
- # resp = base_llm.ask_batch([prompt])
- # assert resp == default_resp_cont
+ # resp = base_llm.ask_batch([prompt_msg])
+ # assert resp == resp_content
- # resp = base_llm.ask_code([prompt])
- # assert resp == default_resp_cont
+ # resp = base_llm.ask_code([prompt_msg])
+ # assert resp == resp_content
@pytest.mark.asyncio
async def test_async_base_llm():
base_llm = MockBaseLLM()
- resp = await base_llm.aask(prompt)
- assert resp == default_resp_cont
+ resp = await base_llm.aask(prompt_msg)
+ assert resp == resp_content
- resp = await base_llm.aask_batch([prompt])
- assert resp == default_resp_cont
+ resp = await base_llm.aask_batch([prompt_msg])
+ assert resp == resp_content
- # resp = await base_llm.aask_code([prompt])
- # assert resp == default_resp_cont
+ # resp = await base_llm.aask_code([prompt_msg])
+ # assert resp == resp_content
diff --git a/tests/metagpt/provider/test_fireworks_llm.py b/tests/metagpt/provider/test_fireworks_llm.py
index 1c1aa9caa..66b55e5b2 100644
--- a/tests/metagpt/provider/test_fireworks_llm.py
+++ b/tests/metagpt/provider/test_fireworks_llm.py
@@ -3,7 +3,14 @@
# @Desc : the unittest of fireworks api
import pytest
+from openai.types.chat.chat_completion import (
+ ChatCompletion,
+ ChatCompletionMessage,
+ Choice,
+)
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
+from openai.types.chat.chat_completion_chunk import Choice as AChoice
+from openai.types.chat.chat_completion_chunk import ChoiceDelta
from openai.types.completion_usage import CompletionUsage
from metagpt.provider.fireworks_api import (
@@ -13,19 +20,42 @@ from metagpt.provider.fireworks_api import (
)
from metagpt.utils.cost_manager import Costs
from tests.metagpt.provider.mock_llm_config import mock_llm_config
-from tests.metagpt.provider.req_resp_const import (
- get_openai_chat_completion,
- get_openai_chat_completion_chunk,
- llm_general_chat_funcs_test,
- messages,
- prompt,
- resp_cont_tmpl,
+
+resp_content = "I'm fireworks"
+default_resp = ChatCompletion(
+ id="cmpl-a6652c1bb181caae8dd19ad8",
+ model="accounts/fireworks/models/llama-v2-13b-chat",
+ object="chat.completion",
+ created=1703300855,
+ choices=[
+ Choice(
+ finish_reason="stop",
+ index=0,
+ message=ChatCompletionMessage(role="assistant", content=resp_content),
+ logprobs=None,
+ )
+ ],
+ usage=CompletionUsage(completion_tokens=110, prompt_tokens=92, total_tokens=202),
)
-name = "fireworks"
-resp_cont = resp_cont_tmpl.format(name=name)
-default_resp = get_openai_chat_completion(name)
-default_resp_chunk = get_openai_chat_completion_chunk(name, usage_as_dict=True)
+default_resp_chunk = ChatCompletionChunk(
+ id=default_resp.id,
+ model=default_resp.model,
+ object="chat.completion.chunk",
+ created=default_resp.created,
+ choices=[
+ AChoice(
+ delta=ChoiceDelta(content=resp_content, role="assistant"),
+ finish_reason="stop",
+ index=0,
+ logprobs=None,
+ )
+ ],
+ usage=dict(default_resp.usage),
+)
+
+prompt_msg = "who are you"
+messages = [{"role": "user", "content": prompt_msg}]
def test_fireworks_costmanager():
@@ -58,17 +88,27 @@ async def mock_openai_acompletions_create(self, stream: bool = False, **kwargs)
async def test_fireworks_acompletion(mocker):
mocker.patch("openai.resources.chat.completions.AsyncCompletions.create", mock_openai_acompletions_create)
- fireworks_llm = FireworksLLM(mock_llm_config)
- fireworks_llm.model = "llama-v2-13b-chat"
+ fireworks_gpt = FireworksLLM(mock_llm_config)
+ fireworks_gpt.model = "llama-v2-13b-chat"
- fireworks_llm._update_costs(
+ fireworks_gpt._update_costs(
usage=CompletionUsage(prompt_tokens=500000, completion_tokens=500000, total_tokens=1000000)
)
- assert fireworks_llm.get_costs() == Costs(
+ assert fireworks_gpt.get_costs() == Costs(
total_prompt_tokens=500000, total_completion_tokens=500000, total_cost=0.5, total_budget=0
)
- resp = await fireworks_llm.acompletion(messages)
- assert resp.choices[0].message.content in resp_cont
+ resp = await fireworks_gpt.acompletion(messages)
+ assert resp.choices[0].message.content in resp_content
- await llm_general_chat_funcs_test(fireworks_llm, prompt, messages, resp_cont)
+ resp = await fireworks_gpt.aask(prompt_msg, stream=False)
+ assert resp == resp_content
+
+ resp = await fireworks_gpt.acompletion_text(messages, stream=False)
+ assert resp == resp_content
+
+ resp = await fireworks_gpt.acompletion_text(messages, stream=True)
+ assert resp == resp_content
+
+ resp = await fireworks_gpt.aask(prompt_msg)
+ assert resp == resp_content
diff --git a/tests/metagpt/provider/test_google_gemini_api.py b/tests/metagpt/provider/test_google_gemini_api.py
index 50c15ee19..404ae1e90 100644
--- a/tests/metagpt/provider/test_google_gemini_api.py
+++ b/tests/metagpt/provider/test_google_gemini_api.py
@@ -11,12 +11,6 @@ from google.generativeai.types import content_types
from metagpt.provider.google_gemini_api import GeminiLLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config
-from tests.metagpt.provider.req_resp_const import (
- gemini_messages,
- llm_general_chat_funcs_test,
- prompt,
- resp_cont_tmpl,
-)
@dataclass
@@ -24,8 +18,10 @@ class MockGeminiResponse(ABC):
text: str
-resp_cont = resp_cont_tmpl.format(name="gemini")
-default_resp = MockGeminiResponse(text=resp_cont)
+prompt_msg = "who are you"
+messages = [{"role": "user", "parts": prompt_msg}]
+resp_content = "I'm gemini from google"
+default_resp = MockGeminiResponse(text=resp_content)
def mock_gemini_count_tokens(self, contents: content_types.ContentsType) -> glm.CountTokensResponse:
@@ -64,18 +60,28 @@ async def test_gemini_acompletion(mocker):
mock_gemini_generate_content_async,
)
- gemini_llm = GeminiLLM(mock_llm_config)
+ gemini_gpt = GeminiLLM(mock_llm_config)
- assert gemini_llm._user_msg(prompt) == {"role": "user", "parts": [prompt]}
- assert gemini_llm._assistant_msg(prompt) == {"role": "model", "parts": [prompt]}
+ assert gemini_gpt._user_msg(prompt_msg) == {"role": "user", "parts": [prompt_msg]}
+ assert gemini_gpt._assistant_msg(prompt_msg) == {"role": "model", "parts": [prompt_msg]}
- usage = gemini_llm.get_usage(gemini_messages, resp_cont)
+ usage = gemini_gpt.get_usage(messages, resp_content)
assert usage == {"prompt_tokens": 20, "completion_tokens": 20}
- resp = gemini_llm.completion(gemini_messages)
+ resp = gemini_gpt.completion(messages)
assert resp == default_resp
- resp = await gemini_llm.acompletion(gemini_messages)
+ resp = await gemini_gpt.acompletion(messages)
assert resp.text == default_resp.text
- await llm_general_chat_funcs_test(gemini_llm, prompt, gemini_messages, resp_cont)
+ resp = await gemini_gpt.aask(prompt_msg, stream=False)
+ assert resp == resp_content
+
+ resp = await gemini_gpt.acompletion_text(messages, stream=False)
+ assert resp == resp_content
+
+ resp = await gemini_gpt.acompletion_text(messages, stream=True)
+ assert resp == resp_content
+
+ resp = await gemini_gpt.aask(prompt_msg)
+ assert resp == resp_content
diff --git a/tests/metagpt/provider/test_ollama_api.py b/tests/metagpt/provider/test_ollama_api.py
index af2e929e9..5d942598b 100644
--- a/tests/metagpt/provider/test_ollama_api.py
+++ b/tests/metagpt/provider/test_ollama_api.py
@@ -9,15 +9,12 @@ import pytest
from metagpt.provider.ollama_api import OllamaLLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config
-from tests.metagpt.provider.req_resp_const import (
- llm_general_chat_funcs_test,
- messages,
- prompt,
- resp_cont_tmpl,
-)
-resp_cont = resp_cont_tmpl.format(name="ollama")
-default_resp = {"message": {"role": "assistant", "content": resp_cont}}
+prompt_msg = "who are you"
+messages = [{"role": "user", "content": prompt_msg}]
+
+resp_content = "I'm ollama"
+default_resp = {"message": {"role": "assistant", "content": resp_content}}
async def mock_ollama_arequest(self, stream: bool = False, **kwargs) -> Tuple[Any, Any, bool]:
@@ -44,12 +41,19 @@ async def mock_ollama_arequest(self, stream: bool = False, **kwargs) -> Tuple[An
async def test_gemini_acompletion(mocker):
mocker.patch("metagpt.provider.general_api_requestor.GeneralAPIRequestor.arequest", mock_ollama_arequest)
- ollama_llm = OllamaLLM(mock_llm_config)
+ ollama_gpt = OllamaLLM(mock_llm_config)
- resp = await ollama_llm.acompletion(messages)
+ resp = await ollama_gpt.acompletion(messages)
assert resp["message"]["content"] == default_resp["message"]["content"]
- resp = await ollama_llm.aask(prompt, stream=False)
- assert resp == resp_cont
+ resp = await ollama_gpt.aask(prompt_msg, stream=False)
+ assert resp == resp_content
- await llm_general_chat_funcs_test(ollama_llm, prompt, messages, resp_cont)
+ resp = await ollama_gpt.acompletion_text(messages, stream=False)
+ assert resp == resp_content
+
+ resp = await ollama_gpt.acompletion_text(messages, stream=True)
+ assert resp == resp_content
+
+ resp = await ollama_gpt.aask(prompt_msg)
+ assert resp == resp_content
diff --git a/tests/metagpt/provider/test_open_llm_api.py b/tests/metagpt/provider/test_open_llm_api.py
index aa38b95a6..fc7b510cc 100644
--- a/tests/metagpt/provider/test_open_llm_api.py
+++ b/tests/metagpt/provider/test_open_llm_api.py
@@ -3,26 +3,53 @@
# @Desc :
import pytest
+from openai.types.chat.chat_completion import (
+ ChatCompletion,
+ ChatCompletionMessage,
+ Choice,
+)
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
+from openai.types.chat.chat_completion_chunk import Choice as AChoice
+from openai.types.chat.chat_completion_chunk import ChoiceDelta
from openai.types.completion_usage import CompletionUsage
from metagpt.provider.open_llm_api import OpenLLM
-from metagpt.utils.cost_manager import CostManager, Costs
+from metagpt.utils.cost_manager import Costs
from tests.metagpt.provider.mock_llm_config import mock_llm_config
-from tests.metagpt.provider.req_resp_const import (
- get_openai_chat_completion,
- get_openai_chat_completion_chunk,
- llm_general_chat_funcs_test,
- messages,
- prompt,
- resp_cont_tmpl,
+
+resp_content = "I'm llama2"
+default_resp = ChatCompletion(
+ id="cmpl-a6652c1bb181caae8dd19ad8",
+ model="llama-v2-13b-chat",
+ object="chat.completion",
+ created=1703302755,
+ choices=[
+ Choice(
+ finish_reason="stop",
+ index=0,
+ message=ChatCompletionMessage(role="assistant", content=resp_content),
+ logprobs=None,
+ )
+ ],
)
-name = "llama2-7b"
-resp_cont = resp_cont_tmpl.format(name=name)
-default_resp = get_openai_chat_completion(name)
+default_resp_chunk = ChatCompletionChunk(
+ id=default_resp.id,
+ model=default_resp.model,
+ object="chat.completion.chunk",
+ created=default_resp.created,
+ choices=[
+ AChoice(
+ delta=ChoiceDelta(content=resp_content, role="assistant"),
+ finish_reason="stop",
+ index=0,
+ logprobs=None,
+ )
+ ],
+)
-default_resp_chunk = get_openai_chat_completion_chunk(name)
+prompt_msg = "who are you"
+messages = [{"role": "user", "content": prompt_msg}]
async def mock_openai_acompletions_create(self, stream: bool = False, **kwargs) -> ChatCompletionChunk:
@@ -41,16 +68,25 @@ async def mock_openai_acompletions_create(self, stream: bool = False, **kwargs)
async def test_openllm_acompletion(mocker):
mocker.patch("openai.resources.chat.completions.AsyncCompletions.create", mock_openai_acompletions_create)
- openllm_llm = OpenLLM(mock_llm_config)
- openllm_llm.model = "llama-v2-13b-chat"
+ openllm_gpt = OpenLLM(mock_llm_config)
+ openllm_gpt.model = "llama-v2-13b-chat"
- openllm_llm.cost_manager = CostManager()
- openllm_llm._update_costs(usage=CompletionUsage(prompt_tokens=100, completion_tokens=100, total_tokens=200))
- assert openllm_llm.get_costs() == Costs(
+ openllm_gpt._update_costs(usage=CompletionUsage(prompt_tokens=100, completion_tokens=100, total_tokens=200))
+ assert openllm_gpt.get_costs() == Costs(
total_prompt_tokens=100, total_completion_tokens=100, total_cost=0, total_budget=0
)
- resp = await openllm_llm.acompletion(messages)
- assert resp.choices[0].message.content in resp_cont
+ resp = await openllm_gpt.acompletion(messages)
+ assert resp.choices[0].message.content in resp_content
- await llm_general_chat_funcs_test(openllm_llm, prompt, messages, resp_cont)
+ resp = await openllm_gpt.aask(prompt_msg, stream=False)
+ assert resp == resp_content
+
+ resp = await openllm_gpt.acompletion_text(messages, stream=False)
+ assert resp == resp_content
+
+ resp = await openllm_gpt.acompletion_text(messages, stream=True)
+ assert resp == resp_content
+
+ resp = await openllm_gpt.aask(prompt_msg)
+ assert resp == resp_content
diff --git a/tests/metagpt/provider/test_qianfan_api.py b/tests/metagpt/provider/test_qianfan_api.py
deleted file mode 100644
index 28341425c..000000000
--- a/tests/metagpt/provider/test_qianfan_api.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# @Desc : the unittest of qianfan api
-
-from typing import AsyncIterator, Union
-
-import pytest
-from qianfan.resources.typing import JsonBody, QfResponse
-
-from metagpt.provider.qianfan_api import QianFanLLM
-from tests.metagpt.provider.mock_llm_config import mock_llm_config_qianfan
-from tests.metagpt.provider.req_resp_const import (
- get_qianfan_response,
- llm_general_chat_funcs_test,
- messages,
- prompt,
- resp_cont_tmpl,
-)
-
-name = "ERNIE-Bot-turbo"
-resp_cont = resp_cont_tmpl.format(name=name)
-
-
-def mock_qianfan_do(self, messages: list[dict], model: str, stream: bool = False, system: str = None) -> QfResponse:
- return get_qianfan_response(name=name)
-
-
-async def mock_qianfan_ado(
- self, messages: list[dict], model: str, stream: bool = True, system: str = None
-) -> Union[QfResponse, AsyncIterator[QfResponse]]:
- resps = [get_qianfan_response(name=name)]
- if stream:
-
- async def aresp_iterator(resps: list[JsonBody]):
- for resp in resps:
- yield resp
-
- return aresp_iterator(resps)
- else:
- return resps[0]
-
-
-@pytest.mark.asyncio
-async def test_qianfan_acompletion(mocker):
- mocker.patch("qianfan.resources.llm.chat_completion.ChatCompletion.do", mock_qianfan_do)
- mocker.patch("qianfan.resources.llm.chat_completion.ChatCompletion.ado", mock_qianfan_ado)
-
- qianfan_llm = QianFanLLM(mock_llm_config_qianfan)
-
- resp = qianfan_llm.completion(messages)
- assert resp.get("result") == resp_cont
-
- resp = await qianfan_llm.acompletion(messages)
- assert resp.get("result") == resp_cont
-
- await llm_general_chat_funcs_test(qianfan_llm, prompt, messages, resp_cont)
diff --git a/tests/metagpt/provider/test_spark_api.py b/tests/metagpt/provider/test_spark_api.py
index 9c278267d..f5a6f66fd 100644
--- a/tests/metagpt/provider/test_spark_api.py
+++ b/tests/metagpt/provider/test_spark_api.py
@@ -4,18 +4,12 @@
import pytest
+from metagpt.config2 import Config
from metagpt.provider.spark_api import GetMessageFromWeb, SparkLLM
-from tests.metagpt.provider.mock_llm_config import (
- mock_llm_config,
- mock_llm_config_spark,
-)
-from tests.metagpt.provider.req_resp_const import (
- llm_general_chat_funcs_test,
- prompt,
- resp_cont_tmpl,
-)
+from tests.metagpt.provider.mock_llm_config import mock_llm_config
-resp_cont = resp_cont_tmpl.format(name="Spark")
+prompt_msg = "who are you"
+resp_content = "I'm Spark"
class MockWebSocketApp(object):
@@ -29,7 +23,7 @@ class MockWebSocketApp(object):
def test_get_msg_from_web(mocker):
mocker.patch("websocket.WebSocketApp", MockWebSocketApp)
- get_msg_from_web = GetMessageFromWeb(prompt, mock_llm_config)
+ get_msg_from_web = GetMessageFromWeb(prompt_msg, mock_llm_config)
assert get_msg_from_web.gen_params()["parameter"]["chat"]["domain"] == "mock_domain"
ret = get_msg_from_web.run()
@@ -37,26 +31,34 @@ def test_get_msg_from_web(mocker):
def mock_spark_get_msg_from_web_run(self) -> str:
- return resp_cont
+ return resp_content
@pytest.mark.asyncio
-async def test_spark_aask(mocker):
- mocker.patch("metagpt.provider.spark_api.GetMessageFromWeb.run", mock_spark_get_msg_from_web_run)
-
- llm = SparkLLM(mock_llm_config_spark)
+async def test_spark_aask():
+ llm = SparkLLM(Config.from_home("spark.yaml").llm)
resp = await llm.aask("Hello!")
- assert resp == resp_cont
+ print(resp)
@pytest.mark.asyncio
async def test_spark_acompletion(mocker):
mocker.patch("metagpt.provider.spark_api.GetMessageFromWeb.run", mock_spark_get_msg_from_web_run)
- spark_llm = SparkLLM(mock_llm_config)
+ spark_gpt = SparkLLM(mock_llm_config)
- resp = await spark_llm.acompletion([])
- assert resp == resp_cont
+ resp = await spark_gpt.acompletion([])
+ assert resp == resp_content
- await llm_general_chat_funcs_test(spark_llm, prompt, prompt, resp_cont)
+ resp = await spark_gpt.aask(prompt_msg, stream=False)
+ assert resp == resp_content
+
+ resp = await spark_gpt.acompletion_text([], stream=False)
+ assert resp == resp_content
+
+ resp = await spark_gpt.acompletion_text([], stream=True)
+ assert resp == resp_content
+
+ resp = await spark_gpt.aask(prompt_msg)
+ assert resp == resp_content
diff --git a/tests/metagpt/provider/test_zhipuai_api.py b/tests/metagpt/provider/test_zhipuai_api.py
index c51010122..ad2ececa2 100644
--- a/tests/metagpt/provider/test_zhipuai_api.py
+++ b/tests/metagpt/provider/test_zhipuai_api.py
@@ -6,24 +6,22 @@ import pytest
from metagpt.provider.zhipuai_api import ZhiPuAILLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config_zhipu
-from tests.metagpt.provider.req_resp_const import (
- get_part_chat_completion,
- llm_general_chat_funcs_test,
- messages,
- prompt,
- resp_cont_tmpl,
-)
-name = "ChatGLM-4"
-resp_cont = resp_cont_tmpl.format(name=name)
-default_resp = get_part_chat_completion(name)
+prompt_msg = "who are you"
+messages = [{"role": "user", "content": prompt_msg}]
+
+resp_content = "I'm chatglm-turbo"
+default_resp = {
+ "choices": [{"finish_reason": "stop", "index": 0, "message": {"content": resp_content, "role": "assistant"}}],
+ "usage": {"completion_tokens": 22, "prompt_tokens": 19, "total_tokens": 41},
+}
async def mock_zhipuai_acreate_stream(self, **kwargs):
class MockResponse(object):
async def _aread(self):
class Iterator(object):
- events = [{"choices": [{"index": 0, "delta": {"content": resp_cont, "role": "assistant"}}]}]
+ events = [{"choices": [{"index": 0, "delta": {"content": resp_content, "role": "assistant"}}]}]
async def __aiter__(self):
for event in self.events:
@@ -48,12 +46,22 @@ async def test_zhipuai_acompletion(mocker):
mocker.patch("metagpt.provider.zhipuai.zhipu_model_api.ZhiPuModelAPI.acreate", mock_zhipuai_acreate)
mocker.patch("metagpt.provider.zhipuai.zhipu_model_api.ZhiPuModelAPI.acreate_stream", mock_zhipuai_acreate_stream)
- zhipu_llm = ZhiPuAILLM(mock_llm_config_zhipu)
+ zhipu_gpt = ZhiPuAILLM(mock_llm_config_zhipu)
- resp = await zhipu_llm.acompletion(messages)
- assert resp["choices"][0]["message"]["content"] == resp_cont
+ resp = await zhipu_gpt.acompletion(messages)
+ assert resp["choices"][0]["message"]["content"] == resp_content
- await llm_general_chat_funcs_test(zhipu_llm, prompt, messages, resp_cont)
+ resp = await zhipu_gpt.aask(prompt_msg, stream=False)
+ assert resp == resp_content
+
+ resp = await zhipu_gpt.acompletion_text(messages, stream=False)
+ assert resp == resp_content
+
+ resp = await zhipu_gpt.acompletion_text(messages, stream=True)
+ assert resp == resp_content
+
+ resp = await zhipu_gpt.aask(prompt_msg)
+ assert resp == resp_content
def test_zhipuai_proxy():
diff --git a/tests/spark.yaml b/tests/spark.yaml
new file mode 100644
index 000000000..a5bbd98bd
--- /dev/null
+++ b/tests/spark.yaml
@@ -0,0 +1,7 @@
+llm:
+ api_type: "spark"
+ app_id: "xxx"
+ api_key: "xxx"
+ api_secret: "xxx"
+ domain: "generalv2"
+ base_url: "wss://spark-api.xf-yun.com/v3.1/chat"
\ No newline at end of file
From b0b5c3d1c523295f10ad06e2edfe86f9cdd4c68a Mon Sep 17 00:00:00 2001
From: yzlin
Date: Thu, 8 Feb 2024 11:19:36 +0800
Subject: [PATCH 036/104] update cache
---
tests/data/rsp_cache.json | 34 +++++++++++++++-------------------
1 file changed, 15 insertions(+), 19 deletions(-)
diff --git a/tests/data/rsp_cache.json b/tests/data/rsp_cache.json
index 75fc9ceb2..26a3bd494 100644
--- a/tests/data/rsp_cache.json
+++ b/tests/data/rsp_cache.json
@@ -355,7 +355,7 @@
"code": "print(data.head())"
},
"\n # Context:\n user: run analysis on sklearn iris dataset\n # Task:\n Based on the context, write a plan or modify an existing plan of what you should do to achieve the goal. A plan consists of one to 5 tasks.\n If you are modifying an existing plan, carefully follow the instruction, don't make unnecessary changes. Give the whole plan unless instructed to modify only one task of the plan.\n If you encounter errors on the current task, revise and output the current single task only.\n Output a list of jsons following the format:\n ```json\n [\n {\n \"task_id\": str = \"unique identifier for a task in plan, can be an ordinal\",\n \"dependent_task_ids\": list[str] = \"ids of tasks prerequisite to this task\",\n \"instruction\": \"what you should do in this task, one short phrase or sentence\",\n },\n ...\n ]\n ```\n ": "```json\n[\n {\n \"task_id\": \"1\",\n \"dependent_task_ids\": [],\n \"instruction\": \"Import the Iris dataset from sklearn.datasets\"\n },\n {\n \"task_id\": \"2\",\n \"dependent_task_ids\": [\"1\"],\n \"instruction\": \"Perform exploratory data analysis to understand the dataset\"\n },\n {\n \"task_id\": \"3\",\n \"dependent_task_ids\": [\"2\"],\n \"instruction\": \"Preprocess the data to prepare it for modeling\"\n },\n {\n \"task_id\": \"4\",\n \"dependent_task_ids\": [\"3\"],\n \"instruction\": \"Split the dataset into training and testing sets\"\n },\n {\n \"task_id\": \"5\",\n \"dependent_task_ids\": [\"4\"],\n \"instruction\": \"Train a classifier using the training set and evaluate it using the test set\"\n }\n]\n```",
- "[{\"role\": \"user\", \"content\": \"\\nPlease assign a task type to each task in the list below from the given categories:\\nTask 1: Import the Iris dataset from sklearn.datasets\\nTask 2: Perform exploratory data analysis to understand the dataset\\nTask 3: Preprocess the data to prepare it for modeling\\nTask 4: Split the dataset into training and testing sets\\nTask 5: Train a classifier using the training set and evaluate it using the test set\\n\\n## All Task Type:\\n- **eda**: For performing exploratory data analysis\\n- **data_preprocess**: Only for changing value inplace.\\n- **feature_engineering**: Only for creating new columns for input data.\\n- **model_train**: Only for training model.\\n- **model_evaluate**: Only for evaluating model.\\n- **stable_diffusion**: Related to text2image, image2image using stable diffusion model.\\n- **image2webpage**: For converting image into webpage code.\\n- **web_scraping**: For scraping data from web pages.\\n- **other**: Any tools not in the defined categories\\n\"}]": {
+ "[{\"role\": \"user\", \"content\": \"\\nPlease assign a task type to each task in the list below from the given categories:\\nTask 1: Import the Iris dataset from sklearn.datasets\\nTask 2: Perform exploratory data analysis to understand the dataset\\nTask 3: Preprocess the data to prepare it for modeling\\nTask 4: Split the dataset into training and testing sets\\nTask 5: Train a classifier using the training set and evaluate it using the test set\\n\\n## All Task Type:\\n- **eda**: For performing exploratory data analysis\\n- **data_preprocess**: Only for changing value inplace.\\n- **email_login**: For logging to an email.\\n- **feature_engineering**: Only for creating new columns for input data.\\n- **model_train**: Only for training model.\\n- **model_evaluate**: Only for evaluating model.\\n- **stable_diffusion**: Related to text2image, image2image using stable diffusion model.\\n- **image2webpage**: For converting image into webpage code.\\n- **web_scraping**: For scraping data from web pages.\\n- **other**: Any tools not in the defined categories\\n\"}]": {
"task_type": [
"other",
"eda",
@@ -366,34 +366,30 @@
]
},
"\n # Context:\n user: \n## User Requirement\nRun data analysis on sklearn Iris dataset, include a plot\n## Context\n\n## Current Plan\n[]\n## Current Task\n{}\n\n # Task:\n Based on the context, write a plan or modify an existing plan of what you should do to achieve the goal. A plan consists of one to 3 tasks.\n If you are modifying an existing plan, carefully follow the instruction, don't make unnecessary changes. Give the whole plan unless instructed to modify only one task of the plan.\n If you encounter errors on the current task, revise and output the current single task only.\n Output a list of jsons following the format:\n ```json\n [\n {\n \"task_id\": str = \"unique identifier for a task in plan, can be an ordinal\",\n \"dependent_task_ids\": list[str] = \"ids of tasks prerequisite to this task\",\n \"instruction\": \"what you should do in this task, one short phrase or sentence\",\n },\n ...\n ]\n ```\n ": "```json\n[\n {\n \"task_id\": \"1\",\n \"dependent_task_ids\": [],\n \"instruction\": \"Load the sklearn Iris dataset.\"\n },\n {\n \"task_id\": \"2\",\n \"dependent_task_ids\": [\"1\"],\n \"instruction\": \"Perform exploratory data analysis on the Iris dataset.\"\n },\n {\n \"task_id\": \"3\",\n \"dependent_task_ids\": [\"2\"],\n \"instruction\": \"Create a plot visualizing the Iris dataset.\"\n }\n]\n```",
- "[{\"role\": \"user\", \"content\": \"\\nPlease assign a task type to each task in the list below from the given categories:\\nTask 1: Load the sklearn Iris dataset.\\nTask 2: Perform exploratory data analysis on the Iris dataset.\\nTask 3: Create a plot visualizing the Iris dataset.\\n\\n## All Task Type:\\n- **eda**: For performing exploratory data analysis\\n- **data_preprocess**: Only for changing value inplace.\\n- **feature_engineering**: Only for creating new columns for input data.\\n- **model_train**: Only for training model.\\n- **model_evaluate**: Only for evaluating model.\\n- **stable_diffusion**: Related to text2image, image2image using stable diffusion model.\\n- **image2webpage**: For converting image into webpage code.\\n- **web_scraping**: For scraping data from web pages.\\n- **other**: Any tools not in the defined categories\\n\"}]": {
+ "[{\"role\": \"user\", \"content\": \"\\nPlease assign a task type to each task in the list below from the given categories:\\nTask 1: Load the sklearn Iris dataset.\\nTask 2: Perform exploratory data analysis on the Iris dataset.\\nTask 3: Create a plot visualizing the Iris dataset.\\n\\n## All Task Type:\\n- **eda**: For performing exploratory data analysis\\n- **data_preprocess**: Only for changing value inplace.\\n- **email_login**: For logging to an email.\\n- **feature_engineering**: Only for creating new columns for input data.\\n- **model_train**: Only for training model.\\n- **model_evaluate**: Only for evaluating model.\\n- **stable_diffusion**: Related to text2image, image2image using stable diffusion model.\\n- **image2webpage**: For converting image into webpage code.\\n- **web_scraping**: For scraping data from web pages.\\n- **other**: Any tools not in the defined categories\\n\"}]": {
"task_type": [
- "other",
+ "data_preprocess",
"eda",
"other"
]
},
- "[{\"role\": \"system\", \"content\": \"You are Code Interpreter, a world-class programmer that can complete any goal by executing code. Strictly follow the plan and generate code step by step. Each step of the code will be executed on the user's machine, and the user will provide the code execution results to you.**Notice: The code for the next step depends on the code for the previous step. Must reuse variables in the lastest other code directly, dont creat it again, it is very import for you. Use !pip install in a standalone block to install missing packages.Usually the libraries you need are already installed.Dont check if packages already imported.**\"}, {\"role\": \"user\", \"content\": \"\\n## User Requirement\\nRun data analysis on sklearn Iris dataset, include a plot\\n## Context\\n\\n## Current Plan\\n[\\n {\\n \\\"task_id\\\": \\\"1\\\",\\n \\\"dependent_task_ids\\\": [],\\n \\\"instruction\\\": \\\"Load the sklearn Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n },\\n {\\n \\\"task_id\\\": \\\"2\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"1\\\"\\n ],\\n \\\"instruction\\\": \\\"Perform exploratory data analysis on the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"eda\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n },\\n {\\n \\\"task_id\\\": \\\"3\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"2\\\"\\n ],\\n \\\"instruction\\\": \\\"Create a plot visualizing the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n }\\n]\\n## Current Task\\n{\\\"task_id\\\":\\\"1\\\",\\\"dependent_task_ids\\\":[],\\\"instruction\\\":\\\"Load the sklearn Iris 
dataset.\\\",\\\"task_type\\\":\\\"other\\\",\\\"code\\\":\\\"\\\",\\\"result\\\":\\\"\\\",\\\"is_success\\\":false,\\\"is_finished\\\":false}\\n\"}, {\"role\": \"user\", \"content\": \"\\n# Instruction\\nWrite complete code for 'Current Task'. And avoid duplicating code from finished tasks, such as repeated import of packages, reading data, etc.\\nSpecifically, \\n\\n# Capabilities\\n- You can utilize pre-defined tools in any code lines from 'Available Tools' in the form of Python Class.\\n- You can freely combine the use of any other public packages, like sklearn, numpy, pandas, etc..\\n\\n# Available Tools (can be empty):\\nEach Class tool is described in JSON format. When you call a tool, import the tool first.\\n{}\\n\\n# Constraints:\\n- Ensure the output new code is executable in the same Jupyter notebook with previous tasks code have been executed.\\n- Always prioritize using pre-defined tools for the same functionality.\\n\"}]": {
- "code": "from sklearn.datasets import load_iris\niris_data = load_iris()"
+ "[{\"role\": \"user\", \"content\": \"\\n## User Requirement:\\nLoad the sklearn Iris dataset.\\n\\n## Task\\nRecommend up to five tools from 'Available Tools' that can help solve the 'User Requirement'. \\n\\n## Available Tools:\\n{'FillMissingValue': 'Completing missing values with simple strategies.', 'MinMaxScale': 'Transform features by scaling each feature to a range, which is (0, 1).', 'StandardScale': 'Standardize features by removing the mean and scaling to unit variance.', 'MaxAbsScale': 'Scale each feature by its maximum absolute value.', 'RobustScale': 'Apply the RobustScaler to scale features using statistics that are robust to outliers.', 'OrdinalEncode': 'Encode categorical features as ordinal integers.', 'OneHotEncode': 'Apply one-hot encoding to specified categorical columns, the original columns will be dropped.', 'LabelEncode': 'Apply label encoding to specified categorical columns in-place.'}\\n\\n## Tool Selection and Instructions:\\n- Select tools most relevant to completing the 'User Requirement'.\\n- If you believe that no tools are suitable, indicate with an empty list.\\n- Only list the names of the tools, not the full schema of each tool.\\n- Ensure selected tools are listed in 'Available Tools'.\\n\"}]": {
+ "recommend_tools": []
},
- "[{\"role\": \"system\", \"content\": \"You are Code Interpreter, a world-class programmer that can complete any goal by executing code. Strictly follow the plan and generate code step by step. Each step of the code will be executed on the user's machine, and the user will provide the code execution results to you.**Notice: The code for the next step depends on the code for the previous step. Must reuse variables in the lastest other code directly, dont creat it again, it is very import for you. Use !pip install in a standalone block to install missing packages.Usually the libraries you need are already installed.Dont check if packages already imported.**\"}, {\"role\": \"user\", \"content\": \"\\n## User Requirement\\nRun data analysis on sklearn Iris dataset, include a plot\\n## Context\\n\\n## Current Plan\\n[\\n {\\n \\\"task_id\\\": \\\"1\\\",\\n \\\"dependent_task_ids\\\": [],\\n \\\"instruction\\\": \\\"Load the sklearn Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"a successful run\\\",\\n \\\"is_success\\\": true,\\n \\\"is_finished\\\": true\\n },\\n {\\n \\\"task_id\\\": \\\"2\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"1\\\"\\n ],\\n \\\"instruction\\\": \\\"Perform exploratory data analysis on the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"eda\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n },\\n {\\n \\\"task_id\\\": \\\"3\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"2\\\"\\n ],\\n \\\"instruction\\\": \\\"Create a plot visualizing the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n }\\n]\\n## Current Task\\n{\\\"task_id\\\":\\\"2\\\",\\\"dependent_task_ids\\\":[\\\"1\\\"],\\\"instruction\\\":\\\"Perform exploratory data analysis on the Iris 
dataset.\\\",\\\"task_type\\\":\\\"eda\\\",\\\"code\\\":\\\"\\\",\\\"result\\\":\\\"\\\",\\\"is_success\\\":false,\\\"is_finished\\\":false}\\n\"}, {\"role\": \"user\", \"content\": \"\\n# Instruction\\nWrite complete code for 'Current Task'. And avoid duplicating code from finished tasks, such as repeated import of packages, reading data, etc.\\nSpecifically, \\n\\n# Capabilities\\n- You can utilize pre-defined tools in any code lines from 'Available Tools' in the form of Python Class.\\n- You can freely combine the use of any other public packages, like sklearn, numpy, pandas, etc..\\n\\n# Available Tools (can be empty):\\nEach Class tool is described in JSON format. When you call a tool, import the tool first.\\n{}\\n\\n# Constraints:\\n- Ensure the output new code is executable in the same Jupyter notebook with previous tasks code have been executed.\\n- Always prioritize using pre-defined tools for the same functionality.\\n\"}]": {
- "code": "from sklearn import datasets\nimport pandas as pd\n\n# Load Iris dataset\niris = datasets.load_iris()\ndf_iris = pd.DataFrame(data=iris.data, columns=iris.feature_names)\ndf_iris['target'] = iris.target\n\n# Display basic information about the dataset\nprint(df_iris.info())\n\n# Display statistical summary of the dataset\nprint(df_iris.describe())\n\n# Display the first few rows of the dataset\nprint(df_iris.head())\n\n# Display the distribution of the target variable\ntarget_counts = df_iris['target'].value_counts()\nprint(target_counts)"
+ "[{\"role\": \"system\", \"content\": \"You are Code Interpreter, a world-class programmer that can complete any goal by executing code. Strictly follow the plan and generate code step by step. Each step of the code will be executed on the user's machine, and the user will provide the code execution results to you.**Notice: The code for the next step depends on the code for the previous step. Must reuse variables in the lastest other code directly, dont creat it again, it is very import for you. Use !pip install in a standalone block to install missing packages.Usually the libraries you need are already installed.Dont check if packages already imported.**\"}, {\"role\": \"user\", \"content\": \"\\n## User Requirement\\nRun data analysis on sklearn Iris dataset, include a plot\\n## Context\\n\\n## Current Plan\\n[\\n {\\n \\\"task_id\\\": \\\"1\\\",\\n \\\"dependent_task_ids\\\": [],\\n \\\"instruction\\\": \\\"Load the sklearn Iris dataset.\\\",\\n \\\"task_type\\\": \\\"data_preprocess\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n },\\n {\\n \\\"task_id\\\": \\\"2\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"1\\\"\\n ],\\n \\\"instruction\\\": \\\"Perform exploratory data analysis on the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"eda\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n },\\n {\\n \\\"task_id\\\": \\\"3\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"2\\\"\\n ],\\n \\\"instruction\\\": \\\"Create a plot visualizing the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n }\\n]\\n## Current Task\\n{\\\"task_id\\\":\\\"1\\\",\\\"dependent_task_ids\\\":[],\\\"instruction\\\":\\\"Load the sklearn Iris 
dataset.\\\",\\\"task_type\\\":\\\"data_preprocess\\\",\\\"code\\\":\\\"\\\",\\\"result\\\":\\\"\\\",\\\"is_success\\\":false,\\\"is_finished\\\":false}\\n\"}, {\"role\": \"user\", \"content\": \"\\n# Instruction\\nWrite complete code for 'Current Task'. And avoid duplicating code from finished tasks, such as repeated import of packages, reading data, etc.\\nSpecifically, \\nThe current task is about data preprocessing, please note the following:\\n- Monitor data types per column, applying appropriate methods.\\n- Ensure operations are on existing dataset columns.\\n- Avoid writing processed data to files.\\n- Avoid any change to label column, such as standardization, etc.\\n- Prefer alternatives to one-hot encoding for categorical data.\\n- Only encode or scale necessary columns to allow for potential feature-specific engineering tasks (like time_extract, binning, extraction, etc.) later.\\n- Each step do data preprocessing to train, must do same for test separately at the same time.\\n\\n\\n# Capabilities\\n- You can utilize pre-defined tools in any code lines from 'Available Tools' in the form of Python Class.\\n- You can freely combine the use of any other public packages, like sklearn, numpy, pandas, etc..\\n\\n# Available Tools (can be empty):\\nEach Class tool is described in JSON format. When you call a tool, import the tool first.\\n{}\\n\\n# Constraints:\\n- Ensure the output new code is executable in the same Jupyter notebook with previous tasks code have been executed.\\n- Always prioritize using pre-defined tools for the same functionality.\\n\"}]": {
+ "code": "from sklearn.datasets import load_iris\niris_data = load_iris()\nX, y = iris_data.data, iris_data.target"
},
- "[{\"role\": \"system\", \"content\": \"You are Code Interpreter, a world-class programmer that can complete any goal by executing code. Strictly follow the plan and generate code step by step. Each step of the code will be executed on the user's machine, and the user will provide the code execution results to you.**Notice: The code for the next step depends on the code for the previous step. Must reuse variables in the lastest other code directly, dont creat it again, it is very import for you. Use !pip install in a standalone block to install missing packages.Usually the libraries you need are already installed.Dont check if packages already imported.**\"}, {\"role\": \"user\", \"content\": \"\\n## User Requirement\\nRun data analysis on sklearn Iris dataset, include a plot\\n## Context\\n\\n## Current Plan\\n[\\n {\\n \\\"task_id\\\": \\\"1\\\",\\n \\\"dependent_task_ids\\\": [],\\n \\\"instruction\\\": \\\"Load the sklearn Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"a successful run\\\",\\n \\\"is_success\\\": true,\\n \\\"is_finished\\\": true\\n },\\n {\\n \\\"task_id\\\": \\\"2\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"1\\\"\\n ],\\n \\\"instruction\\\": \\\"Perform exploratory data analysis on the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"eda\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"a successful run\\\",\\n \\\"is_success\\\": true,\\n \\\"is_finished\\\": true\\n },\\n {\\n \\\"task_id\\\": \\\"3\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"2\\\"\\n ],\\n \\\"instruction\\\": \\\"Create a plot visualizing the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n }\\n]\\n## Current Task\\n{\\\"task_id\\\":\\\"3\\\",\\\"dependent_task_ids\\\":[\\\"2\\\"],\\\"instruction\\\":\\\"Create a plot visualizing the Iris 
dataset.\\\",\\\"task_type\\\":\\\"other\\\",\\\"code\\\":\\\"\\\",\\\"result\\\":\\\"\\\",\\\"is_success\\\":false,\\\"is_finished\\\":false}\\n\"}, {\"role\": \"user\", \"content\": \"\\n# Instruction\\nWrite complete code for 'Current Task'. And avoid duplicating code from finished tasks, such as repeated import of packages, reading data, etc.\\nSpecifically, \\n\\n# Capabilities\\n- You can utilize pre-defined tools in any code lines from 'Available Tools' in the form of Python Class.\\n- You can freely combine the use of any other public packages, like sklearn, numpy, pandas, etc..\\n\\n# Available Tools (can be empty):\\nEach Class tool is described in JSON format. When you call a tool, import the tool first.\\n{}\\n\\n# Constraints:\\n- Ensure the output new code is executable in the same Jupyter notebook with previous tasks code have been executed.\\n- Always prioritize using pre-defined tools for the same functionality.\\n\"}]": {
- "code": "from sklearn import datasets\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Load Iris dataset\niris = datasets.load_iris()\niris_df = pd.DataFrame(data=iris.data, columns=iris.feature_names)\niris_df['target'] = iris.target\niris_df['target_name'] = iris_df['target'].apply(lambda x: iris.target_names[x])\n\n# Plotting\nfig, ax = plt.subplots(figsize=(12, 8))\nfor target, target_name in zip(iris.target_names, iris.target_names):\n subset = iris_df[iris_df['target_name'] == target_name]\n ax.scatter(subset[iris.feature_names[0]], subset[iris.feature_names[1]], label=target_name)\n\nax.set_xlabel(iris.feature_names[0])\nax.set_ylabel(iris.feature_names[1])\nax.legend()\nplt.show()"
+ "[{\"role\": \"system\", \"content\": \"You are Code Interpreter, a world-class programmer that can complete any goal by executing code. Strictly follow the plan and generate code step by step. Each step of the code will be executed on the user's machine, and the user will provide the code execution results to you.**Notice: The code for the next step depends on the code for the previous step. Must reuse variables in the lastest other code directly, dont creat it again, it is very import for you. Use !pip install in a standalone block to install missing packages.Usually the libraries you need are already installed.Dont check if packages already imported.**\"}, {\"role\": \"user\", \"content\": \"\\n## User Requirement\\nRun data analysis on sklearn Iris dataset, include a plot\\n## Context\\n\\n## Current Plan\\n[\\n {\\n \\\"task_id\\\": \\\"1\\\",\\n \\\"dependent_task_ids\\\": [],\\n \\\"instruction\\\": \\\"Load the sklearn Iris dataset.\\\",\\n \\\"task_type\\\": \\\"data_preprocess\\\",\\n \\\"code\\\": \\\"from sklearn.datasets import load_iris\\\\niris_data = load_iris()\\\\nX, y = iris_data.data, iris_data.target\\\",\\n \\\"result\\\": \\\"a successful run\\\",\\n \\\"is_success\\\": true,\\n \\\"is_finished\\\": true\\n },\\n {\\n \\\"task_id\\\": \\\"2\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"1\\\"\\n ],\\n \\\"instruction\\\": \\\"Perform exploratory data analysis on the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"eda\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n },\\n {\\n \\\"task_id\\\": \\\"3\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"2\\\"\\n ],\\n \\\"instruction\\\": \\\"Create a plot visualizing the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n }\\n]\\n## Current 
Task\\n{\\\"task_id\\\":\\\"2\\\",\\\"dependent_task_ids\\\":[\\\"1\\\"],\\\"instruction\\\":\\\"Perform exploratory data analysis on the Iris dataset.\\\",\\\"task_type\\\":\\\"eda\\\",\\\"code\\\":\\\"\\\",\\\"result\\\":\\\"\\\",\\\"is_success\\\":false,\\\"is_finished\\\":false}\\n\"}, {\"role\": \"user\", \"content\": \"\\n# Instruction\\nWrite complete code for 'Current Task'. And avoid duplicating code from finished tasks, such as repeated import of packages, reading data, etc.\\nSpecifically, \\nThe current task is about exploratory data analysis, please note the following:\\n- Distinguish column types with `select_dtypes` for tailored analysis and visualization, such as correlation.\\n- Remember to `import numpy as np` before using Numpy functions.\\n\\n\\n# Capabilities\\n- You can utilize pre-defined tools in any code lines from 'Available Tools' in the form of Python Class.\\n- You can freely combine the use of any other public packages, like sklearn, numpy, pandas, etc..\\n\\n# Available Tools (can be empty):\\nEach Class tool is described in JSON format. When you call a tool, import the tool first.\\n{}\\n\\n# Constraints:\\n- Ensure the output new code is executable in the same Jupyter notebook with previous tasks code have been executed.\\n- Always prioritize using pre-defined tools for the same functionality.\\n\"}]": {
+ "code": "import numpy as np\nimport pandas as pd\n\n# Convert the sklearn dataset to a pandas DataFrame for easier manipulation\nfeature_names = iris_data.feature_names\niris_df = pd.DataFrame(X, columns=feature_names)\n\n# Add target column to the DataFrame\niris_df['species'] = pd.Categorical.from_codes(iris_data.target, iris_data.target_names)\n\n# Display basic information about the dataset\niris_info = iris_df.info()\n\n# Display basic statistics about the dataset\niris_description = iris_df.describe()\n\n# Check for missing values\nmissing_values = iris_df.isnull().sum()\n\n# Distinguish column types\nnumerical_cols = iris_df.select_dtypes(include=[np.number]).columns.tolist()\ncategorical_cols = iris_df.select_dtypes(include=['category']).columns.tolist()\n\n# Display the first few rows of the DataFrame\nhead = iris_df.head()\n\n# Output the results\n(iris_info, iris_description, missing_values, numerical_cols, categorical_cols, head)"
},
- "[{\"role\": \"user\", \"content\": \"\\n# Background\\nAs a data scientist, you need to help user to achieve their goal [This is a titanic passenger survival dataset, your goal is to predict passenger survival outcome. The target column is Survived. Perform data analysis, data preprocessing, feature engineering, and modeling to predict the target. Report accuracy on the eval data. Train data path: 'tests/data/ml_datasets/titanic/split_train.csv', eval data path: 'tests/data/ml_datasets/titanic/split_eval.csv'.] step-by-step in an continuous Jupyter notebook.\\n\\n## Done Tasks\\n```python\\n\\n```end\\n\\n## Current Task\\nPerform exploratory data analysis on the train dataset to understand the features and target variable.\\n\\n# Latest Data Info\\nLatest data info after previous tasks:\\n\\n\\n# Task\\nWrite complete code for 'Current Task'. And avoid duplicating code from 'Done Tasks', such as repeated import of packages, reading data, etc.\\nSpecifically, \\n\\n# Output Example:\\nwhen current task is \\\"train a lightgbm model on training data\\\", the code can be like:\\n```python\\n# Step 1: check data type and convert to numeric\\nobj_cols = train.select_dtypes(include='object').columns.tolist()\\n\\nfor col in obj_cols:\\n encoder = LabelEncoder()\\n train[col] = encoder.fit_transform(train[col].unique().tolist() + ['unknown'])\\n test[col] = test[col].apply(lambda x: x if x in encoder.classes_ else 'unknown')\\n test[col] = encoder.transform(test[col])\\n\\n# Step 2: train lightgbm model\\nmodel = LGBMClassifier()\\nmodel.fit(train, y_train)\\n```end\\n\\n# Constraints:\\n- Ensure the output new code is executable in the same Jupyter notebook with previous tasks code have been executed.\\n\"}]": {
- "code": "# Perform exploratory data analysis on the train dataset\ndf_train = pd.read_csv('tests/data/ml_datasets/titanic/split_train.csv')\n\n# Display the first few rows of the dataset\ndisplay(df_train.head())\n\n# Summary statistics for numerical features\ndisplay(df_train.describe())\n\n# Summary information about the dataset including the data types and number of non-null values\ndisplay(df_train.info())\n\n# Distribution of the target variable 'Survived'\nsurvival_counts = df_train['Survived'].value_counts()\nprint(\"Survival counts:\\n\", survival_counts)\n\n# Visualizations\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Distribution of the target variable\nsns.countplot(x='Survived', data=df_train)\nplt.title('Distribution of Survival')\nplt.show()\n\n# Correlation matrix heatmap to understand the relationship between features\nplt.figure(figsize=(10, 8))\nsns.heatmap(df_train.corr(), annot=True, fmt='.2f')\nplt.title('Correlation Matrix')\nplt.show()\n\n# Pairplot to visualize the pairwise relationships between features\nsns.pairplot(df_train, hue='Survived')\nplt.title('Pairplot of Features')\nplt.show()"
+ "[{\"role\": \"system\", \"content\": \"You are Code Interpreter, a world-class programmer that can complete any goal by executing code. Strictly follow the plan and generate code step by step. Each step of the code will be executed on the user's machine, and the user will provide the code execution results to you.**Notice: The code for the next step depends on the code for the previous step. Must reuse variables in the lastest other code directly, dont creat it again, it is very import for you. Use !pip install in a standalone block to install missing packages.Usually the libraries you need are already installed.Dont check if packages already imported.**\"}, {\"role\": \"user\", \"content\": \"\\n## User Requirement\\nRun data analysis on sklearn Iris dataset, include a plot\\n## Context\\n\\n## Current Plan\\n[\\n {\\n \\\"task_id\\\": \\\"1\\\",\\n \\\"dependent_task_ids\\\": [],\\n \\\"instruction\\\": \\\"Load the sklearn Iris dataset.\\\",\\n \\\"task_type\\\": \\\"data_preprocess\\\",\\n \\\"code\\\": \\\"from sklearn.datasets import load_iris\\\\niris_data = load_iris()\\\\nX, y = iris_data.data, iris_data.target\\\",\\n \\\"result\\\": \\\"a successful run\\\",\\n \\\"is_success\\\": true,\\n \\\"is_finished\\\": true\\n },\\n {\\n \\\"task_id\\\": \\\"2\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"1\\\"\\n ],\\n \\\"instruction\\\": \\\"Perform exploratory data analysis on the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"eda\\\",\\n \\\"code\\\": \\\"import numpy as np\\\\nimport pandas as pd\\\\n\\\\n# Convert the sklearn dataset to a pandas DataFrame for easier manipulation\\\\nfeature_names = iris_data.feature_names\\\\niris_df = pd.DataFrame(X, columns=feature_names)\\\\n\\\\n# Add target column to the DataFrame\\\\niris_df['species'] = pd.Categorical.from_codes(iris_data.target, iris_data.target_names)\\\\n\\\\n# Display basic information about the dataset\\\\niris_info = iris_df.info()\\\\n\\\\n# Display basic statistics about the 
dataset\\\\niris_description = iris_df.describe()\\\\n\\\\n# Check for missing values\\\\nmissing_values = iris_df.isnull().sum()\\\\n\\\\n# Distinguish column types\\\\nnumerical_cols = iris_df.select_dtypes(include=[np.number]).columns.tolist()\\\\ncategorical_cols = iris_df.select_dtypes(include=['category']).columns.tolist()\\\\n\\\\n# Display the first few rows of the DataFrame\\\\nhead = iris_df.head()\\\\n\\\\n# Output the results\\\\n(iris_info, iris_description, missing_values, numerical_cols, categorical_cols, head)\\\",\\n \\\"result\\\": \\\"a successful run\\\",\\n \\\"is_success\\\": true,\\n \\\"is_finished\\\": true\\n },\\n {\\n \\\"task_id\\\": \\\"3\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"2\\\"\\n ],\\n \\\"instruction\\\": \\\"Create a plot visualizing the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n }\\n]\\n## Current Task\\n{\\\"task_id\\\":\\\"3\\\",\\\"dependent_task_ids\\\":[\\\"2\\\"],\\\"instruction\\\":\\\"Create a plot visualizing the Iris dataset.\\\",\\\"task_type\\\":\\\"other\\\",\\\"code\\\":\\\"\\\",\\\"result\\\":\\\"\\\",\\\"is_success\\\":false,\\\"is_finished\\\":false}\\n\"}, {\"role\": \"user\", \"content\": \"\\n# Instruction\\nWrite complete code for 'Current Task'. And avoid duplicating code from finished tasks, such as repeated import of packages, reading data, etc.\\nSpecifically, \\n\\n# Capabilities\\n- You can utilize pre-defined tools in any code lines from 'Available Tools' in the form of Python Class.\\n- You can freely combine the use of any other public packages, like sklearn, numpy, pandas, etc..\\n\\n# Available Tools (can be empty):\\nEach Class tool is described in JSON format. 
When you call a tool, import the tool first.\\n{}\\n\\n# Constraints:\\n- Ensure the output new code is executable in the same Jupyter notebook with previous tasks code have been executed.\\n- Always prioritize using pre-defined tools for the same functionality.\\n\"}]": {
+ "code": "import matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Set the style of seaborn\nsns.set(style='whitegrid')\n\n# Create a pairplot to visualize the relationships between the features\npairplot = sns.pairplot(iris_df, hue='species')\nplt.show()"
+ },
+ "[{\"role\": \"user\", \"content\": \"\\n# Background\\nAs a data scientist, you need to help user to achieve their goal [This is a titanic passenger survival dataset, your goal is to predict passenger survival outcome. The target column is Survived. Perform data analysis, data preprocessing, feature engineering, and modeling to predict the target. Report accuracy on the eval data. Train data path: 'tests/data/ml_datasets/titanic/split_train.csv', eval data path: 'tests/data/ml_datasets/titanic/split_eval.csv'.] step-by-step in an continuous Jupyter notebook.\\n\\n## Done Tasks\\n```python\\n\\n```end\\n\\n## Current Task\\nPerform exploratory data analysis on the train dataset to understand the features and target variable.\\n\\n# Latest Data Info\\nLatest data info after previous tasks:\\n\\n\\n# Task\\nWrite complete code for 'Current Task'. And avoid duplicating code from 'Done Tasks', such as repeated import of packages, reading data, etc.\\nSpecifically, \\nThe current task is about exploratory data analysis, please note the following:\\n- Distinguish column types with `select_dtypes` for tailored analysis and visualization, such as correlation.\\n- Remember to `import numpy as np` before using Numpy functions.\\n\\n\\n# Output Example:\\nwhen current task is \\\"train a lightgbm model on training data\\\", the code can be like:\\n```python\\n# Step 1: check data type and convert to numeric\\nobj_cols = train.select_dtypes(include='object').columns.tolist()\\n\\nfor col in obj_cols:\\n encoder = LabelEncoder()\\n train[col] = encoder.fit_transform(train[col].unique().tolist() + ['unknown'])\\n test[col] = test[col].apply(lambda x: x if x in encoder.classes_ else 'unknown')\\n test[col] = encoder.transform(test[col])\\n\\n# Step 2: train lightgbm model\\nmodel = LGBMClassifier()\\nmodel.fit(train, y_train)\\n```end\\n\\n# Constraints:\\n- Ensure the output new code is executable in the same Jupyter notebook with previous tasks code have been 
executed.\\n\"}]": {
+ "code": "# Perform exploratory data analysis on the train dataset\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Read the train dataset\ntrain_data = pd.read_csv('tests/data/ml_datasets/titanic/split_train.csv')\n\n# Display the first few rows of the dataset\ndisplay(train_data.head())\n\n# Summary statistics for numerical features\nprint(train_data.describe())\n\n# Summary statistics for categorical features\nprint(train_data.describe(include=['O']))\n\n# Check for missing values\nprint(train_data.isnull().sum())\n\n# Distribution of the target variable\nsns.countplot(x='Survived', data=train_data)\nplt.title('Distribution of Survival on the Titanic')\nplt.show()\n\n# Correlation matrix for numerical features\nnumerical_features = train_data.select_dtypes(include=[np.number])\ncorrelation_matrix = numerical_features.corr()\nplt.figure(figsize=(10, 8))\nsns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', center=0)\nplt.title('Correlation Matrix for Numerical Features')\nplt.show()\n\n# Pairplot for selected features\nselected_features = ['Survived', 'Pclass', 'Age', 'SibSp', 'Parch', 'Fare']\nsns.pairplot(train_data[selected_features], hue='Survived')\nplt.show()\n\n# Boxplot for categorical features vs Survived\nfor column in train_data.select_dtypes(include=['O']).columns:\n if column != 'Survived':\n plt.figure(figsize=(10, 5))\n sns.boxplot(x='Survived', y=column, data=train_data)\n plt.title(f'Survived vs {column}')\n plt.show()\n"
},
"[{\"role\": \"system\", \"content\": \"You are an AI Python assistant. You will be given your previous implementation code of a task, runtime error results, and a hint to change the implementation appropriately. Write your full implementation \"}, {\"role\": \"user\", \"content\": \"\\nHere is an example for you.\\n\\nExample 1:\\n[previous impl]:\\n```python\\ndef add(a: int, b: int) -> int:\\n \\\"\\\"\\\"\\n Given integers a and b, return the total value of a and b.\\n \\\"\\\"\\\"\\n return a - b\\n```\\n\\n[runtime Error]:\\nTested passed:\\n\\nTests failed:\\nassert add(1, 2) == 3 # output: -1\\nassert add(1, 2) == 4 # output: -1\\n\\n[reflection on previous impl]:\\nThe implementation failed the test cases where the input integers are 1 and 2. The issue arises because the code does not add the two integers together, but instead subtracts the second integer from the first. To fix this issue, we should change the operator from `-` to `+` in the return statement. This will ensure that the function returns the correct output for the given input.\\n\\n[improved impl]:\\n```python\\ndef add(a: int, b: int) -> int:\\n \\\"\\\"\\\"\\n Given integers a and b, return the total value of a and b.\\n \\\"\\\"\\\"\\n return a + b\\n```\\n\\n[context]\\nSolve the problem in Python:\\ndef sort_array(arr):\\n \\\"\\\"\\\"\\n In this Kata, you have to sort an array of non-negative integers according to\\n number of ones in their binary representation in ascending order.\\n For similar number of ones, sort based on decimal value.\\n\\n It must be implemented like this:\\n >>> sort_array([1, 5, 2, 3, 4]) == [1, 2, 3, 4, 5]\\n >>> sort_array([-2, -3, -4, -5, -6]) == [-6, -5, -4, -3, -2]\\n >>> sort_array([1, 0, 2, 3, 4]) [0, 1, 2, 3, 4]\\n \\\"\\\"\\\"\\n\\n\\n[previous impl]\\n\\ndef sort_array(arr):\\n # Helper function to count the number of ones in the binary representation\\n def count_ones(n):\\n return bin(n).count('1')\\n \\n # Sort the array using a custom key 
function\\n # The key function returns a tuple (number of ones, value) for each element\\n # This ensures that if two elements have the same number of ones, they are sorted by their value\\n sorted_arr = sorted(arr, key=lambda x: (count_ones(x), x))\\n \\n return sorted_arr\\n```\\n\\n[runtime Error]\\n[user: Tested passed:\\n\\nTests failed:\\nassert sort_array([1, 5, 2, 3, 4]) == [1, 2, 3, 4, 5] # output: [1, 2, 4, 3, 5]\\n]\\n\\nAnalysis the error step by step, provide me improve method and code. Remember to follow [context] requirement. Don't forget write code for steps behind the error step.\\n[reflection on previous impl]:\\nxxx\\n\"}]": {
"reflection": "The implementation failed the test case where the input array is [1, 5, 2, 3, 4]. The issue arises because the code does not handle the sorting of elements with the same number of ones in their binary representation correctly. According to the context, for similar number of ones, the elements should be sorted based on their decimal value in ascending order. However, the output [1, 2, 4, 3, 5] shows that 3 and 4 are not in the correct order, indicating that the sorting by decimal value is not functioning as intended. To fix this issue, we should ensure that the sorting is stable, so that when two elements have the same number of ones, they maintain their relative order if their decimal values are already in ascending order.",
"improved_impl": "def sort_array(arr):\n # Helper function to count the number of ones in the binary representation\n def count_ones(n):\n return bin(n).count('1')\n \n # Sort the array using a custom key function\n # The key function returns a tuple (number of ones, value) for each element\n # This ensures that if two elements have the same number of ones, they are sorted by their value\n # We use a stable sorting algorithm to maintain the relative order of elements with the same number of ones\n sorted_arr = sorted(arr, key=lambda x: (count_ones(x), x), reverse=True)\n sorted_arr = sorted(sorted_arr, key=count_ones)\n \n return sorted_arr\n"
- },
- "[{\"role\": \"system\", \"content\": \"You are Code Interpreter, a world-class programmer that can complete any goal by executing code. Strictly follow the plan and generate code step by step. Each step of the code will be executed on the user's machine, and the user will provide the code execution results to you.**Notice: The code for the next step depends on the code for the previous step. Must reuse variables in the lastest other code directly, dont creat it again, it is very import for you. Use !pip install in a standalone block to install missing packages.Usually the libraries you need are already installed.Dont check if packages already imported.**\"}, {\"role\": \"user\", \"content\": \"\\n## User Requirement\\nRun data analysis on sklearn Iris dataset, include a plot\\n## Context\\n\\n## Current Plan\\n[\\n {\\n \\\"task_id\\\": \\\"1\\\",\\n \\\"dependent_task_ids\\\": [],\\n \\\"instruction\\\": \\\"Load the sklearn Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"from sklearn.datasets import load_iris\\\\niris_data = load_iris()\\\",\\n \\\"result\\\": \\\"a successful run\\\",\\n \\\"is_success\\\": true,\\n \\\"is_finished\\\": true\\n },\\n {\\n \\\"task_id\\\": \\\"2\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"1\\\"\\n ],\\n \\\"instruction\\\": \\\"Perform exploratory data analysis on the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"eda\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n },\\n {\\n \\\"task_id\\\": \\\"3\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"2\\\"\\n ],\\n \\\"instruction\\\": \\\"Create a plot visualizing the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n }\\n]\\n## Current Task\\n{\\\"task_id\\\":\\\"2\\\",\\\"dependent_task_ids\\\":[\\\"1\\\"],\\\"instruction\\\":\\\"Perform exploratory data 
analysis on the Iris dataset.\\\",\\\"task_type\\\":\\\"eda\\\",\\\"code\\\":\\\"\\\",\\\"result\\\":\\\"\\\",\\\"is_success\\\":false,\\\"is_finished\\\":false}\\n\"}, {\"role\": \"user\", \"content\": \"\\n# Instruction\\nWrite complete code for 'Current Task'. And avoid duplicating code from finished tasks, such as repeated import of packages, reading data, etc.\\nSpecifically, \\n\\n# Capabilities\\n- You can utilize pre-defined tools in any code lines from 'Available Tools' in the form of Python Class.\\n- You can freely combine the use of any other public packages, like sklearn, numpy, pandas, etc..\\n\\n# Available Tools (can be empty):\\nEach Class tool is described in JSON format. When you call a tool, import the tool first.\\n{}\\n\\n# Constraints:\\n- Ensure the output new code is executable in the same Jupyter notebook with previous tasks code have been executed.\\n- Always prioritize using pre-defined tools for the same functionality.\\n\"}]": {
- "code": "import pandas as pd\n\n# Convert the Iris dataset to a DataFrame for easier manipulation\niris_df = pd.DataFrame(data=iris_data.data, columns=iris_data.feature_names)\niris_df['target'] = iris_data.target\niris_df['target_names'] = iris_df['target'].apply(lambda x: iris_data.target_names[x])\n\n# Display basic information about the dataset\niris_df.info()\n\n# Display statistical summary of the dataset\niris_df.describe()\n\n# Display the first few rows of the dataset\ndisplay(iris_df.head())"
- },
- "[{\"role\": \"system\", \"content\": \"You are Code Interpreter, a world-class programmer that can complete any goal by executing code. Strictly follow the plan and generate code step by step. Each step of the code will be executed on the user's machine, and the user will provide the code execution results to you.**Notice: The code for the next step depends on the code for the previous step. Must reuse variables in the lastest other code directly, dont creat it again, it is very import for you. Use !pip install in a standalone block to install missing packages.Usually the libraries you need are already installed.Dont check if packages already imported.**\"}, {\"role\": \"user\", \"content\": \"\\n## User Requirement\\nRun data analysis on sklearn Iris dataset, include a plot\\n## Context\\n\\n## Current Plan\\n[\\n {\\n \\\"task_id\\\": \\\"1\\\",\\n \\\"dependent_task_ids\\\": [],\\n \\\"instruction\\\": \\\"Load the sklearn Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"from sklearn.datasets import load_iris\\\\niris_data = load_iris()\\\",\\n \\\"result\\\": \\\"a successful run\\\",\\n \\\"is_success\\\": true,\\n \\\"is_finished\\\": true\\n },\\n {\\n \\\"task_id\\\": \\\"2\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"1\\\"\\n ],\\n \\\"instruction\\\": \\\"Perform exploratory data analysis on the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"eda\\\",\\n \\\"code\\\": \\\"import pandas as pd\\\\n\\\\n# Convert the Iris dataset to a DataFrame for easier manipulation\\\\niris_df = pd.DataFrame(data=iris_data.data, columns=iris_data.feature_names)\\\\niris_df['target'] = iris_data.target\\\\niris_df['target_names'] = iris_df['target'].apply(lambda x: iris_data.target_names[x])\\\\n\\\\n# Display basic information about the dataset\\\\niris_df.info()\\\\n\\\\n# Display statistical summary of the dataset\\\\niris_df.describe()\\\\n\\\\n# Display the first few rows of the dataset\\\\ndisplay(iris_df.head())\\\",\\n \\\"result\\\": 
\\\"a successful run\\\",\\n \\\"is_success\\\": true,\\n \\\"is_finished\\\": true\\n },\\n {\\n \\\"task_id\\\": \\\"3\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"2\\\"\\n ],\\n \\\"instruction\\\": \\\"Create a plot visualizing the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n }\\n]\\n## Current Task\\n{\\\"task_id\\\":\\\"3\\\",\\\"dependent_task_ids\\\":[\\\"2\\\"],\\\"instruction\\\":\\\"Create a plot visualizing the Iris dataset.\\\",\\\"task_type\\\":\\\"other\\\",\\\"code\\\":\\\"\\\",\\\"result\\\":\\\"\\\",\\\"is_success\\\":false,\\\"is_finished\\\":false}\\n\"}, {\"role\": \"user\", \"content\": \"\\n# Instruction\\nWrite complete code for 'Current Task'. And avoid duplicating code from finished tasks, such as repeated import of packages, reading data, etc.\\nSpecifically, \\n\\n# Capabilities\\n- You can utilize pre-defined tools in any code lines from 'Available Tools' in the form of Python Class.\\n- You can freely combine the use of any other public packages, like sklearn, numpy, pandas, etc..\\n\\n# Available Tools (can be empty):\\nEach Class tool is described in JSON format. When you call a tool, import the tool first.\\n{}\\n\\n# Constraints:\\n- Ensure the output new code is executable in the same Jupyter notebook with previous tasks code have been executed.\\n- Always prioritize using pre-defined tools for the same functionality.\\n\"}]": {
- "code": "import matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Set the style of seaborn\nsns.set_style('whitegrid')\n\n# Pairplot to visualize the relationships between features\nsns.pairplot(iris_df, hue='target_names', markers='+')\nplt.show()"
- },
- "\n## context\n\n\n-----\n\n## format example\n[CONTENT]\n{\n \"invoice\": \"False\"\n}\n[/CONTENT]\n\n## nodes: \": # \"\n- invoice: # if it's a invoice file, return True else False\n\n\n## constraint\nLanguage: Please use the same language as Human INPUT.\nFormat: output wrapped inside [CONTENT][/CONTENT] like format example, nothing else.\n\n## action\nFollow instructions of nodes, generate output and make sure it follows the format example.\n": "[CONTENT]\n{\n \"invoice\": \"True\"\n}\n[/CONTENT]"
+ }
}
\ No newline at end of file
From 27c82c17e1230f50dcd6f126ebbaa882c0a15d56 Mon Sep 17 00:00:00 2001
From: yzlin
Date: Thu, 8 Feb 2024 11:50:10 +0800
Subject: [PATCH 037/104] recover a cache for other tests
---
tests/data/rsp_cache.json | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/tests/data/rsp_cache.json b/tests/data/rsp_cache.json
index 26a3bd494..d3058fbd0 100644
--- a/tests/data/rsp_cache.json
+++ b/tests/data/rsp_cache.json
@@ -391,5 +391,6 @@
"[{\"role\": \"system\", \"content\": \"You are an AI Python assistant. You will be given your previous implementation code of a task, runtime error results, and a hint to change the implementation appropriately. Write your full implementation \"}, {\"role\": \"user\", \"content\": \"\\nHere is an example for you.\\n\\nExample 1:\\n[previous impl]:\\n```python\\ndef add(a: int, b: int) -> int:\\n \\\"\\\"\\\"\\n Given integers a and b, return the total value of a and b.\\n \\\"\\\"\\\"\\n return a - b\\n```\\n\\n[runtime Error]:\\nTested passed:\\n\\nTests failed:\\nassert add(1, 2) == 3 # output: -1\\nassert add(1, 2) == 4 # output: -1\\n\\n[reflection on previous impl]:\\nThe implementation failed the test cases where the input integers are 1 and 2. The issue arises because the code does not add the two integers together, but instead subtracts the second integer from the first. To fix this issue, we should change the operator from `-` to `+` in the return statement. This will ensure that the function returns the correct output for the given input.\\n\\n[improved impl]:\\n```python\\ndef add(a: int, b: int) -> int:\\n \\\"\\\"\\\"\\n Given integers a and b, return the total value of a and b.\\n \\\"\\\"\\\"\\n return a + b\\n```\\n\\n[context]\\nSolve the problem in Python:\\ndef sort_array(arr):\\n \\\"\\\"\\\"\\n In this Kata, you have to sort an array of non-negative integers according to\\n number of ones in their binary representation in ascending order.\\n For similar number of ones, sort based on decimal value.\\n\\n It must be implemented like this:\\n >>> sort_array([1, 5, 2, 3, 4]) == [1, 2, 3, 4, 5]\\n >>> sort_array([-2, -3, -4, -5, -6]) == [-6, -5, -4, -3, -2]\\n >>> sort_array([1, 0, 2, 3, 4]) [0, 1, 2, 3, 4]\\n \\\"\\\"\\\"\\n\\n\\n[previous impl]\\n\\ndef sort_array(arr):\\n # Helper function to count the number of ones in the binary representation\\n def count_ones(n):\\n return bin(n).count('1')\\n \\n # Sort the array using a custom key 
function\\n # The key function returns a tuple (number of ones, value) for each element\\n # This ensures that if two elements have the same number of ones, they are sorted by their value\\n sorted_arr = sorted(arr, key=lambda x: (count_ones(x), x))\\n \\n return sorted_arr\\n```\\n\\n[runtime Error]\\n[user: Tested passed:\\n\\nTests failed:\\nassert sort_array([1, 5, 2, 3, 4]) == [1, 2, 3, 4, 5] # output: [1, 2, 4, 3, 5]\\n]\\n\\nAnalysis the error step by step, provide me improve method and code. Remember to follow [context] requirement. Don't forget write code for steps behind the error step.\\n[reflection on previous impl]:\\nxxx\\n\"}]": {
"reflection": "The implementation failed the test case where the input array is [1, 5, 2, 3, 4]. The issue arises because the code does not handle the sorting of elements with the same number of ones in their binary representation correctly. According to the context, for similar number of ones, the elements should be sorted based on their decimal value in ascending order. However, the output [1, 2, 4, 3, 5] shows that 3 and 4 are not in the correct order, indicating that the sorting by decimal value is not functioning as intended. To fix this issue, we should ensure that the sorting is stable, so that when two elements have the same number of ones, they maintain their relative order if their decimal values are already in ascending order.",
"improved_impl": "def sort_array(arr):\n # Helper function to count the number of ones in the binary representation\n def count_ones(n):\n return bin(n).count('1')\n \n # Sort the array using a custom key function\n # The key function returns a tuple (number of ones, value) for each element\n # This ensures that if two elements have the same number of ones, they are sorted by their value\n # We use a stable sorting algorithm to maintain the relative order of elements with the same number of ones\n sorted_arr = sorted(arr, key=lambda x: (count_ones(x), x), reverse=True)\n sorted_arr = sorted(sorted_arr, key=count_ones)\n \n return sorted_arr\n"
- }
+ },
+ "\n## context\n\n\n-----\n\n## format example\n[CONTENT]\n{\n \"invoice\": \"False\"\n}\n[/CONTENT]\n\n## nodes: \": # \"\n- invoice: # if it's a invoice file, return True else False\n\n\n## constraint\nLanguage: Please use the same language as Human INPUT.\nFormat: output wrapped inside [CONTENT][/CONTENT] like format example, nothing else.\n\n## action\nFollow instructions of nodes, generate output and make sure it follows the format example.\n": "[CONTENT]\n{\n \"invoice\": \"True\"\n}\n[/CONTENT]"
}
\ No newline at end of file
From f781021d5ad8b5d75eb4e93f6f4e8a7ded2f8c2f Mon Sep 17 00:00:00 2001
From: mannaandpoem <1580466765@qq.com>
Date: Thu, 8 Feb 2024 14:21:39 +0800
Subject: [PATCH 038/104] remove debug
---
metagpt/actions/ci/execute_nb_code.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/metagpt/actions/ci/execute_nb_code.py b/metagpt/actions/ci/execute_nb_code.py
index 0ff00de8f..a8c9c3085 100644
--- a/metagpt/actions/ci/execute_nb_code.py
+++ b/metagpt/actions/ci/execute_nb_code.py
@@ -99,7 +99,7 @@ class ExecuteNbCode(Action):
for i, output in enumerate(outputs):
if output["output_type"] == "stream" and not any(
tag in output["text"]
- for tag in ["| INFO | metagpt", "| ERROR | metagpt", "| WARNING | metagpt"]
+ for tag in ["| INFO | metagpt", "| ERROR | metagpt", "| WARNING | metagpt", "DEBUG"]
):
parsed_output += output["text"]
elif output["output_type"] == "display_data":
From 74c5807d0111397424e2a946f3001c731f9ae134 Mon Sep 17 00:00:00 2001
From: yzlin
Date: Thu, 8 Feb 2024 17:09:07 +0800
Subject: [PATCH 039/104] add ml example
---
examples/ci/machine_learning.py | 13 +++++++++++++
examples/ci/ml_engineer_with_tools.py | 4 ++--
2 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/examples/ci/machine_learning.py b/examples/ci/machine_learning.py
index e69de29bb..9eda981ac 100644
--- a/examples/ci/machine_learning.py
+++ b/examples/ci/machine_learning.py
@@ -0,0 +1,13 @@
+import asyncio
+
+from metagpt.roles.ci.code_interpreter import CodeInterpreter
+
+
+async def main(requirement: str):
+ role = CodeInterpreter(auto_run=True, use_tools=False)
+ await role.run(requirement)
+
+
+if __name__ == "__main__":
+ requirement = "Run data analysis on sklearn Wine recognition dataset, include a plot, and train a model to predict wine class (20% as validation), and show validation accuracy."
+ asyncio.run(main(requirement))
diff --git a/examples/ci/ml_engineer_with_tools.py b/examples/ci/ml_engineer_with_tools.py
index 1c73a1dd0..66d37316b 100644
--- a/examples/ci/ml_engineer_with_tools.py
+++ b/examples/ci/ml_engineer_with_tools.py
@@ -3,8 +3,8 @@ import asyncio
from metagpt.roles.ci.ml_engineer import MLEngineer
-async def main(requirement: str, auto_run: bool = True, use_tools: bool = True):
- role = MLEngineer(goal=requirement, auto_run=auto_run, use_tools=use_tools)
+async def main(requirement: str):
+ role = MLEngineer(auto_run=True, use_tools=True)
await role.run(requirement)
From 66d891fe81cbca40dad61d00ddda3003c3d45f12 Mon Sep 17 00:00:00 2001
From: yzlin
Date: Thu, 8 Feb 2024 18:30:20 +0800
Subject: [PATCH 040/104] add example readme
---
examples/ci/README.md | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
create mode 100644 examples/ci/README.md
diff --git a/examples/ci/README.md b/examples/ci/README.md
new file mode 100644
index 000000000..d526b94c7
--- /dev/null
+++ b/examples/ci/README.md
@@ -0,0 +1,18 @@
+# Code Interpreter (CI)
+
+## What is CodeInterpreter
+CodeInterpreter is an agent that solves problems by writing and executing code. It understands user requirements, makes plans, writes code for execution, and uses tools if necessary. These capabilities enable it to tackle a wide range of scenarios; please check out the examples below.
+
+## Example List
+- Data visualization
+- Machine learning modeling
+- Image background removal
+- Solve math problems
+- Receipt OCR
+- Tool usage: web page imitation
+- Tool usage: web crawling
+- Tool usage: text2image
+- Tool usage: email summarization and response
+- More on the way!
+
+Please see [here](https://docs.deepwisdom.ai/main/en/guide/use_cases/agent/code_interpreter/ci_intro.html) for a detailed explanation.
\ No newline at end of file
From 25fd941dfc5a5c1c2f8a5ff305e30372ebb6e707 Mon Sep 17 00:00:00 2001
From: yzlin
Date: Thu, 8 Feb 2024 21:35:55 +0800
Subject: [PATCH 041/104] update readme and roadmap
---
README.md | 2 ++
docs/ROADMAP.md | 16 ++++++++--------
2 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/README.md b/README.md
index b6f31901b..378df43b9 100644
--- a/README.md
+++ b/README.md
@@ -26,6 +26,8 @@ # MetaGPT: The Multi-Agent Framework
## News
+🚀 Feb. 08, 2024: [v0.7.0](https://github.com/geekan/MetaGPT/releases/tag/v0.7.0) released, introducing much easier configuration interface, as well as a versatile agent CodeInterpreter.
+
🚀 Jan. 16, 2024: Our paper [MetaGPT: Meta Programming for A Multi-Agent Collaborative Framework
](https://arxiv.org/abs/2308.00352) accepted for oral presentation **(top 1.2%)** at ICLR 2024, **ranking #1** in the LLM-based Agent category.
diff --git a/docs/ROADMAP.md b/docs/ROADMAP.md
index 4bb530bf2..ec17cc0ce 100644
--- a/docs/ROADMAP.md
+++ b/docs/ROADMAP.md
@@ -35,14 +35,14 @@ ### Tasks
3. Strategies
1. Support ReAct strategy (experimentation done with game agents)
2. Support CoT strategy (experimentation done with game agents)
- 3. Support ToT strategy
+ 3. ~~Support ToT strategy~~ (v0.6.0)
4. Support Reflection strategy (experimentation done with game agents)
- 5. Support planning
+ 5. ~~Support planning~~ (v0.7.0)
4. Actions
1. ~~Implementation: Search~~ (v0.2.1)
2. Implementation: Knowledge search, supporting 10+ data formats
- 3. Implementation: Data EDA (expected v0.7.0)
- 4. Implementation: Review & Revise (expected v0.7.0)
+ 3. ~~Implementation: Data EDA~~ (v0.7.0)
+ 4. ~~Implementation: Review & Revise~~ (v0.7.0)
5. ~~Implementation: Add Document~~ (v0.5.0)
6. ~~Implementation: Delete Document~~ (v0.5.0)
7. Implementation: Self-training
@@ -50,7 +50,7 @@ ### Tasks
9. Implementation: Generate reliable unit tests based on YAPI
10. Implementation: Self-evaluation
11. Implementation: AI Invocation
- 12. Implementation: Learning and using third-party standard libraries
+ 12. ~~Implementation: Learning and using third-party standard libraries~~ (v0.7.0)
13. Implementation: Data collection
14. Implementation: AI training
15. ~~Implementation: Run code~~ (v0.2.1)
@@ -63,14 +63,14 @@ ### Tasks
7. Roles
1. Perfect the action pool/skill pool for each role
2. E-commerce seller
- 3. Data analyst (expected v0.7.0)
+ 3. ~~Data analyst~~ (v0.7.0)
4. News observer
5. ~~Institutional researcher~~ (v0.2.1)
8. Evaluation
1. Support an evaluation on a game dataset (experimentation done with game agents)
2. Reproduce papers, implement full skill acquisition for a single game role, achieving SOTA results (experimentation done with game agents)
- 3. Support an evaluation on a math dataset (expected v0.7.0)
- 4. Reproduce papers, achieving SOTA results for current mathematical problem solving process
+ 3. Support an evaluation on a math dataset (expected v0.8.0)
+ 4. Reproduce papers, achieving SOTA results for current mathematical problem solving process (expected v0.8.0)
9. LLM
1. Support Claude underlying API
2. ~~Support Azure asynchronous API~~
From 1d5e5b75d342d34afd9acb222171449309a69bcd Mon Sep 17 00:00:00 2001
From: yzlin
Date: Thu, 8 Feb 2024 22:15:43 +0800
Subject: [PATCH 042/104] add __init__ to avoid pypi import failure
---
metagpt/actions/ci/__init__.py | 0
metagpt/roles/ci/__init__.py | 0
2 files changed, 0 insertions(+), 0 deletions(-)
create mode 100644 metagpt/actions/ci/__init__.py
create mode 100644 metagpt/roles/ci/__init__.py
diff --git a/metagpt/actions/ci/__init__.py b/metagpt/actions/ci/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/metagpt/roles/ci/__init__.py b/metagpt/roles/ci/__init__.py
new file mode 100644
index 000000000..e69de29bb
From c602c69c7da79843fef0023b36e2d9259380c94e Mon Sep 17 00:00:00 2001
From: yzlin
Date: Thu, 8 Feb 2024 22:40:38 +0800
Subject: [PATCH 043/104] add __init__ for pypi packaging
---
metagpt/prompts/ci/__init__.py | 0
1 file changed, 0 insertions(+), 0 deletions(-)
create mode 100644 metagpt/prompts/ci/__init__.py
diff --git a/metagpt/prompts/ci/__init__.py b/metagpt/prompts/ci/__init__.py
new file mode 100644
index 000000000..e69de29bb
From e2b2ff32da48097024499c93bcac4643dab770ba Mon Sep 17 00:00:00 2001
From: yzlin
Date: Fri, 9 Feb 2024 10:44:22 +0800
Subject: [PATCH 044/104] update readme
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 378df43b9..d3a035abe 100644
--- a/README.md
+++ b/README.md
@@ -26,7 +26,7 @@ # MetaGPT: The Multi-Agent Framework
## News
-🚀 Feb. 08, 2024: [v0.7.0](https://github.com/geekan/MetaGPT/releases/tag/v0.7.0) released, introducing much easier configuration interface, as well as a versatile agent CodeInterpreter.
+🚀 Feb. 08, 2024: [v0.7.0](https://github.com/geekan/MetaGPT/releases/tag/v0.7.0) released, supporting assigning different LLMs to different Roles. We also introduced CodeInterpreter, a powerful agent capable of solving a wide range of real-world problems.
🚀 Jan. 16, 2024: Our paper [MetaGPT: Meta Programming for A Multi-Agent Collaborative Framework
](https://arxiv.org/abs/2308.00352) accepted for oral presentation **(top 1.2%)** at ICLR 2024, **ranking #1** in the LLM-based Agent category.
From 8c24c97a53717a40b02fef09e0b90e01a0babb19 Mon Sep 17 00:00:00 2001
From: geekan
Date: Sun, 18 Feb 2024 11:31:53 +0800
Subject: [PATCH 045/104] tuning details
---
README.md | 2 +-
config/config2.yaml.example | 8 ++++----
docs/scripts/coverage.sh | 2 +-
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/README.md b/README.md
index d3a035abe..f4b04a5fd 100644
--- a/README.md
+++ b/README.md
@@ -62,7 +62,7 @@ ### Pip installation
```bash
pip install metagpt
-metagpt --init-config # create ~/.metagpt/config2.yaml, modify it to your own config
+metagpt --init-config # it will create ~/.metagpt/config2.yaml, just modify it to your needs
metagpt "Create a 2048 game" # this will create a repo in ./workspace
```
diff --git a/config/config2.yaml.example b/config/config2.yaml.example
index 2217f1b2c..eb1721625 100644
--- a/config/config2.yaml.example
+++ b/config/config2.yaml.example
@@ -1,10 +1,12 @@
llm:
- api_type: "openai" # or azure / ollama etc.
+ api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options
base_url: "YOUR_BASE_URL"
api_key: "YOUR_API_KEY"
model: "gpt-4-turbo-preview" # or gpt-3.5-turbo-1106 / gpt-4-1106-preview
+  repair_llm_output: true # when the output is not valid JSON, try to repair it
+ proxy: "YOUR_PROXY" # for LLM API requests
-proxy: "YOUR_PROXY"
+proxy: "YOUR_PROXY" # for tools like requests, playwright, selenium, etc.
search:
api_type: "google"
@@ -41,5 +43,3 @@ iflytek_api_key: "YOUR_API_KEY"
iflytek_api_secret: "YOUR_API_SECRET"
metagpt_tti_url: "YOUR_MODEL_URL"
-
-repair_llm_output: true
diff --git a/docs/scripts/coverage.sh b/docs/scripts/coverage.sh
index a56571399..22e479200 100755
--- a/docs/scripts/coverage.sh
+++ b/docs/scripts/coverage.sh
@@ -1 +1 @@
-coverage run --source ./metagpt -m pytest --durations=0 --timeout=100 && coverage report -m && coverage html && open htmlcov/index.html
+coverage run --source ./metagpt -m pytest -n 8 --durations=0 --timeout=100 && coverage report -m && coverage html && open htmlcov/index.html
From 38f6160eb43ff113f30b4331bfabaffad7eebe9d Mon Sep 17 00:00:00 2001
From: geekan
Date: Sun, 18 Feb 2024 11:33:23 +0800
Subject: [PATCH 046/104] rename example to get yaml syntax highlights
---
config/{config2.yaml.example => config2.example.yaml} | 0
1 file changed, 0 insertions(+), 0 deletions(-)
rename config/{config2.yaml.example => config2.example.yaml} (100%)
diff --git a/config/config2.yaml.example b/config/config2.example.yaml
similarity index 100%
rename from config/config2.yaml.example
rename to config/config2.example.yaml
From 10b183bae30dca29b8f99678a4f6808b327deff5 Mon Sep 17 00:00:00 2001
From: geekan
Date: Sun, 18 Feb 2024 12:15:52 +0800
Subject: [PATCH 047/104] add desc to config
---
config/config2.yaml | 3 +++
1 file changed, 3 insertions(+)
diff --git a/config/config2.yaml b/config/config2.yaml
index 2c4ca636f..84b201c4c 100644
--- a/config/config2.yaml
+++ b/config/config2.yaml
@@ -1,3 +1,6 @@
+# Check https://github.com/geekan/MetaGPT/blob/main/config/config2.example.yaml for full configuration example.
+# It reflects https://github.com/geekan/MetaGPT/blob/main/metagpt/config2.py
llm:
+ api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options
api_key: "YOUR_API_KEY"
model: "gpt-4-turbo-preview" # or gpt-3.5-turbo-1106 / gpt-4-1106-preview
\ No newline at end of file
From de8d75f15f7ed81ea62a4b56a293a5e7c0181dc0 Mon Sep 17 00:00:00 2001
From: geekan
Date: Sun, 18 Feb 2024 12:19:03 +0800
Subject: [PATCH 048/104] add desc to config
---
config/config2.yaml | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/config/config2.yaml b/config/config2.yaml
index 84b201c4c..8e5825b57 100644
--- a/config/config2.yaml
+++ b/config/config2.yaml
@@ -1,6 +1,7 @@
-# Check https://github.com/geekan/MetaGPT/blob/main/config/config2.example.yaml for full configuration example.
-# It reflects https://github.com/geekan/MetaGPT/blob/main/metagpt/config2.py
+# Full Example: https://github.com/geekan/MetaGPT/blob/main/config/config2.example.yaml
+# Reflected Code: https://github.com/geekan/MetaGPT/blob/main/metagpt/config2.py
llm:
api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options
- api_key: "YOUR_API_KEY"
- model: "gpt-4-turbo-preview" # or gpt-3.5-turbo-1106 / gpt-4-1106-preview
\ No newline at end of file
+ model: "gpt-4-turbo-preview" # or gpt-3.5-turbo-1106 / gpt-4-1106-preview
+ base_url: "https://api.openai.com/v1" # or forward url / other llm url
+ api_key: "YOUR_API_KEY"
\ No newline at end of file
From 1ab317c045eae206db3720b51e1a5a503abc178b Mon Sep 17 00:00:00 2001
From: geekan
Date: Sun, 18 Feb 2024 12:21:20 +0800
Subject: [PATCH 049/104] add config details
---
README.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/README.md b/README.md
index f4b04a5fd..96de349ab 100644
--- a/README.md
+++ b/README.md
@@ -62,6 +62,7 @@ ### Pip installation
```bash
pip install metagpt
+# configuration details: https://docs.deepwisdom.ai/main/en/guide/get_started/configuration.html
metagpt --init-config # it will create ~/.metagpt/config2.yaml, just modify it to your needs
metagpt "Create a 2048 game" # this will create a repo in ./workspace
```
From 54d2a718ed17658fbc576a99c7ccb2655c08f2f6 Mon Sep 17 00:00:00 2001
From: geekan
Date: Sun, 18 Feb 2024 12:22:03 +0800
Subject: [PATCH 050/104] add config details
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 96de349ab..877b6d363 100644
--- a/README.md
+++ b/README.md
@@ -62,7 +62,7 @@ ### Pip installation
```bash
pip install metagpt
-# configuration details: https://docs.deepwisdom.ai/main/en/guide/get_started/configuration.html
+# https://docs.deepwisdom.ai/main/en/guide/get_started/configuration.html
metagpt --init-config # it will create ~/.metagpt/config2.yaml, just modify it to your needs
metagpt "Create a 2048 game" # this will create a repo in ./workspace
```
From 801490516b61503b4160fdef90e757db585d5f1a Mon Sep 17 00:00:00 2001
From: geekan
Date: Sun, 18 Feb 2024 13:45:49 +0800
Subject: [PATCH 051/104] refine readme
---
README.md | 43 ++++++++++++++++++++++---------------------
1 file changed, 22 insertions(+), 21 deletions(-)
diff --git a/README.md b/README.md
index 877b6d363..0ffdbd876 100644
--- a/README.md
+++ b/README.md
@@ -64,6 +64,27 @@ ### Pip installation
pip install metagpt
# https://docs.deepwisdom.ai/main/en/guide/get_started/configuration.html
metagpt --init-config # it will create ~/.metagpt/config2.yaml, just modify it to your needs
+```
+
+### Configuration
+
+In `~/.metagpt/config2.yaml`, you can configure the following:
+
+```yaml
+# Full Example: https://github.com/geekan/MetaGPT/blob/main/config/config2.example.yaml
+# Reflected Code: https://github.com/geekan/MetaGPT/blob/main/metagpt/config2.py
+llm:
+ api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options
+ model: "gpt-4-turbo-preview" # or gpt-3.5-turbo-1106 / gpt-4-1106-preview
+ base_url: "https://api.openai.com/v1" # or forward url / other llm url
+ api_key: "YOUR_API_KEY"
+```
+
+### Usage
+
+After installation, you can use it as CLI
+
+```bash
metagpt "Create a 2048 game" # this will create a repo in ./workspace
```
@@ -76,27 +97,7 @@ # https://docs.deepwisdom.ai/main/en/guide/get_started/configuration.html
```
For detailed installation instructions, please refer to [cli_install](https://docs.deepwisdom.ai/main/en/guide/get_started/installation.html#install-stable-version)
-
-### Docker installation
-> Note: In the Windows, you need to replace "/opt/metagpt" with a directory that Docker has permission to create, such as "D:\Users\x\metagpt"
-
-```bash
-# Step 1: Download metagpt official image and prepare config2.yaml
-docker pull metagpt/metagpt:latest
-mkdir -p /opt/metagpt/{config,workspace}
-docker run --rm metagpt/metagpt:latest cat /app/metagpt/config/config2.yaml > /opt/metagpt/config/config2.yaml
-vim /opt/metagpt/config/config2.yaml # Change the config
-
-# Step 2: Run metagpt demo with container
-docker run --rm \
- --privileged \
- -v /opt/metagpt/config/config2.yaml:/app/metagpt/config/config2.yaml \
- -v /opt/metagpt/workspace:/app/metagpt/workspace \
- metagpt/metagpt:latest \
- metagpt "Create a 2048 game"
-```
-
-detail installation please refer to [docker_install](https://docs.deepwisdom.ai/main/en/guide/get_started/installation.html#install-with-docker)
+ or [docker_install](https://docs.deepwisdom.ai/main/en/guide/get_started/installation.html#install-with-docker)
### QuickStart & Demo Video
- Try it on [MetaGPT Huggingface Space](https://huggingface.co/spaces/deepwisdom/MetaGPT)
From b60a2cff79f00d13f8fccf78bf5df969059f0c24 Mon Sep 17 00:00:00 2001
From: geekan
Date: Sun, 18 Feb 2024 13:48:24 +0800
Subject: [PATCH 052/104] refine readme
---
README.md | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/README.md b/README.md
index 0ffdbd876..000168233 100644
--- a/README.md
+++ b/README.md
@@ -68,11 +68,9 @@ # https://docs.deepwisdom.ai/main/en/guide/get_started/configuration.html
### Configuration
-In `~/.metagpt/config2.yaml`, you can configure the following:
+You can configure `~/.metagpt/config2.yaml` according to the [example](https://github.com/geekan/MetaGPT/blob/main/config/config2.example.yaml):
```yaml
-# Full Example: https://github.com/geekan/MetaGPT/blob/main/config/config2.example.yaml
-# Reflected Code: https://github.com/geekan/MetaGPT/blob/main/metagpt/config2.py
llm:
api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options
model: "gpt-4-turbo-preview" # or gpt-3.5-turbo-1106 / gpt-4-1106-preview
@@ -80,6 +78,8 @@ # Reflected Code: https://github.com/geekan/MetaGPT/blob/main/metagpt/config2.py
api_key: "YOUR_API_KEY"
```
+And you can check the [configuration doc](https://docs.deepwisdom.ai/main/en/guide/get_started/configuration.html) for more details.
+
### Usage
After installation, you can use it as CLI
From 3acefa3fc47a6fa5eb61919d69a2ca1e0660b096 Mon Sep 17 00:00:00 2001
From: geekan
Date: Sun, 18 Feb 2024 14:06:20 +0800
Subject: [PATCH 053/104] refine readme
---
README.md | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/README.md b/README.md
index 000168233..1903bebe1 100644
--- a/README.md
+++ b/README.md
@@ -68,7 +68,7 @@ # https://docs.deepwisdom.ai/main/en/guide/get_started/configuration.html
### Configuration
-You can configure `~/.metagpt/config2.yaml` according to the [example](https://github.com/geekan/MetaGPT/blob/main/config/config2.example.yaml):
+You can configure `~/.metagpt/config2.yaml` according to the [example](https://github.com/geekan/MetaGPT/blob/main/config/config2.example.yaml) and [doc](https://docs.deepwisdom.ai/main/en/guide/get_started/configuration.html):
```yaml
llm:
@@ -78,8 +78,6 @@ ### Configuration
api_key: "YOUR_API_KEY"
```
-And you can check the [configuration doc](https://docs.deepwisdom.ai/main/en/guide/get_started/configuration.html) for more details.
-
### Usage
After installation, you can use it as CLI
From 82a0d03e313f6b985f0c815ee061ee272ae012d4 Mon Sep 17 00:00:00 2001
From: yzlin
Date: Sun, 18 Feb 2024 17:06:39 +0800
Subject: [PATCH 054/104] unified Interpreter naming
---
README.md | 2 +-
examples/ci/README.md | 18 ------------------
examples/mi/README.md | 18 ++++++++++++++++++
examples/{ci => mi}/crawl_webpage.py | 6 +++---
examples/{ci => mi}/data_visualization.py | 6 +++---
examples/{ci => mi}/email_summary.py | 6 +++---
examples/{ci => mi}/imitate_webpage.py | 6 +++---
examples/{ci => mi}/machine_learning.py | 6 +++---
examples/{ci => mi}/ml_engineer_with_tools.py | 2 +-
examples/{ci => mi}/ocr_receipt.py | 8 ++++----
examples/{ci => mi}/rm_image_background.py | 6 +++---
examples/{ci => mi}/sd_tool_usage.py | 6 +++---
examples/{ci => mi}/solve_math_problems.py | 6 +++---
metagpt/actions/__init__.py | 6 +++---
metagpt/actions/{ci => mi}/__init__.py | 0
metagpt/actions/{ci => mi}/ask_review.py | 0
metagpt/actions/{ci => mi}/debug_code.py | 2 +-
metagpt/actions/{ci => mi}/execute_nb_code.py | 0
metagpt/actions/{ci => mi}/ml_action.py | 6 +++---
.../actions/{ci => mi}/write_analysis_code.py | 2 +-
metagpt/actions/{ci => mi}/write_plan.py | 2 +-
metagpt/prompts/{ci => mi}/__init__.py | 0
metagpt/prompts/{ci => mi}/ml_action.py | 0
.../prompts/{ci => mi}/write_analysis_code.py | 0
metagpt/roles/{ci => mi}/__init__.py | 0
.../code_interpreter.py => mi/interpreter.py} | 12 ++++++------
metagpt/roles/{ci => mi}/ml_engineer.py | 10 +++++-----
metagpt/strategy/planner.py | 4 ++--
metagpt/strategy/solver.py | 4 ++--
.../actions/{ci => mi}/test_ask_review.py | 2 +-
.../actions/{ci => mi}/test_debug_code.py | 2 +-
.../actions/{ci => mi}/test_execute_nb_code.py | 2 +-
.../actions/{ci => mi}/test_ml_action.py | 2 +-
.../{ci => mi}/test_write_analysis_code.py | 4 ++--
.../actions/{ci => mi}/test_write_plan.py | 2 +-
.../test_interpreter.py} | 12 ++++++------
.../roles/{ci => mi}/test_ml_engineer.py | 10 +++++-----
tests/metagpt/utils/test_save_code.py | 2 +-
38 files changed, 91 insertions(+), 91 deletions(-)
delete mode 100644 examples/ci/README.md
create mode 100644 examples/mi/README.md
rename examples/{ci => mi}/crawl_webpage.py (76%)
rename examples/{ci => mi}/data_visualization.py (55%)
rename examples/{ci => mi}/email_summary.py (90%)
rename examples/{ci => mi}/imitate_webpage.py (81%)
rename examples/{ci => mi}/machine_learning.py (66%)
rename examples/{ci => mi}/ml_engineer_with_tools.py (94%)
rename examples/{ci => mi}/ocr_receipt.py (72%)
rename examples/{ci => mi}/rm_image_background.py (71%)
rename examples/{ci => mi}/sd_tool_usage.py (70%)
rename examples/{ci => mi}/solve_math_problems.py (66%)
rename metagpt/actions/{ci => mi}/__init__.py (100%)
rename metagpt/actions/{ci => mi}/ask_review.py (100%)
rename metagpt/actions/{ci => mi}/debug_code.py (98%)
rename metagpt/actions/{ci => mi}/execute_nb_code.py (100%)
rename metagpt/actions/{ci => mi}/ml_action.py (93%)
rename metagpt/actions/{ci => mi}/write_analysis_code.py (99%)
rename metagpt/actions/{ci => mi}/write_plan.py (98%)
rename metagpt/prompts/{ci => mi}/__init__.py (100%)
rename metagpt/prompts/{ci => mi}/ml_action.py (100%)
rename metagpt/prompts/{ci => mi}/write_analysis_code.py (100%)
rename metagpt/roles/{ci => mi}/__init__.py (100%)
rename metagpt/roles/{ci/code_interpreter.py => mi/interpreter.py} (91%)
rename metagpt/roles/{ci => mi}/ml_engineer.py (89%)
rename tests/metagpt/actions/{ci => mi}/test_ask_review.py (84%)
rename tests/metagpt/actions/{ci => mi}/test_debug_code.py (96%)
rename tests/metagpt/actions/{ci => mi}/test_execute_nb_code.py (98%)
rename tests/metagpt/actions/{ci => mi}/test_ml_action.py (95%)
rename tests/metagpt/actions/{ci => mi}/test_write_analysis_code.py (99%)
rename tests/metagpt/actions/{ci => mi}/test_write_plan.py (95%)
rename tests/metagpt/roles/{ci/test_code_interpreter.py => mi/test_interpreter.py} (59%)
rename tests/metagpt/roles/{ci => mi}/test_ml_engineer.py (91%)
diff --git a/README.md b/README.md
index d3a035abe..a1aa5ded8 100644
--- a/README.md
+++ b/README.md
@@ -26,7 +26,7 @@ # MetaGPT: The Multi-Agent Framework
## News
-🚀 Feb. 08, 2024: [v0.7.0](https://github.com/geekan/MetaGPT/releases/tag/v0.7.0) released, supporting assigning different LLMs to different Roles. We also introduced CodeInterpreter, a powerful agent capable of solving a wide range of real-world problems.
+🚀 Feb. 08, 2024: [v0.7.0](https://github.com/geekan/MetaGPT/releases/tag/v0.7.0) released, supporting assigning different LLMs to different Roles. We also introduced [Interpreter](https://github.com/geekan/MetaGPT/blob/main/examples/mi/README.md), a powerful agent capable of solving a wide range of real-world problems.
🚀 Jan. 16, 2024: Our paper [MetaGPT: Meta Programming for A Multi-Agent Collaborative Framework
](https://arxiv.org/abs/2308.00352) accepted for oral presentation **(top 1.2%)** at ICLR 2024, **ranking #1** in the LLM-based Agent category.
diff --git a/examples/ci/README.md b/examples/ci/README.md
deleted file mode 100644
index d526b94c7..000000000
--- a/examples/ci/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Code Interpreter (CI)
-
-## What is CodeInterpreter
-CodeInterpreter is an agent who solves problems through codes. It understands user requirements, makes plans, writes codes for execution, and uses tools if necessary. These capabilities enable it to tackle a wide range of scenarios, please check out the examples below.
-
-## Example List
-- Data visualization
-- Machine learning modeling
-- Image background removal
-- Solve math problems
-- Receipt OCR
-- Tool usage: web page imitation
-- Tool usage: web crawling
-- Tool usage: text2image
-- Tool usage: email summarization and response
-- More on the way!
-
-Please see [here](https://docs.deepwisdom.ai/main/en/guide/use_cases/agent/code_interpreter/ci_intro.html) for detailed explanation.
\ No newline at end of file
diff --git a/examples/mi/README.md b/examples/mi/README.md
new file mode 100644
index 000000000..1734ba388
--- /dev/null
+++ b/examples/mi/README.md
@@ -0,0 +1,18 @@
+# MetaGPT Interpreter (MI)
+
+## What is Interpreter
+Interpreter is an agent that solves problems through code. It understands user requirements, makes plans, writes code for execution, and uses tools if necessary. These capabilities enable it to tackle a wide range of scenarios; please check out the examples below.
+
+## Example List
+- Data visualization
+- Machine learning modeling
+- Image background removal
+- Solve math problems
+- Receipt OCR
+- Tool usage: web page imitation
+- Tool usage: web crawling
+- Tool usage: text2image
+- Tool usage: email summarization and response
+- More on the way!
+
+Please see [here](https://docs.deepwisdom.ai/main/en/guide/use_cases/agent/interpreter/mi_intro.html) for detailed explanation.
\ No newline at end of file
diff --git a/examples/ci/crawl_webpage.py b/examples/mi/crawl_webpage.py
similarity index 76%
rename from examples/ci/crawl_webpage.py
rename to examples/mi/crawl_webpage.py
index 2db9e407b..b5d2fb3d0 100644
--- a/examples/ci/crawl_webpage.py
+++ b/examples/mi/crawl_webpage.py
@@ -5,15 +5,15 @@
@File : crawl_webpage.py
"""
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
async def main():
prompt = """Get data from `paperlist` table in https://papercopilot.com/statistics/iclr-statistics/iclr-2024-statistics/,
and save it to a csv file. paper title must include `multiagent` or `large language model`. *notice: print key variables*"""
- ci = CodeInterpreter(goal=prompt, use_tools=True)
+ mi = Interpreter(use_tools=True)
- await ci.run(prompt)
+ await mi.run(prompt)
if __name__ == "__main__":
diff --git a/examples/ci/data_visualization.py b/examples/mi/data_visualization.py
similarity index 55%
rename from examples/ci/data_visualization.py
rename to examples/mi/data_visualization.py
index 8dc4340d4..2e4acc9b4 100644
--- a/examples/ci/data_visualization.py
+++ b/examples/mi/data_visualization.py
@@ -1,11 +1,11 @@
import asyncio
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
async def main(requirement: str = ""):
- code_interpreter = CodeInterpreter(use_tools=False)
- await code_interpreter.run(requirement)
+ mi = Interpreter(use_tools=False)
+ await mi.run(requirement)
if __name__ == "__main__":
diff --git a/examples/ci/email_summary.py b/examples/mi/email_summary.py
similarity index 90%
rename from examples/ci/email_summary.py
rename to examples/mi/email_summary.py
index d6fa283ca..e1511c5b0 100644
--- a/examples/ci/email_summary.py
+++ b/examples/mi/email_summary.py
@@ -6,7 +6,7 @@
"""
import os
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
async def main():
@@ -22,9 +22,9 @@ async def main():
Firstly, Please help me fetch the latest 5 senders and full letter contents.
Then, summarize each of the 5 emails into one sentence (you can do this by yourself, no need to import other models to do this) and output them in a markdown format."""
- ci = CodeInterpreter(use_tools=True)
+ mi = Interpreter(use_tools=True)
- await ci.run(prompt)
+ await mi.run(prompt)
if __name__ == "__main__":
diff --git a/examples/ci/imitate_webpage.py b/examples/mi/imitate_webpage.py
similarity index 81%
rename from examples/ci/imitate_webpage.py
rename to examples/mi/imitate_webpage.py
index 6a83d3a33..0e9ca731d 100644
--- a/examples/ci/imitate_webpage.py
+++ b/examples/mi/imitate_webpage.py
@@ -5,7 +5,7 @@
@Author : mannaandpoem
@File : imitate_webpage.py
"""
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
async def main():
@@ -15,9 +15,9 @@ Firstly, utilize Selenium and WebDriver for rendering.
Secondly, convert image to a webpage including HTML, CSS and JS in one go.
Finally, save webpage in a text file.
Note: All required dependencies and environments have been fully installed and configured."""
- ci = CodeInterpreter(use_tools=True)
+ mi = Interpreter(use_tools=True)
- await ci.run(prompt)
+ await mi.run(prompt)
if __name__ == "__main__":
diff --git a/examples/ci/machine_learning.py b/examples/mi/machine_learning.py
similarity index 66%
rename from examples/ci/machine_learning.py
rename to examples/mi/machine_learning.py
index 9eda981ac..ee3b47f26 100644
--- a/examples/ci/machine_learning.py
+++ b/examples/mi/machine_learning.py
@@ -1,11 +1,11 @@
import asyncio
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
async def main(requirement: str):
- role = CodeInterpreter(auto_run=True, use_tools=False)
- await role.run(requirement)
+ mi = Interpreter(auto_run=True, use_tools=False)
+ await mi.run(requirement)
if __name__ == "__main__":
diff --git a/examples/ci/ml_engineer_with_tools.py b/examples/mi/ml_engineer_with_tools.py
similarity index 94%
rename from examples/ci/ml_engineer_with_tools.py
rename to examples/mi/ml_engineer_with_tools.py
index 66d37316b..9d0e7f951 100644
--- a/examples/ci/ml_engineer_with_tools.py
+++ b/examples/mi/ml_engineer_with_tools.py
@@ -1,6 +1,6 @@
import asyncio
-from metagpt.roles.ci.ml_engineer import MLEngineer
+from metagpt.roles.mi.ml_engineer import MLEngineer
async def main(requirement: str):
diff --git a/examples/ci/ocr_receipt.py b/examples/mi/ocr_receipt.py
similarity index 72%
rename from examples/ci/ocr_receipt.py
rename to examples/mi/ocr_receipt.py
index dc2db2d01..ffa5cff05 100644
--- a/examples/ci/ocr_receipt.py
+++ b/examples/mi/ocr_receipt.py
@@ -1,16 +1,16 @@
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
async def main():
# Notice: pip install metagpt[ocr] before using this example
image_path = "image.jpg"
language = "English"
- requirement = f"""This is a {language} invoice image.
+ requirement = f"""This is a {language} receipt image.
Your goal is to perform OCR on images using PaddleOCR, then extract the total amount from ocr text results, and finally save as table. Image path: {image_path}.
NOTE: The environments for Paddle and PaddleOCR are all ready and has been fully installed."""
- ci = CodeInterpreter()
+ mi = Interpreter()
- await ci.run(requirement)
+ await mi.run(requirement)
if __name__ == "__main__":
diff --git a/examples/ci/rm_image_background.py b/examples/mi/rm_image_background.py
similarity index 71%
rename from examples/ci/rm_image_background.py
rename to examples/mi/rm_image_background.py
index 83dbbdc77..57e89b103 100644
--- a/examples/ci/rm_image_background.py
+++ b/examples/mi/rm_image_background.py
@@ -1,11 +1,11 @@
import asyncio
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
async def main(requirement: str = ""):
- code_interpreter = CodeInterpreter(use_tools=False)
- await code_interpreter.run(requirement)
+ mi = Interpreter(use_tools=False)
+ await mi.run(requirement)
if __name__ == "__main__":
diff --git a/examples/ci/sd_tool_usage.py b/examples/mi/sd_tool_usage.py
similarity index 70%
rename from examples/ci/sd_tool_usage.py
rename to examples/mi/sd_tool_usage.py
index b4642af23..f8507a411 100644
--- a/examples/ci/sd_tool_usage.py
+++ b/examples/mi/sd_tool_usage.py
@@ -4,12 +4,12 @@
# @Desc :
import asyncio
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
async def main(requirement: str = ""):
- code_interpreter = CodeInterpreter(use_tools=True, goal=requirement)
- await code_interpreter.run(requirement)
+ mi = Interpreter(use_tools=True, goal=requirement)
+ await mi.run(requirement)
if __name__ == "__main__":
diff --git a/examples/ci/solve_math_problems.py b/examples/mi/solve_math_problems.py
similarity index 66%
rename from examples/ci/solve_math_problems.py
rename to examples/mi/solve_math_problems.py
index 8c660975b..dce2edb00 100644
--- a/examples/ci/solve_math_problems.py
+++ b/examples/mi/solve_math_problems.py
@@ -1,11 +1,11 @@
import asyncio
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
async def main(requirement: str = ""):
- code_interpreter = CodeInterpreter(use_tools=False)
- await code_interpreter.run(requirement)
+ mi = Interpreter(use_tools=False)
+ await mi.run(requirement)
if __name__ == "__main__":
diff --git a/metagpt/actions/__init__.py b/metagpt/actions/__init__.py
index 363b4fd33..19a7c10d5 100644
--- a/metagpt/actions/__init__.py
+++ b/metagpt/actions/__init__.py
@@ -22,9 +22,9 @@ from metagpt.actions.write_code_review import WriteCodeReview
from metagpt.actions.write_prd import WritePRD
from metagpt.actions.write_prd_review import WritePRDReview
from metagpt.actions.write_test import WriteTest
-from metagpt.actions.ci.execute_nb_code import ExecuteNbCode
-from metagpt.actions.ci.write_analysis_code import WriteCodeWithoutTools, WriteCodeWithTools
-from metagpt.actions.ci.write_plan import WritePlan
+from metagpt.actions.mi.execute_nb_code import ExecuteNbCode
+from metagpt.actions.mi.write_analysis_code import WriteCodeWithoutTools, WriteCodeWithTools
+from metagpt.actions.mi.write_plan import WritePlan
class ActionType(Enum):
diff --git a/metagpt/actions/ci/__init__.py b/metagpt/actions/mi/__init__.py
similarity index 100%
rename from metagpt/actions/ci/__init__.py
rename to metagpt/actions/mi/__init__.py
diff --git a/metagpt/actions/ci/ask_review.py b/metagpt/actions/mi/ask_review.py
similarity index 100%
rename from metagpt/actions/ci/ask_review.py
rename to metagpt/actions/mi/ask_review.py
diff --git a/metagpt/actions/ci/debug_code.py b/metagpt/actions/mi/debug_code.py
similarity index 98%
rename from metagpt/actions/ci/debug_code.py
rename to metagpt/actions/mi/debug_code.py
index 4a6617dc6..db3b7a9db 100644
--- a/metagpt/actions/ci/debug_code.py
+++ b/metagpt/actions/mi/debug_code.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from metagpt.actions.ci.write_analysis_code import BaseWriteAnalysisCode
+from metagpt.actions.mi.write_analysis_code import BaseWriteAnalysisCode
from metagpt.logs import logger
from metagpt.schema import Message
from metagpt.utils.common import create_func_call_config
diff --git a/metagpt/actions/ci/execute_nb_code.py b/metagpt/actions/mi/execute_nb_code.py
similarity index 100%
rename from metagpt/actions/ci/execute_nb_code.py
rename to metagpt/actions/mi/execute_nb_code.py
diff --git a/metagpt/actions/ci/ml_action.py b/metagpt/actions/mi/ml_action.py
similarity index 93%
rename from metagpt/actions/ci/ml_action.py
rename to metagpt/actions/mi/ml_action.py
index e18d0fd20..60b2fb547 100644
--- a/metagpt/actions/ci/ml_action.py
+++ b/metagpt/actions/mi/ml_action.py
@@ -3,14 +3,14 @@ from __future__ import annotations
from typing import Tuple
from metagpt.actions import Action
-from metagpt.actions.ci.write_analysis_code import WriteCodeWithTools
-from metagpt.prompts.ci.ml_action import (
+from metagpt.actions.mi.write_analysis_code import WriteCodeWithTools
+from metagpt.prompts.mi.ml_action import (
ML_GENERATE_CODE_PROMPT,
ML_TOOL_USAGE_PROMPT,
PRINT_DATA_COLUMNS,
UPDATE_DATA_COLUMNS,
)
-from metagpt.prompts.ci.write_analysis_code import CODE_GENERATOR_WITH_TOOLS
+from metagpt.prompts.mi.write_analysis_code import CODE_GENERATOR_WITH_TOOLS
from metagpt.schema import Message, Plan
from metagpt.utils.common import create_func_call_config, remove_comments
diff --git a/metagpt/actions/ci/write_analysis_code.py b/metagpt/actions/mi/write_analysis_code.py
similarity index 99%
rename from metagpt/actions/ci/write_analysis_code.py
rename to metagpt/actions/mi/write_analysis_code.py
index 421eb0804..b3d0632b6 100644
--- a/metagpt/actions/ci/write_analysis_code.py
+++ b/metagpt/actions/mi/write_analysis_code.py
@@ -10,7 +10,7 @@ from typing import Tuple
from metagpt.actions import Action
from metagpt.logs import logger
-from metagpt.prompts.ci.write_analysis_code import (
+from metagpt.prompts.mi.write_analysis_code import (
CODE_GENERATOR_WITH_TOOLS,
SELECT_FUNCTION_TOOLS,
TOOL_RECOMMENDATION_PROMPT,
diff --git a/metagpt/actions/ci/write_plan.py b/metagpt/actions/mi/write_plan.py
similarity index 98%
rename from metagpt/actions/ci/write_plan.py
rename to metagpt/actions/mi/write_plan.py
index dd9363260..8067d7b87 100644
--- a/metagpt/actions/ci/write_plan.py
+++ b/metagpt/actions/mi/write_plan.py
@@ -12,7 +12,7 @@ from typing import Tuple
from metagpt.actions import Action
from metagpt.logs import logger
-from metagpt.prompts.ci.write_analysis_code import (
+from metagpt.prompts.mi.write_analysis_code import (
ASSIGN_TASK_TYPE_CONFIG,
ASSIGN_TASK_TYPE_PROMPT,
)
diff --git a/metagpt/prompts/ci/__init__.py b/metagpt/prompts/mi/__init__.py
similarity index 100%
rename from metagpt/prompts/ci/__init__.py
rename to metagpt/prompts/mi/__init__.py
diff --git a/metagpt/prompts/ci/ml_action.py b/metagpt/prompts/mi/ml_action.py
similarity index 100%
rename from metagpt/prompts/ci/ml_action.py
rename to metagpt/prompts/mi/ml_action.py
diff --git a/metagpt/prompts/ci/write_analysis_code.py b/metagpt/prompts/mi/write_analysis_code.py
similarity index 100%
rename from metagpt/prompts/ci/write_analysis_code.py
rename to metagpt/prompts/mi/write_analysis_code.py
diff --git a/metagpt/roles/ci/__init__.py b/metagpt/roles/mi/__init__.py
similarity index 100%
rename from metagpt/roles/ci/__init__.py
rename to metagpt/roles/mi/__init__.py
diff --git a/metagpt/roles/ci/code_interpreter.py b/metagpt/roles/mi/interpreter.py
similarity index 91%
rename from metagpt/roles/ci/code_interpreter.py
rename to metagpt/roles/mi/interpreter.py
index 404c93b81..fa50098e9 100644
--- a/metagpt/roles/ci/code_interpreter.py
+++ b/metagpt/roles/mi/interpreter.py
@@ -2,9 +2,9 @@ from __future__ import annotations
from pydantic import Field
-from metagpt.actions.ci.ask_review import ReviewConst
-from metagpt.actions.ci.execute_nb_code import ExecuteNbCode
-from metagpt.actions.ci.write_analysis_code import (
+from metagpt.actions.mi.ask_review import ReviewConst
+from metagpt.actions.mi.execute_nb_code import ExecuteNbCode
+from metagpt.actions.mi.write_analysis_code import (
WriteCodeWithoutTools,
WriteCodeWithTools,
)
@@ -13,9 +13,9 @@ from metagpt.roles import Role
from metagpt.schema import Message, Task, TaskResult
-class CodeInterpreter(Role):
- name: str = "Charlie"
- profile: str = "CodeInterpreter"
+class Interpreter(Role):
+ name: str = "Ivy"
+ profile: str = "Interpreter"
auto_run: bool = True
use_tools: bool = False
execute_code: ExecuteNbCode = Field(default_factory=ExecuteNbCode, exclude=True)
diff --git a/metagpt/roles/ci/ml_engineer.py b/metagpt/roles/mi/ml_engineer.py
similarity index 89%
rename from metagpt/roles/ci/ml_engineer.py
rename to metagpt/roles/mi/ml_engineer.py
index f8bcb2c89..78d605d3e 100644
--- a/metagpt/roles/ci/ml_engineer.py
+++ b/metagpt/roles/mi/ml_engineer.py
@@ -1,13 +1,13 @@
-from metagpt.actions.ci.debug_code import DebugCode
-from metagpt.actions.ci.execute_nb_code import ExecuteNbCode
-from metagpt.actions.ci.ml_action import UpdateDataColumns, WriteCodeWithToolsML
+from metagpt.actions.mi.debug_code import DebugCode
+from metagpt.actions.mi.execute_nb_code import ExecuteNbCode
+from metagpt.actions.mi.ml_action import UpdateDataColumns, WriteCodeWithToolsML
from metagpt.logs import logger
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
from metagpt.tools.tool_type import ToolType
from metagpt.utils.common import any_to_str
-class MLEngineer(CodeInterpreter):
+class MLEngineer(Interpreter):
name: str = "Mark"
profile: str = "MLEngineer"
debug_context: list = []
diff --git a/metagpt/strategy/planner.py b/metagpt/strategy/planner.py
index fd635df39..851d45893 100644
--- a/metagpt/strategy/planner.py
+++ b/metagpt/strategy/planner.py
@@ -4,8 +4,8 @@ import json
from pydantic import BaseModel, Field
-from metagpt.actions.ci.ask_review import AskReview, ReviewConst
-from metagpt.actions.ci.write_plan import (
+from metagpt.actions.mi.ask_review import AskReview, ReviewConst
+from metagpt.actions.mi.write_plan import (
WritePlan,
precheck_update_plan_from_rsp,
update_plan_from_rsp,
diff --git a/metagpt/strategy/solver.py b/metagpt/strategy/solver.py
index e7d61a881..ab16d82bd 100644
--- a/metagpt/strategy/solver.py
+++ b/metagpt/strategy/solver.py
@@ -49,8 +49,8 @@ class TOTSolver(BaseSolver):
raise NotImplementedError
-class CodeInterpreterSolver(BaseSolver):
- """CodeInterpreterSolver: Write&Run code in the graph"""
+class InterpreterSolver(BaseSolver):
+ """InterpreterSolver: Write&Run code in the graph"""
async def solve(self):
raise NotImplementedError
diff --git a/tests/metagpt/actions/ci/test_ask_review.py b/tests/metagpt/actions/mi/test_ask_review.py
similarity index 84%
rename from tests/metagpt/actions/ci/test_ask_review.py
rename to tests/metagpt/actions/mi/test_ask_review.py
index 4f02fe10b..92e8bd046 100644
--- a/tests/metagpt/actions/ci/test_ask_review.py
+++ b/tests/metagpt/actions/mi/test_ask_review.py
@@ -1,6 +1,6 @@
import pytest
-from metagpt.actions.ci.ask_review import AskReview
+from metagpt.actions.mi.ask_review import AskReview
@pytest.mark.asyncio
diff --git a/tests/metagpt/actions/ci/test_debug_code.py b/tests/metagpt/actions/mi/test_debug_code.py
similarity index 96%
rename from tests/metagpt/actions/ci/test_debug_code.py
rename to tests/metagpt/actions/mi/test_debug_code.py
index 0307ac17e..24cfcef10 100644
--- a/tests/metagpt/actions/ci/test_debug_code.py
+++ b/tests/metagpt/actions/mi/test_debug_code.py
@@ -5,7 +5,7 @@
import pytest
-from metagpt.actions.ci.debug_code import DebugCode
+from metagpt.actions.mi.debug_code import DebugCode
from metagpt.schema import Message
ErrorStr = """Tested passed:
diff --git a/tests/metagpt/actions/ci/test_execute_nb_code.py b/tests/metagpt/actions/mi/test_execute_nb_code.py
similarity index 98%
rename from tests/metagpt/actions/ci/test_execute_nb_code.py
rename to tests/metagpt/actions/mi/test_execute_nb_code.py
index 72a85dd08..59a814054 100644
--- a/tests/metagpt/actions/ci/test_execute_nb_code.py
+++ b/tests/metagpt/actions/mi/test_execute_nb_code.py
@@ -1,6 +1,6 @@
import pytest
-from metagpt.actions.ci.execute_nb_code import ExecuteNbCode, truncate
+from metagpt.actions.mi.execute_nb_code import ExecuteNbCode, truncate
@pytest.mark.asyncio
diff --git a/tests/metagpt/actions/ci/test_ml_action.py b/tests/metagpt/actions/mi/test_ml_action.py
similarity index 95%
rename from tests/metagpt/actions/ci/test_ml_action.py
rename to tests/metagpt/actions/mi/test_ml_action.py
index 5d9507094..27d47b0e3 100644
--- a/tests/metagpt/actions/ci/test_ml_action.py
+++ b/tests/metagpt/actions/mi/test_ml_action.py
@@ -1,6 +1,6 @@
import pytest
-from metagpt.actions.ci.ml_action import WriteCodeWithToolsML
+from metagpt.actions.mi.ml_action import WriteCodeWithToolsML
from metagpt.schema import Plan, Task
diff --git a/tests/metagpt/actions/ci/test_write_analysis_code.py b/tests/metagpt/actions/mi/test_write_analysis_code.py
similarity index 99%
rename from tests/metagpt/actions/ci/test_write_analysis_code.py
rename to tests/metagpt/actions/mi/test_write_analysis_code.py
index 95c7dfca8..6c2228222 100644
--- a/tests/metagpt/actions/ci/test_write_analysis_code.py
+++ b/tests/metagpt/actions/mi/test_write_analysis_code.py
@@ -2,8 +2,8 @@ import asyncio
import pytest
-from metagpt.actions.ci.execute_nb_code import ExecuteNbCode
-from metagpt.actions.ci.write_analysis_code import (
+from metagpt.actions.mi.execute_nb_code import ExecuteNbCode
+from metagpt.actions.mi.write_analysis_code import (
WriteCodeWithoutTools,
WriteCodeWithTools,
)
diff --git a/tests/metagpt/actions/ci/test_write_plan.py b/tests/metagpt/actions/mi/test_write_plan.py
similarity index 95%
rename from tests/metagpt/actions/ci/test_write_plan.py
rename to tests/metagpt/actions/mi/test_write_plan.py
index 3eb80ca3e..97632ea44 100644
--- a/tests/metagpt/actions/ci/test_write_plan.py
+++ b/tests/metagpt/actions/mi/test_write_plan.py
@@ -1,6 +1,6 @@
import pytest
-from metagpt.actions.ci.write_plan import (
+from metagpt.actions.mi.write_plan import (
Plan,
Task,
WritePlan,
diff --git a/tests/metagpt/roles/ci/test_code_interpreter.py b/tests/metagpt/roles/mi/test_interpreter.py
similarity index 59%
rename from tests/metagpt/roles/ci/test_code_interpreter.py
rename to tests/metagpt/roles/mi/test_interpreter.py
index 9d2f2429b..3bae4a1ac 100644
--- a/tests/metagpt/roles/ci/test_code_interpreter.py
+++ b/tests/metagpt/roles/mi/test_interpreter.py
@@ -1,23 +1,23 @@
import pytest
from metagpt.logs import logger
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
@pytest.mark.asyncio
@pytest.mark.parametrize("auto_run", [(True), (False)])
-async def test_code_interpreter(mocker, auto_run):
- mocker.patch("metagpt.actions.ci.execute_nb_code.ExecuteNbCode.run", return_value=("a successful run", True))
+async def test_interpreter(mocker, auto_run):
+ mocker.patch("metagpt.actions.mi.execute_nb_code.ExecuteNbCode.run", return_value=("a successful run", True))
mocker.patch("builtins.input", return_value="confirm")
requirement = "Run data analysis on sklearn Iris dataset, include a plot"
tools = []
- ci = CodeInterpreter(auto_run=auto_run, use_tools=True, tools=tools)
- rsp = await ci.run(requirement)
+ mi = Interpreter(auto_run=auto_run, use_tools=True, tools=tools)
+ rsp = await mi.run(requirement)
logger.info(rsp)
assert len(rsp.content) > 0
- finished_tasks = ci.planner.plan.get_finished_tasks()
+ finished_tasks = mi.planner.plan.get_finished_tasks()
assert len(finished_tasks) > 0
assert len(finished_tasks[0].code) > 0 # check one task to see if code is recorded
diff --git a/tests/metagpt/roles/ci/test_ml_engineer.py b/tests/metagpt/roles/mi/test_ml_engineer.py
similarity index 91%
rename from tests/metagpt/roles/ci/test_ml_engineer.py
rename to tests/metagpt/roles/mi/test_ml_engineer.py
index 3bf9f3b92..921ac8822 100644
--- a/tests/metagpt/roles/ci/test_ml_engineer.py
+++ b/tests/metagpt/roles/mi/test_ml_engineer.py
@@ -1,16 +1,16 @@
import pytest
-from metagpt.actions.ci.execute_nb_code import ExecuteNbCode
+from metagpt.actions.mi.execute_nb_code import ExecuteNbCode
from metagpt.logs import logger
-from metagpt.roles.ci.ml_engineer import MLEngineer
+from metagpt.roles.mi.ml_engineer import MLEngineer
from metagpt.schema import Message, Plan, Task
from metagpt.tools.tool_type import ToolType
-from tests.metagpt.actions.ci.test_debug_code import CODE, DebugContext, ErrorStr
+from tests.metagpt.actions.mi.test_debug_code import CODE, DebugContext, ErrorStr
def test_mle_init():
- ci = MLEngineer(goal="test", auto_run=True, use_tools=True, tools=["tool1", "tool2"])
- assert ci.tools == []
+ mle = MLEngineer(goal="test", auto_run=True, use_tools=True, tools=["tool1", "tool2"])
+ assert mle.tools == []
MockPlan = Plan(
diff --git a/tests/metagpt/utils/test_save_code.py b/tests/metagpt/utils/test_save_code.py
index 35ad84baf..9df2650f3 100644
--- a/tests/metagpt/utils/test_save_code.py
+++ b/tests/metagpt/utils/test_save_code.py
@@ -6,7 +6,7 @@
import nbformat
import pytest
-from metagpt.actions.ci.execute_nb_code import ExecuteNbCode
+from metagpt.actions.mi.execute_nb_code import ExecuteNbCode
from metagpt.utils.common import read_json_file
from metagpt.utils.save_code import DATA_PATH, save_code_file
From 2fc18b49f9707c4004d854720bc11e5579f82464 Mon Sep 17 00:00:00 2001
From: yzlin
Date: Sun, 18 Feb 2024 17:06:39 +0800
Subject: [PATCH 055/104] unified Interpreter naming
---
README.md | 2 +-
examples/ci/README.md | 18 ------------------
examples/mi/README.md | 18 ++++++++++++++++++
examples/{ci => mi}/crawl_webpage.py | 6 +++---
examples/{ci => mi}/data_visualization.py | 6 +++---
examples/{ci => mi}/email_summary.py | 6 +++---
examples/{ci => mi}/imitate_webpage.py | 6 +++---
examples/{ci => mi}/machine_learning.py | 6 +++---
examples/{ci => mi}/ml_engineer_with_tools.py | 2 +-
examples/{ci => mi}/ocr_receipt.py | 8 ++++----
examples/{ci => mi}/rm_image_background.py | 6 +++---
examples/{ci => mi}/sd_tool_usage.py | 6 +++---
examples/{ci => mi}/solve_math_problems.py | 6 +++---
metagpt/actions/__init__.py | 6 +++---
metagpt/actions/{ci => mi}/__init__.py | 0
metagpt/actions/{ci => mi}/ask_review.py | 0
metagpt/actions/{ci => mi}/debug_code.py | 2 +-
metagpt/actions/{ci => mi}/execute_nb_code.py | 0
metagpt/actions/{ci => mi}/ml_action.py | 6 +++---
.../actions/{ci => mi}/write_analysis_code.py | 2 +-
metagpt/actions/{ci => mi}/write_plan.py | 2 +-
metagpt/prompts/{ci => mi}/__init__.py | 0
metagpt/prompts/{ci => mi}/ml_action.py | 0
.../prompts/{ci => mi}/write_analysis_code.py | 0
metagpt/roles/{ci => mi}/__init__.py | 0
.../code_interpreter.py => mi/interpreter.py} | 12 ++++++------
metagpt/roles/{ci => mi}/ml_engineer.py | 10 +++++-----
metagpt/strategy/planner.py | 4 ++--
metagpt/strategy/solver.py | 4 ++--
.../actions/{ci => mi}/test_ask_review.py | 2 +-
.../actions/{ci => mi}/test_debug_code.py | 2 +-
.../actions/{ci => mi}/test_execute_nb_code.py | 2 +-
.../actions/{ci => mi}/test_ml_action.py | 2 +-
.../{ci => mi}/test_write_analysis_code.py | 4 ++--
.../actions/{ci => mi}/test_write_plan.py | 2 +-
.../test_interpreter.py} | 12 ++++++------
.../roles/{ci => mi}/test_ml_engineer.py | 10 +++++-----
tests/metagpt/utils/test_save_code.py | 2 +-
38 files changed, 91 insertions(+), 91 deletions(-)
delete mode 100644 examples/ci/README.md
create mode 100644 examples/mi/README.md
rename examples/{ci => mi}/crawl_webpage.py (76%)
rename examples/{ci => mi}/data_visualization.py (55%)
rename examples/{ci => mi}/email_summary.py (90%)
rename examples/{ci => mi}/imitate_webpage.py (81%)
rename examples/{ci => mi}/machine_learning.py (66%)
rename examples/{ci => mi}/ml_engineer_with_tools.py (94%)
rename examples/{ci => mi}/ocr_receipt.py (72%)
rename examples/{ci => mi}/rm_image_background.py (71%)
rename examples/{ci => mi}/sd_tool_usage.py (70%)
rename examples/{ci => mi}/solve_math_problems.py (66%)
rename metagpt/actions/{ci => mi}/__init__.py (100%)
rename metagpt/actions/{ci => mi}/ask_review.py (100%)
rename metagpt/actions/{ci => mi}/debug_code.py (98%)
rename metagpt/actions/{ci => mi}/execute_nb_code.py (100%)
rename metagpt/actions/{ci => mi}/ml_action.py (93%)
rename metagpt/actions/{ci => mi}/write_analysis_code.py (99%)
rename metagpt/actions/{ci => mi}/write_plan.py (98%)
rename metagpt/prompts/{ci => mi}/__init__.py (100%)
rename metagpt/prompts/{ci => mi}/ml_action.py (100%)
rename metagpt/prompts/{ci => mi}/write_analysis_code.py (100%)
rename metagpt/roles/{ci => mi}/__init__.py (100%)
rename metagpt/roles/{ci/code_interpreter.py => mi/interpreter.py} (91%)
rename metagpt/roles/{ci => mi}/ml_engineer.py (89%)
rename tests/metagpt/actions/{ci => mi}/test_ask_review.py (84%)
rename tests/metagpt/actions/{ci => mi}/test_debug_code.py (96%)
rename tests/metagpt/actions/{ci => mi}/test_execute_nb_code.py (98%)
rename tests/metagpt/actions/{ci => mi}/test_ml_action.py (95%)
rename tests/metagpt/actions/{ci => mi}/test_write_analysis_code.py (99%)
rename tests/metagpt/actions/{ci => mi}/test_write_plan.py (95%)
rename tests/metagpt/roles/{ci/test_code_interpreter.py => mi/test_interpreter.py} (59%)
rename tests/metagpt/roles/{ci => mi}/test_ml_engineer.py (91%)
diff --git a/README.md b/README.md
index d3a035abe..a1aa5ded8 100644
--- a/README.md
+++ b/README.md
@@ -26,7 +26,7 @@ # MetaGPT: The Multi-Agent Framework
## News
-🚀 Feb. 08, 2024: [v0.7.0](https://github.com/geekan/MetaGPT/releases/tag/v0.7.0) released, supporting assigning different LLMs to different Roles. We also introduced CodeInterpreter, a powerful agent capable of solving a wide range of real-world problems.
+🚀 Feb. 08, 2024: [v0.7.0](https://github.com/geekan/MetaGPT/releases/tag/v0.7.0) released, supporting assigning different LLMs to different Roles. We also introduced [Interpreter](https://github.com/geekan/MetaGPT/blob/main/examples/mi/README.md), a powerful agent capable of solving a wide range of real-world problems.
🚀 Jan. 16, 2024: Our paper [MetaGPT: Meta Programming for A Multi-Agent Collaborative Framework
](https://arxiv.org/abs/2308.00352) accepted for oral presentation **(top 1.2%)** at ICLR 2024, **ranking #1** in the LLM-based Agent category.
diff --git a/examples/ci/README.md b/examples/ci/README.md
deleted file mode 100644
index d526b94c7..000000000
--- a/examples/ci/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Code Interpreter (CI)
-
-## What is CodeInterpreter
-CodeInterpreter is an agent who solves problems through codes. It understands user requirements, makes plans, writes codes for execution, and uses tools if necessary. These capabilities enable it to tackle a wide range of scenarios, please check out the examples below.
-
-## Example List
-- Data visualization
-- Machine learning modeling
-- Image background removal
-- Solve math problems
-- Receipt OCR
-- Tool usage: web page imitation
-- Tool usage: web crawling
-- Tool usage: text2image
-- Tool usage: email summarization and response
-- More on the way!
-
-Please see [here](https://docs.deepwisdom.ai/main/en/guide/use_cases/agent/code_interpreter/ci_intro.html) for detailed explanation.
\ No newline at end of file
diff --git a/examples/mi/README.md b/examples/mi/README.md
new file mode 100644
index 000000000..1734ba388
--- /dev/null
+++ b/examples/mi/README.md
@@ -0,0 +1,18 @@
+# MetaGPT Interpreter (MI)
+
+## What is Interpreter
+Interpreter is an agent who solves problems through codes. It understands user requirements, makes plans, writes codes for execution, and uses tools if necessary. These capabilities enable it to tackle a wide range of scenarios, please check out the examples below.
+
+## Example List
+- Data visualization
+- Machine learning modeling
+- Image background removal
+- Solve math problems
+- Receipt OCR
+- Tool usage: web page imitation
+- Tool usage: web crawling
+- Tool usage: text2image
+- Tool usage: email summarization and response
+- More on the way!
+
+Please see [here](https://docs.deepwisdom.ai/main/en/guide/use_cases/agent/interpreter/mi_intro.html) for detailed explanation.
\ No newline at end of file
diff --git a/examples/ci/crawl_webpage.py b/examples/mi/crawl_webpage.py
similarity index 76%
rename from examples/ci/crawl_webpage.py
rename to examples/mi/crawl_webpage.py
index 2db9e407b..b5d2fb3d0 100644
--- a/examples/ci/crawl_webpage.py
+++ b/examples/mi/crawl_webpage.py
@@ -5,15 +5,15 @@
@File : crawl_webpage.py
"""
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
async def main():
prompt = """Get data from `paperlist` table in https://papercopilot.com/statistics/iclr-statistics/iclr-2024-statistics/,
and save it to a csv file. paper title must include `multiagent` or `large language model`. *notice: print key variables*"""
- ci = CodeInterpreter(goal=prompt, use_tools=True)
+ mi = Interpreter(use_tools=True)
- await ci.run(prompt)
+ await mi.run(prompt)
if __name__ == "__main__":
diff --git a/examples/ci/data_visualization.py b/examples/mi/data_visualization.py
similarity index 55%
rename from examples/ci/data_visualization.py
rename to examples/mi/data_visualization.py
index 8dc4340d4..2e4acc9b4 100644
--- a/examples/ci/data_visualization.py
+++ b/examples/mi/data_visualization.py
@@ -1,11 +1,11 @@
import asyncio
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
async def main(requirement: str = ""):
- code_interpreter = CodeInterpreter(use_tools=False)
- await code_interpreter.run(requirement)
+ mi = Interpreter(use_tools=False)
+ await mi.run(requirement)
if __name__ == "__main__":
diff --git a/examples/ci/email_summary.py b/examples/mi/email_summary.py
similarity index 90%
rename from examples/ci/email_summary.py
rename to examples/mi/email_summary.py
index d6fa283ca..e1511c5b0 100644
--- a/examples/ci/email_summary.py
+++ b/examples/mi/email_summary.py
@@ -6,7 +6,7 @@
"""
import os
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
async def main():
@@ -22,9 +22,9 @@ async def main():
Firstly, Please help me fetch the latest 5 senders and full letter contents.
Then, summarize each of the 5 emails into one sentence (you can do this by yourself, no need to import other models to do this) and output them in a markdown format."""
- ci = CodeInterpreter(use_tools=True)
+ mi = Interpreter(use_tools=True)
- await ci.run(prompt)
+ await mi.run(prompt)
if __name__ == "__main__":
diff --git a/examples/ci/imitate_webpage.py b/examples/mi/imitate_webpage.py
similarity index 81%
rename from examples/ci/imitate_webpage.py
rename to examples/mi/imitate_webpage.py
index 6a83d3a33..0e9ca731d 100644
--- a/examples/ci/imitate_webpage.py
+++ b/examples/mi/imitate_webpage.py
@@ -5,7 +5,7 @@
@Author : mannaandpoem
@File : imitate_webpage.py
"""
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
async def main():
@@ -15,9 +15,9 @@ Firstly, utilize Selenium and WebDriver for rendering.
Secondly, convert image to a webpage including HTML, CSS and JS in one go.
Finally, save webpage in a text file.
Note: All required dependencies and environments have been fully installed and configured."""
- ci = CodeInterpreter(use_tools=True)
+ mi = Interpreter(use_tools=True)
- await ci.run(prompt)
+ await mi.run(prompt)
if __name__ == "__main__":
diff --git a/examples/ci/machine_learning.py b/examples/mi/machine_learning.py
similarity index 66%
rename from examples/ci/machine_learning.py
rename to examples/mi/machine_learning.py
index 9eda981ac..ee3b47f26 100644
--- a/examples/ci/machine_learning.py
+++ b/examples/mi/machine_learning.py
@@ -1,11 +1,11 @@
import asyncio
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
async def main(requirement: str):
- role = CodeInterpreter(auto_run=True, use_tools=False)
- await role.run(requirement)
+ mi = Interpreter(auto_run=True, use_tools=False)
+ await mi.run(requirement)
if __name__ == "__main__":
diff --git a/examples/ci/ml_engineer_with_tools.py b/examples/mi/ml_engineer_with_tools.py
similarity index 94%
rename from examples/ci/ml_engineer_with_tools.py
rename to examples/mi/ml_engineer_with_tools.py
index 66d37316b..9d0e7f951 100644
--- a/examples/ci/ml_engineer_with_tools.py
+++ b/examples/mi/ml_engineer_with_tools.py
@@ -1,6 +1,6 @@
import asyncio
-from metagpt.roles.ci.ml_engineer import MLEngineer
+from metagpt.roles.mi.ml_engineer import MLEngineer
async def main(requirement: str):
diff --git a/examples/ci/ocr_receipt.py b/examples/mi/ocr_receipt.py
similarity index 72%
rename from examples/ci/ocr_receipt.py
rename to examples/mi/ocr_receipt.py
index dc2db2d01..ffa5cff05 100644
--- a/examples/ci/ocr_receipt.py
+++ b/examples/mi/ocr_receipt.py
@@ -1,16 +1,16 @@
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
async def main():
# Notice: pip install metagpt[ocr] before using this example
image_path = "image.jpg"
language = "English"
- requirement = f"""This is a {language} invoice image.
+ requirement = f"""This is a {language} receipt image.
Your goal is to perform OCR on images using PaddleOCR, then extract the total amount from ocr text results, and finally save as table. Image path: {image_path}.
NOTE: The environments for Paddle and PaddleOCR are all ready and has been fully installed."""
- ci = CodeInterpreter()
+ mi = Interpreter()
- await ci.run(requirement)
+ await mi.run(requirement)
if __name__ == "__main__":
diff --git a/examples/ci/rm_image_background.py b/examples/mi/rm_image_background.py
similarity index 71%
rename from examples/ci/rm_image_background.py
rename to examples/mi/rm_image_background.py
index 83dbbdc77..57e89b103 100644
--- a/examples/ci/rm_image_background.py
+++ b/examples/mi/rm_image_background.py
@@ -1,11 +1,11 @@
import asyncio
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
async def main(requirement: str = ""):
- code_interpreter = CodeInterpreter(use_tools=False)
- await code_interpreter.run(requirement)
+ mi = Interpreter(use_tools=False)
+ await mi.run(requirement)
if __name__ == "__main__":
diff --git a/examples/ci/sd_tool_usage.py b/examples/mi/sd_tool_usage.py
similarity index 70%
rename from examples/ci/sd_tool_usage.py
rename to examples/mi/sd_tool_usage.py
index b4642af23..f8507a411 100644
--- a/examples/ci/sd_tool_usage.py
+++ b/examples/mi/sd_tool_usage.py
@@ -4,12 +4,12 @@
# @Desc :
import asyncio
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
async def main(requirement: str = ""):
- code_interpreter = CodeInterpreter(use_tools=True, goal=requirement)
- await code_interpreter.run(requirement)
+ mi = Interpreter(use_tools=True, goal=requirement)
+ await mi.run(requirement)
if __name__ == "__main__":
diff --git a/examples/ci/solve_math_problems.py b/examples/mi/solve_math_problems.py
similarity index 66%
rename from examples/ci/solve_math_problems.py
rename to examples/mi/solve_math_problems.py
index 8c660975b..dce2edb00 100644
--- a/examples/ci/solve_math_problems.py
+++ b/examples/mi/solve_math_problems.py
@@ -1,11 +1,11 @@
import asyncio
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
async def main(requirement: str = ""):
- code_interpreter = CodeInterpreter(use_tools=False)
- await code_interpreter.run(requirement)
+ mi = Interpreter(use_tools=False)
+ await mi.run(requirement)
if __name__ == "__main__":
diff --git a/metagpt/actions/__init__.py b/metagpt/actions/__init__.py
index 363b4fd33..19a7c10d5 100644
--- a/metagpt/actions/__init__.py
+++ b/metagpt/actions/__init__.py
@@ -22,9 +22,9 @@ from metagpt.actions.write_code_review import WriteCodeReview
from metagpt.actions.write_prd import WritePRD
from metagpt.actions.write_prd_review import WritePRDReview
from metagpt.actions.write_test import WriteTest
-from metagpt.actions.ci.execute_nb_code import ExecuteNbCode
-from metagpt.actions.ci.write_analysis_code import WriteCodeWithoutTools, WriteCodeWithTools
-from metagpt.actions.ci.write_plan import WritePlan
+from metagpt.actions.mi.execute_nb_code import ExecuteNbCode
+from metagpt.actions.mi.write_analysis_code import WriteCodeWithoutTools, WriteCodeWithTools
+from metagpt.actions.mi.write_plan import WritePlan
class ActionType(Enum):
diff --git a/metagpt/actions/ci/__init__.py b/metagpt/actions/mi/__init__.py
similarity index 100%
rename from metagpt/actions/ci/__init__.py
rename to metagpt/actions/mi/__init__.py
diff --git a/metagpt/actions/ci/ask_review.py b/metagpt/actions/mi/ask_review.py
similarity index 100%
rename from metagpt/actions/ci/ask_review.py
rename to metagpt/actions/mi/ask_review.py
diff --git a/metagpt/actions/ci/debug_code.py b/metagpt/actions/mi/debug_code.py
similarity index 98%
rename from metagpt/actions/ci/debug_code.py
rename to metagpt/actions/mi/debug_code.py
index 4a6617dc6..db3b7a9db 100644
--- a/metagpt/actions/ci/debug_code.py
+++ b/metagpt/actions/mi/debug_code.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from metagpt.actions.ci.write_analysis_code import BaseWriteAnalysisCode
+from metagpt.actions.mi.write_analysis_code import BaseWriteAnalysisCode
from metagpt.logs import logger
from metagpt.schema import Message
from metagpt.utils.common import create_func_call_config
diff --git a/metagpt/actions/ci/execute_nb_code.py b/metagpt/actions/mi/execute_nb_code.py
similarity index 100%
rename from metagpt/actions/ci/execute_nb_code.py
rename to metagpt/actions/mi/execute_nb_code.py
diff --git a/metagpt/actions/ci/ml_action.py b/metagpt/actions/mi/ml_action.py
similarity index 93%
rename from metagpt/actions/ci/ml_action.py
rename to metagpt/actions/mi/ml_action.py
index e18d0fd20..60b2fb547 100644
--- a/metagpt/actions/ci/ml_action.py
+++ b/metagpt/actions/mi/ml_action.py
@@ -3,14 +3,14 @@ from __future__ import annotations
from typing import Tuple
from metagpt.actions import Action
-from metagpt.actions.ci.write_analysis_code import WriteCodeWithTools
-from metagpt.prompts.ci.ml_action import (
+from metagpt.actions.mi.write_analysis_code import WriteCodeWithTools
+from metagpt.prompts.mi.ml_action import (
ML_GENERATE_CODE_PROMPT,
ML_TOOL_USAGE_PROMPT,
PRINT_DATA_COLUMNS,
UPDATE_DATA_COLUMNS,
)
-from metagpt.prompts.ci.write_analysis_code import CODE_GENERATOR_WITH_TOOLS
+from metagpt.prompts.mi.write_analysis_code import CODE_GENERATOR_WITH_TOOLS
from metagpt.schema import Message, Plan
from metagpt.utils.common import create_func_call_config, remove_comments
diff --git a/metagpt/actions/ci/write_analysis_code.py b/metagpt/actions/mi/write_analysis_code.py
similarity index 99%
rename from metagpt/actions/ci/write_analysis_code.py
rename to metagpt/actions/mi/write_analysis_code.py
index 421eb0804..b3d0632b6 100644
--- a/metagpt/actions/ci/write_analysis_code.py
+++ b/metagpt/actions/mi/write_analysis_code.py
@@ -10,7 +10,7 @@ from typing import Tuple
from metagpt.actions import Action
from metagpt.logs import logger
-from metagpt.prompts.ci.write_analysis_code import (
+from metagpt.prompts.mi.write_analysis_code import (
CODE_GENERATOR_WITH_TOOLS,
SELECT_FUNCTION_TOOLS,
TOOL_RECOMMENDATION_PROMPT,
diff --git a/metagpt/actions/ci/write_plan.py b/metagpt/actions/mi/write_plan.py
similarity index 98%
rename from metagpt/actions/ci/write_plan.py
rename to metagpt/actions/mi/write_plan.py
index dd9363260..8067d7b87 100644
--- a/metagpt/actions/ci/write_plan.py
+++ b/metagpt/actions/mi/write_plan.py
@@ -12,7 +12,7 @@ from typing import Tuple
from metagpt.actions import Action
from metagpt.logs import logger
-from metagpt.prompts.ci.write_analysis_code import (
+from metagpt.prompts.mi.write_analysis_code import (
ASSIGN_TASK_TYPE_CONFIG,
ASSIGN_TASK_TYPE_PROMPT,
)
diff --git a/metagpt/prompts/ci/__init__.py b/metagpt/prompts/mi/__init__.py
similarity index 100%
rename from metagpt/prompts/ci/__init__.py
rename to metagpt/prompts/mi/__init__.py
diff --git a/metagpt/prompts/ci/ml_action.py b/metagpt/prompts/mi/ml_action.py
similarity index 100%
rename from metagpt/prompts/ci/ml_action.py
rename to metagpt/prompts/mi/ml_action.py
diff --git a/metagpt/prompts/ci/write_analysis_code.py b/metagpt/prompts/mi/write_analysis_code.py
similarity index 100%
rename from metagpt/prompts/ci/write_analysis_code.py
rename to metagpt/prompts/mi/write_analysis_code.py
diff --git a/metagpt/roles/ci/__init__.py b/metagpt/roles/mi/__init__.py
similarity index 100%
rename from metagpt/roles/ci/__init__.py
rename to metagpt/roles/mi/__init__.py
diff --git a/metagpt/roles/ci/code_interpreter.py b/metagpt/roles/mi/interpreter.py
similarity index 91%
rename from metagpt/roles/ci/code_interpreter.py
rename to metagpt/roles/mi/interpreter.py
index 404c93b81..fa50098e9 100644
--- a/metagpt/roles/ci/code_interpreter.py
+++ b/metagpt/roles/mi/interpreter.py
@@ -2,9 +2,9 @@ from __future__ import annotations
from pydantic import Field
-from metagpt.actions.ci.ask_review import ReviewConst
-from metagpt.actions.ci.execute_nb_code import ExecuteNbCode
-from metagpt.actions.ci.write_analysis_code import (
+from metagpt.actions.mi.ask_review import ReviewConst
+from metagpt.actions.mi.execute_nb_code import ExecuteNbCode
+from metagpt.actions.mi.write_analysis_code import (
WriteCodeWithoutTools,
WriteCodeWithTools,
)
@@ -13,9 +13,9 @@ from metagpt.roles import Role
from metagpt.schema import Message, Task, TaskResult
-class CodeInterpreter(Role):
- name: str = "Charlie"
- profile: str = "CodeInterpreter"
+class Interpreter(Role):
+ name: str = "Ivy"
+ profile: str = "Interpreter"
auto_run: bool = True
use_tools: bool = False
execute_code: ExecuteNbCode = Field(default_factory=ExecuteNbCode, exclude=True)
diff --git a/metagpt/roles/ci/ml_engineer.py b/metagpt/roles/mi/ml_engineer.py
similarity index 89%
rename from metagpt/roles/ci/ml_engineer.py
rename to metagpt/roles/mi/ml_engineer.py
index f8bcb2c89..78d605d3e 100644
--- a/metagpt/roles/ci/ml_engineer.py
+++ b/metagpt/roles/mi/ml_engineer.py
@@ -1,13 +1,13 @@
-from metagpt.actions.ci.debug_code import DebugCode
-from metagpt.actions.ci.execute_nb_code import ExecuteNbCode
-from metagpt.actions.ci.ml_action import UpdateDataColumns, WriteCodeWithToolsML
+from metagpt.actions.mi.debug_code import DebugCode
+from metagpt.actions.mi.execute_nb_code import ExecuteNbCode
+from metagpt.actions.mi.ml_action import UpdateDataColumns, WriteCodeWithToolsML
from metagpt.logs import logger
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
from metagpt.tools.tool_type import ToolType
from metagpt.utils.common import any_to_str
-class MLEngineer(CodeInterpreter):
+class MLEngineer(Interpreter):
name: str = "Mark"
profile: str = "MLEngineer"
debug_context: list = []
diff --git a/metagpt/strategy/planner.py b/metagpt/strategy/planner.py
index fd635df39..851d45893 100644
--- a/metagpt/strategy/planner.py
+++ b/metagpt/strategy/planner.py
@@ -4,8 +4,8 @@ import json
from pydantic import BaseModel, Field
-from metagpt.actions.ci.ask_review import AskReview, ReviewConst
-from metagpt.actions.ci.write_plan import (
+from metagpt.actions.mi.ask_review import AskReview, ReviewConst
+from metagpt.actions.mi.write_plan import (
WritePlan,
precheck_update_plan_from_rsp,
update_plan_from_rsp,
diff --git a/metagpt/strategy/solver.py b/metagpt/strategy/solver.py
index e7d61a881..ab16d82bd 100644
--- a/metagpt/strategy/solver.py
+++ b/metagpt/strategy/solver.py
@@ -49,8 +49,8 @@ class TOTSolver(BaseSolver):
raise NotImplementedError
-class CodeInterpreterSolver(BaseSolver):
- """CodeInterpreterSolver: Write&Run code in the graph"""
+class InterpreterSolver(BaseSolver):
+ """InterpreterSolver: Write&Run code in the graph"""
async def solve(self):
raise NotImplementedError
diff --git a/tests/metagpt/actions/ci/test_ask_review.py b/tests/metagpt/actions/mi/test_ask_review.py
similarity index 84%
rename from tests/metagpt/actions/ci/test_ask_review.py
rename to tests/metagpt/actions/mi/test_ask_review.py
index 4f02fe10b..92e8bd046 100644
--- a/tests/metagpt/actions/ci/test_ask_review.py
+++ b/tests/metagpt/actions/mi/test_ask_review.py
@@ -1,6 +1,6 @@
import pytest
-from metagpt.actions.ci.ask_review import AskReview
+from metagpt.actions.mi.ask_review import AskReview
@pytest.mark.asyncio
diff --git a/tests/metagpt/actions/ci/test_debug_code.py b/tests/metagpt/actions/mi/test_debug_code.py
similarity index 96%
rename from tests/metagpt/actions/ci/test_debug_code.py
rename to tests/metagpt/actions/mi/test_debug_code.py
index 0307ac17e..24cfcef10 100644
--- a/tests/metagpt/actions/ci/test_debug_code.py
+++ b/tests/metagpt/actions/mi/test_debug_code.py
@@ -5,7 +5,7 @@
import pytest
-from metagpt.actions.ci.debug_code import DebugCode
+from metagpt.actions.mi.debug_code import DebugCode
from metagpt.schema import Message
ErrorStr = """Tested passed:
diff --git a/tests/metagpt/actions/ci/test_execute_nb_code.py b/tests/metagpt/actions/mi/test_execute_nb_code.py
similarity index 98%
rename from tests/metagpt/actions/ci/test_execute_nb_code.py
rename to tests/metagpt/actions/mi/test_execute_nb_code.py
index 72a85dd08..59a814054 100644
--- a/tests/metagpt/actions/ci/test_execute_nb_code.py
+++ b/tests/metagpt/actions/mi/test_execute_nb_code.py
@@ -1,6 +1,6 @@
import pytest
-from metagpt.actions.ci.execute_nb_code import ExecuteNbCode, truncate
+from metagpt.actions.mi.execute_nb_code import ExecuteNbCode, truncate
@pytest.mark.asyncio
diff --git a/tests/metagpt/actions/ci/test_ml_action.py b/tests/metagpt/actions/mi/test_ml_action.py
similarity index 95%
rename from tests/metagpt/actions/ci/test_ml_action.py
rename to tests/metagpt/actions/mi/test_ml_action.py
index 5d9507094..27d47b0e3 100644
--- a/tests/metagpt/actions/ci/test_ml_action.py
+++ b/tests/metagpt/actions/mi/test_ml_action.py
@@ -1,6 +1,6 @@
import pytest
-from metagpt.actions.ci.ml_action import WriteCodeWithToolsML
+from metagpt.actions.mi.ml_action import WriteCodeWithToolsML
from metagpt.schema import Plan, Task
diff --git a/tests/metagpt/actions/ci/test_write_analysis_code.py b/tests/metagpt/actions/mi/test_write_analysis_code.py
similarity index 99%
rename from tests/metagpt/actions/ci/test_write_analysis_code.py
rename to tests/metagpt/actions/mi/test_write_analysis_code.py
index 95c7dfca8..6c2228222 100644
--- a/tests/metagpt/actions/ci/test_write_analysis_code.py
+++ b/tests/metagpt/actions/mi/test_write_analysis_code.py
@@ -2,8 +2,8 @@ import asyncio
import pytest
-from metagpt.actions.ci.execute_nb_code import ExecuteNbCode
-from metagpt.actions.ci.write_analysis_code import (
+from metagpt.actions.mi.execute_nb_code import ExecuteNbCode
+from metagpt.actions.mi.write_analysis_code import (
WriteCodeWithoutTools,
WriteCodeWithTools,
)
diff --git a/tests/metagpt/actions/ci/test_write_plan.py b/tests/metagpt/actions/mi/test_write_plan.py
similarity index 95%
rename from tests/metagpt/actions/ci/test_write_plan.py
rename to tests/metagpt/actions/mi/test_write_plan.py
index 3eb80ca3e..97632ea44 100644
--- a/tests/metagpt/actions/ci/test_write_plan.py
+++ b/tests/metagpt/actions/mi/test_write_plan.py
@@ -1,6 +1,6 @@
import pytest
-from metagpt.actions.ci.write_plan import (
+from metagpt.actions.mi.write_plan import (
Plan,
Task,
WritePlan,
diff --git a/tests/metagpt/roles/ci/test_code_interpreter.py b/tests/metagpt/roles/mi/test_interpreter.py
similarity index 59%
rename from tests/metagpt/roles/ci/test_code_interpreter.py
rename to tests/metagpt/roles/mi/test_interpreter.py
index 9d2f2429b..3bae4a1ac 100644
--- a/tests/metagpt/roles/ci/test_code_interpreter.py
+++ b/tests/metagpt/roles/mi/test_interpreter.py
@@ -1,23 +1,23 @@
import pytest
from metagpt.logs import logger
-from metagpt.roles.ci.code_interpreter import CodeInterpreter
+from metagpt.roles.mi.interpreter import Interpreter
@pytest.mark.asyncio
@pytest.mark.parametrize("auto_run", [(True), (False)])
-async def test_code_interpreter(mocker, auto_run):
- mocker.patch("metagpt.actions.ci.execute_nb_code.ExecuteNbCode.run", return_value=("a successful run", True))
+async def test_interpreter(mocker, auto_run):
+ mocker.patch("metagpt.actions.mi.execute_nb_code.ExecuteNbCode.run", return_value=("a successful run", True))
mocker.patch("builtins.input", return_value="confirm")
requirement = "Run data analysis on sklearn Iris dataset, include a plot"
tools = []
- ci = CodeInterpreter(auto_run=auto_run, use_tools=True, tools=tools)
- rsp = await ci.run(requirement)
+ mi = Interpreter(auto_run=auto_run, use_tools=True, tools=tools)
+ rsp = await mi.run(requirement)
logger.info(rsp)
assert len(rsp.content) > 0
- finished_tasks = ci.planner.plan.get_finished_tasks()
+ finished_tasks = mi.planner.plan.get_finished_tasks()
assert len(finished_tasks) > 0
assert len(finished_tasks[0].code) > 0 # check one task to see if code is recorded
diff --git a/tests/metagpt/roles/ci/test_ml_engineer.py b/tests/metagpt/roles/mi/test_ml_engineer.py
similarity index 91%
rename from tests/metagpt/roles/ci/test_ml_engineer.py
rename to tests/metagpt/roles/mi/test_ml_engineer.py
index 3bf9f3b92..921ac8822 100644
--- a/tests/metagpt/roles/ci/test_ml_engineer.py
+++ b/tests/metagpt/roles/mi/test_ml_engineer.py
@@ -1,16 +1,16 @@
import pytest
-from metagpt.actions.ci.execute_nb_code import ExecuteNbCode
+from metagpt.actions.mi.execute_nb_code import ExecuteNbCode
from metagpt.logs import logger
-from metagpt.roles.ci.ml_engineer import MLEngineer
+from metagpt.roles.mi.ml_engineer import MLEngineer
from metagpt.schema import Message, Plan, Task
from metagpt.tools.tool_type import ToolType
-from tests.metagpt.actions.ci.test_debug_code import CODE, DebugContext, ErrorStr
+from tests.metagpt.actions.mi.test_debug_code import CODE, DebugContext, ErrorStr
def test_mle_init():
- ci = MLEngineer(goal="test", auto_run=True, use_tools=True, tools=["tool1", "tool2"])
- assert ci.tools == []
+ mle = MLEngineer(goal="test", auto_run=True, use_tools=True, tools=["tool1", "tool2"])
+ assert mle.tools == []
MockPlan = Plan(
diff --git a/tests/metagpt/utils/test_save_code.py b/tests/metagpt/utils/test_save_code.py
index 35ad84baf..9df2650f3 100644
--- a/tests/metagpt/utils/test_save_code.py
+++ b/tests/metagpt/utils/test_save_code.py
@@ -6,7 +6,7 @@
import nbformat
import pytest
-from metagpt.actions.ci.execute_nb_code import ExecuteNbCode
+from metagpt.actions.mi.execute_nb_code import ExecuteNbCode
from metagpt.utils.common import read_json_file
from metagpt.utils.save_code import DATA_PATH, save_code_file
From 10553b22080c674bdeb82987dcefa79c4fc9ec28 Mon Sep 17 00:00:00 2001
From: yzlin
Date: Sun, 18 Feb 2024 19:00:05 +0800
Subject: [PATCH 056/104] release v0.7.1
---
setup.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/setup.py b/setup.py
index be3956ea4..4efcebdc1 100644
--- a/setup.py
+++ b/setup.py
@@ -57,7 +57,7 @@ extras_require["dev"] = (["pylint~=3.0.3", "black~=23.3.0", "isort~=5.12.0", "pr
setup(
name="metagpt",
- version="0.7.0",
+ version="0.7.1",
description="The Multi-Agent Framework",
long_description=long_description,
long_description_content_type="text/markdown",
From cec879f71b84667297356b99e49ad68cf1f503db Mon Sep 17 00:00:00 2001
From: better629
Date: Sun, 18 Feb 2024 20:01:27 +0800
Subject: [PATCH 057/104] update repair llm with backslash problem
---
metagpt/utils/repair_llm_raw_output.py | 12 ++++++++++++
tests/metagpt/utils/test_repair_llm_raw_output.py | 5 +++++
2 files changed, 17 insertions(+)
diff --git a/metagpt/utils/repair_llm_raw_output.py b/metagpt/utils/repair_llm_raw_output.py
index 06484f71d..b8756e8c6 100644
--- a/metagpt/utils/repair_llm_raw_output.py
+++ b/metagpt/utils/repair_llm_raw_output.py
@@ -119,6 +119,7 @@ def repair_json_format(output: str) -> str:
logger.info(f"repair_json_format: {'}]'}")
elif output.startswith("{") and output.endswith("]"):
output = output[:-1] + "}"
+
# remove comments in output json string, after json value content, maybe start with #, maybe start with //
arr = output.split("\n")
new_arr = []
@@ -208,6 +209,17 @@ def repair_invalid_json(output: str, error: str) -> str:
elif (rline[col_no] in ["'", '"']) and (line.startswith('"') or line.startswith("'")) and "," not in line:
# problem, `"""` or `'''` without `,`
new_line = f",{line}"
+ elif col_no - 1 >= 0 and rline[col_no - 1] in ['"', "'"]:
+ # backslash problem like \" in the output
+ char = rline[col_no - 1]
+ nearest_char_idx = rline[col_no:].find(char)
+ new_line = (
+ rline[: col_no - 1]
+ + "\\"
+ + rline[col_no - 1 : col_no + nearest_char_idx]
+ + "\\"
+ + rline[col_no + nearest_char_idx :]
+ )
elif '",' not in line and "," not in line and '"' not in line:
new_line = f'{line}",'
elif not line.endswith(","):
diff --git a/tests/metagpt/utils/test_repair_llm_raw_output.py b/tests/metagpt/utils/test_repair_llm_raw_output.py
index e28423b91..7a29ea3ee 100644
--- a/tests/metagpt/utils/test_repair_llm_raw_output.py
+++ b/tests/metagpt/utils/test_repair_llm_raw_output.py
@@ -211,6 +211,11 @@ value
output = repair_invalid_json(output, "Expecting ',' delimiter: line 4 column 1")
assert output == target_output
+ raw_output = '{"key": "url "http" \\"https\\" "}'
+ target_output = '{"key": "url \\"http\\" \\"https\\" "}'
+ output = repair_invalid_json(raw_output, "Expecting ',' delimiter: line 1 column 15 (char 14)")
+ assert output == target_output
+
def test_retry_parse_json_text():
from metagpt.utils.repair_llm_raw_output import retry_parse_json_text
From 41d6d2be1d26518593c30ce8af9b9ddc865eeded Mon Sep 17 00:00:00 2001
From: better629
Date: Sun, 18 Feb 2024 20:02:42 +0800
Subject: [PATCH 058/104] update dashscope stream output
---
metagpt/provider/dashscope_api.py | 2 ++
requirements.txt | 4 ++--
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/metagpt/provider/dashscope_api.py b/metagpt/provider/dashscope_api.py
index 58031e452..c02a302e4 100644
--- a/metagpt/provider/dashscope_api.py
+++ b/metagpt/provider/dashscope_api.py
@@ -192,6 +192,8 @@ class DashScopeLLM(BaseLLM):
if self.config.temperature > 0:
# different model has default temperature. only set when it"s specified.
kwargs["temperature"] = self.config.temperature
+ if stream:
+ kwargs["incremental_output"] = True
return kwargs
def _check_response(self, resp: GenerationResponse):
diff --git a/requirements.txt b/requirements.txt
index cfac35717..9759248bb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -27,7 +27,7 @@ python_docx==0.8.11
PyYAML==6.0.1
# sentence_transformers==2.2.2
setuptools==65.6.3
-tenacity==8.2.2
+tenacity==8.2.3
tiktoken==0.5.2
tqdm==4.65.0
#unstructured[local-inference]
@@ -68,4 +68,4 @@ anytree
ipywidgets==8.1.1
Pillow
qianfan==0.3.1
-dashscope==1.14.1
+dashscope==1.14.1
\ No newline at end of file
From cf344c244a71da4d3fb48e838a2e05b5f32bd041 Mon Sep 17 00:00:00 2001
From: yzlin
Date: Mon, 19 Feb 2024 18:07:04 +0800
Subject: [PATCH 059/104] v0.7.1: fix a dependency error
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index c89a18928..4ea6dc5d2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,7 +14,7 @@ lancedb==0.4.0
langchain==0.0.352
loguru==0.6.0
meilisearch==0.21.0
-numpy>=1.24.3
+numpy>=1.24.3,<1.25.0
openai==1.6.0
openpyxl
beautifulsoup4==4.12.2
From fdc00e8d9f96324596898a0068f37a79c4470cd7 Mon Sep 17 00:00:00 2001
From: geekan
Date: Tue, 20 Feb 2024 15:34:47 +0800
Subject: [PATCH 060/104] fix example
---
examples/write_novel.py | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/examples/write_novel.py b/examples/write_novel.py
index b272a56e6..03daf1f0c 100644
--- a/examples/write_novel.py
+++ b/examples/write_novel.py
@@ -33,14 +33,25 @@ class Chapter(BaseModel):
content: str = Field(default="...", description="The content of the chapter. No more than 1000 words.")
+class Chapters(BaseModel):
+ chapters: List[Chapter] = Field(
+ default=[
+ {"name": "Chapter 1", "content": "..."},
+ {"name": "Chapter 2", "content": "..."},
+ {"name": "Chapter 3", "content": "..."},
+ ],
+ description="The chapters of the novel.",
+ )
+
+
async def generate_novel():
instruction = (
- "Write a novel named 'Harry Potter in The Lord of the Rings'. "
+ "Write a novel named 'Reborn in Skyrim'. "
"Fill the empty nodes with your own ideas. Be creative! Use your own words!"
"I will tip you $100,000 if you write a good novel."
)
novel_node = await ActionNode.from_pydantic(Novel).fill(context=instruction, llm=LLM())
- chap_node = await ActionNode.from_pydantic(Chapter).fill(
+ chap_node = await ActionNode.from_pydantic(Chapters).fill(
context=f"### instruction\n{instruction}\n### novel\n{novel_node.content}", llm=LLM()
)
print(chap_node.content)
From 54d06960ea3433ffdeb7d282c6d500959aa54b64 Mon Sep 17 00:00:00 2001
From: geekan
Date: Tue, 20 Feb 2024 15:35:55 +0800
Subject: [PATCH 061/104] fix example
---
examples/write_novel.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/examples/write_novel.py b/examples/write_novel.py
index 03daf1f0c..e92d94acf 100644
--- a/examples/write_novel.py
+++ b/examples/write_novel.py
@@ -54,7 +54,7 @@ async def generate_novel():
chap_node = await ActionNode.from_pydantic(Chapters).fill(
context=f"### instruction\n{instruction}\n### novel\n{novel_node.content}", llm=LLM()
)
- print(chap_node.content)
+ print(chap_node.instruct_content)
asyncio.run(generate_novel())
From a989a1ab515bdc37c632cb80acbe5787eda8885a Mon Sep 17 00:00:00 2001
From: yzlin
Date: Tue, 20 Feb 2024 21:13:34 +0800
Subject: [PATCH 062/104] fix human interact bug & add example
---
examples/mi/machine_learning.py | 10 +++++-----
metagpt/strategy/planner.py | 2 +-
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/examples/mi/machine_learning.py b/examples/mi/machine_learning.py
index ee3b47f26..a8ab5051e 100644
--- a/examples/mi/machine_learning.py
+++ b/examples/mi/machine_learning.py
@@ -1,13 +1,13 @@
-import asyncio
+import fire
from metagpt.roles.mi.interpreter import Interpreter
-async def main(requirement: str):
- mi = Interpreter(auto_run=True, use_tools=False)
+async def main(auto_run: bool = True):
+ requirement = "Run data analysis on sklearn Wine recognition dataset, include a plot, and train a model to predict wine class (20% as validation), and show validation accuracy."
+ mi = Interpreter(auto_run=auto_run)
await mi.run(requirement)
if __name__ == "__main__":
- requirement = "Run data analysis on sklearn Wine recognition dataset, include a plot, and train a model to predict wine class (20% as validation), and show validation accuracy."
- asyncio.run(main(requirement))
+ fire.Fire(main)
diff --git a/metagpt/strategy/planner.py b/metagpt/strategy/planner.py
index 851d45893..99d16f78b 100644
--- a/metagpt/strategy/planner.py
+++ b/metagpt/strategy/planner.py
@@ -122,7 +122,7 @@ class Planner(BaseModel):
) # "confirm, ... (more content, such as changing downstream tasks)"
if confirmed_and_more:
self.working_memory.add(Message(content=review, role="user", cause_by=AskReview))
- await self.update_plan(review)
+ await self.update_plan()
def get_useful_memories(self, task_exclude_field=None) -> list[Message]:
"""find useful memories only to reduce context length and improve performance"""
From 56435cd6a081cbfecc167369b4813eddfd6f2fd0 Mon Sep 17 00:00:00 2001
From: yzlin
Date: Tue, 20 Feb 2024 21:45:53 +0800
Subject: [PATCH 063/104] fix execute_nb_code bug
---
metagpt/actions/mi/execute_nb_code.py | 2 +-
setup.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/metagpt/actions/mi/execute_nb_code.py b/metagpt/actions/mi/execute_nb_code.py
index a8c9c3085..0371d556c 100644
--- a/metagpt/actions/mi/execute_nb_code.py
+++ b/metagpt/actions/mi/execute_nb_code.py
@@ -182,7 +182,7 @@ class ExecuteNbCode(Action):
outputs = self.parse_outputs(self.nb.cells[-1].outputs)
outputs, success = truncate(remove_escape_and_color_codes(outputs), is_success=success)
- if "!pip" in outputs:
+ if "!pip" in code:
success = False
return outputs, success
diff --git a/setup.py b/setup.py
index 4efcebdc1..ed3d5f78f 100644
--- a/setup.py
+++ b/setup.py
@@ -57,7 +57,7 @@ extras_require["dev"] = (["pylint~=3.0.3", "black~=23.3.0", "isort~=5.12.0", "pr
setup(
name="metagpt",
- version="0.7.1",
+ version="0.7.2",
description="The Multi-Agent Framework",
long_description=long_description,
long_description_content_type="text/markdown",
From f03a5dd59c2e0c85b20775e9232db33ad2ba5034 Mon Sep 17 00:00:00 2001
From: yzlin
Date: Tue, 20 Feb 2024 22:56:50 +0800
Subject: [PATCH 064/104] fix tool path bug
---
metagpt/tools/tool_registry.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/metagpt/tools/tool_registry.py b/metagpt/tools/tool_registry.py
index 5fbd39421..02eecb0f3 100644
--- a/metagpt/tools/tool_registry.py
+++ b/metagpt/tools/tool_registry.py
@@ -9,7 +9,6 @@ from __future__ import annotations
import inspect
import os
-import re
from collections import defaultdict
import yaml
@@ -109,7 +108,8 @@ def register_tool(tool_type: str = "other", schema_path: str = "", **kwargs):
# Get the file path where the function / class is defined and the source code
file_path = inspect.getfile(cls)
if "metagpt" in file_path:
- file_path = re.search("metagpt.+", file_path).group(0)
+ # split to handle ../metagpt/metagpt/tools/... where only metagpt/tools/... is needed
+ file_path = "metagpt" + file_path.split("metagpt")[-1]
source_code = inspect.getsource(cls)
TOOL_REGISTRY.register_tool(
From 06d72698bb379ecd630fe06fcac509d600c14974 Mon Sep 17 00:00:00 2001
From: geekan
Date: Wed, 21 Feb 2024 16:49:59 +0800
Subject: [PATCH 065/104] refine code to avoid config error
---
metagpt/software_company.py | 21 +++++++++++++++------
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/metagpt/software_company.py b/metagpt/software_company.py
index 26bb29cd1..f290d497a 100644
--- a/metagpt/software_company.py
+++ b/metagpt/software_company.py
@@ -2,14 +2,11 @@
# -*- coding: utf-8 -*-
import asyncio
-import shutil
from pathlib import Path
import typer
-from metagpt.config2 import config
-from metagpt.const import CONFIG_ROOT, METAGPT_ROOT
-from metagpt.context import Context
+from metagpt.const import CONFIG_ROOT
from metagpt.utils.project_repo import ProjectRepo
app = typer.Typer(add_completion=False, pretty_exceptions_show_locals=False)
@@ -30,6 +27,8 @@ def generate_repo(
recover_path=None,
) -> ProjectRepo:
"""Run the startup logic. Can be called from CLI or other Python scripts."""
+ from metagpt.config2 import config
+ from metagpt.context import Context
from metagpt.roles import (
Architect,
Engineer,
@@ -122,7 +121,17 @@ def startup(
)
-def copy_config_to(config_path=METAGPT_ROOT / "config" / "config2.yaml"):
+DEFAULT_CONFIG = """# Full Example: https://github.com/geekan/MetaGPT/blob/main/config/config2.example.yaml
+# Reflected Code: https://github.com/geekan/MetaGPT/blob/main/metagpt/config2.py
+llm:
+ api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options
+ model: "gpt-4-turbo-preview" # or gpt-3.5-turbo-1106 / gpt-4-1106-preview
+ base_url: "https://api.openai.com/v1" # or forward url / other llm url
+ api_key: "YOUR_API_KEY"
+"""
+
+
+def copy_config_to():
"""Initialize the configuration file for MetaGPT."""
target_path = CONFIG_ROOT / "config2.yaml"
@@ -136,7 +145,7 @@ def copy_config_to(config_path=METAGPT_ROOT / "config" / "config2.yaml"):
print(f"Existing configuration file backed up at {backup_path}")
# 复制文件
- shutil.copy(str(config_path), target_path)
+ target_path.write_text(DEFAULT_CONFIG, encoding="utf-8")
print(f"Configuration file initialized at {target_path}")
From dfa33fcdaade1e4f8019835bf065d372d76724ae Mon Sep 17 00:00:00 2001
From: lixiangtiandashen <39831228+lixiangtiandashen@users.noreply.github.com>
Date: Thu, 22 Feb 2024 04:31:24 +0800
Subject: [PATCH 066/104] Update role.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
由于提示词里要求llm选择的状态码是 0 到 状态数-1,所以我认为这里往states里添加动作对应状态时,也应当从0开始。
---
metagpt/roles/role.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/metagpt/roles/role.py b/metagpt/roles/role.py
index 3938664ba..893c5cafd 100644
--- a/metagpt/roles/role.py
+++ b/metagpt/roles/role.py
@@ -281,7 +281,7 @@ class Role(SerializationMixin, ContextMixin, BaseModel):
i = action
self._init_action(i)
self.actions.append(i)
- self.states.append(f"{len(self.actions)}. {action}")
+ self.states.append(f"{len(self.actions) - 1}. {action}")
def _set_react_mode(self, react_mode: str, max_react_loop: int = 1, auto_run: bool = True, use_tools: bool = False):
"""Set strategy of the Role reacting to observed Message. Variation lies in how
From 655ba5647b82106bd8fc63035bb3b8c5da4a479c Mon Sep 17 00:00:00 2001
From: betterwang
Date: Fri, 23 Feb 2024 12:45:06 +0800
Subject: [PATCH 067/104] update qianfan pypi version
---
metagpt/utils/cost_manager.py | 2 +-
requirements.txt | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/metagpt/utils/cost_manager.py b/metagpt/utils/cost_manager.py
index 4e6b65b2c..929d0b2e5 100644
--- a/metagpt/utils/cost_manager.py
+++ b/metagpt/utils/cost_manager.py
@@ -42,7 +42,7 @@ class CostManager(BaseModel):
"""
self.total_prompt_tokens += prompt_tokens
self.total_completion_tokens += completion_tokens
- if model not in TOKEN_COSTS:
+ if model not in self.token_costs:
logger.warning(f"Model {model} not found in TOKEN_COSTS.")
return
diff --git a/requirements.txt b/requirements.txt
index 9759248bb..4651cb258 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -67,5 +67,5 @@ playwright>=1.26 # used at metagpt/tools/libs/web_scraping.py
anytree
ipywidgets==8.1.1
Pillow
-qianfan==0.3.1
-dashscope==1.14.1
\ No newline at end of file
+qianfan==0.3.2
+dashscope==1.14.1
From 6893f78c212a65eeafb746e9fc55372a709aee61 Mon Sep 17 00:00:00 2001
From: betterwang
Date: Fri, 23 Feb 2024 12:54:46 +0800
Subject: [PATCH 068/104] update ut of dashscope
---
tests/metagpt/provider/test_dashscope_api.py | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/tests/metagpt/provider/test_dashscope_api.py b/tests/metagpt/provider/test_dashscope_api.py
index 48a676bc7..df6e92af0 100644
--- a/tests/metagpt/provider/test_dashscope_api.py
+++ b/tests/metagpt/provider/test_dashscope_api.py
@@ -23,14 +23,26 @@ resp_cont = resp_cont_tmpl.format(name=name)
@classmethod
def mock_dashscope_call(
- cls, messages: list[dict], model: str, api_key: str, result_format: str, stream: bool = False
+ cls,
+ messages: list[dict],
+ model: str,
+ api_key: str,
+ result_format: str,
+ incremental_output: bool = True,
+ stream: bool = False,
) -> GenerationResponse:
return get_dashscope_response(name)
@classmethod
async def mock_dashscope_acall(
- cls, messages: list[dict], model: str, api_key: str, result_format: str, stream: bool = False
+ cls,
+ messages: list[dict],
+ model: str,
+ api_key: str,
+ result_format: str,
+ incremental_output: bool = True,
+ stream: bool = False,
) -> Union[AsyncGenerator[GenerationResponse, None], GenerationResponse]:
resps = [get_dashscope_response(name)]
From cdd3025408cfb2cb83c91545df3e329383fc713e Mon Sep 17 00:00:00 2001
From: Blq <243987385@qq.com>
Date: Fri, 23 Feb 2024 08:39:44 +0000
Subject: [PATCH 069/104] Assigned configs for actions to prevent
action.llm.model being overridden in Role._init_action.
---
examples/debate_simple.py | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/examples/debate_simple.py b/examples/debate_simple.py
index 869e02a0e..953f664f3 100644
--- a/examples/debate_simple.py
+++ b/examples/debate_simple.py
@@ -8,14 +8,17 @@
import asyncio
from metagpt.actions import Action
+from metagpt.config2 import Config
from metagpt.environment import Environment
from metagpt.roles import Role
from metagpt.team import Team
-action1 = Action(name="AlexSay", instruction="Express your opinion with emotion and don't repeat it")
-action1.llm.model = "gpt-4-1106-preview"
-action2 = Action(name="BobSay", instruction="Express your opinion with emotion and don't repeat it")
-action2.llm.model = "gpt-3.5-turbo-1106"
+gpt35 = Config.default()
+gpt35.llm.model = "gpt-3.5-turbo-1106"
+gpt4 = Config.default()
+gpt4.llm.model = "gpt-4-1106-preview"
+action1 = Action(config=gpt4, name="AlexSay", instruction="Express your opinion with emotion and don't repeat it")
+action2 = Action(config=gpt35, name="BobSay", instruction="Express your opinion with emotion and don't repeat it")
alex = Role(name="Alex", profile="Democratic candidate", goal="Win the election", actions=[action1], watch=[action2])
bob = Role(name="Bob", profile="Republican candidate", goal="Win the election", actions=[action2], watch=[action1])
env = Environment(desc="US election live broadcast")
From 79c71dc5ad980c32ee8103f8b8a08afb264ba217 Mon Sep 17 00:00:00 2001
From: betterwang
Date: Fri, 23 Feb 2024 20:28:35 +0800
Subject: [PATCH 070/104] update ERNIE-Bot token cost
---
metagpt/utils/token_counter.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/metagpt/utils/token_counter.py b/metagpt/utils/token_counter.py
index 5a1b2f2e7..53dc9e9c7 100644
--- a/metagpt/utils/token_counter.py
+++ b/metagpt/utils/token_counter.py
@@ -45,7 +45,7 @@ Due to QianFan has multi price strategies, we unify `Tokens post-payment` as a s
QianFan_MODEL_TOKEN_COSTS = {
"ERNIE-Bot-4": {"prompt": 0.017, "completion": 0.017},
"ERNIE-Bot-8k": {"prompt": 0.0034, "completion": 0.0067},
- "ERNIE-Bot": {"prompt": 0.017, "completion": 0.017},
+ "ERNIE-Bot": {"prompt": 0.0017, "completion": 0.0017},
"ERNIE-Bot-turbo": {"prompt": 0.0011, "completion": 0.0011},
"EB-turbo-AppBuilder": {"prompt": 0.0011, "completion": 0.0011},
"ERNIE-Speed": {"prompt": 0.00056, "completion": 0.0011},
From e12dde978e5e1ac52ffee7b0e7f8191276359e27 Mon Sep 17 00:00:00 2001
From: Abhishek0075
Date: Sat, 24 Feb 2024 12:16:35 +0530
Subject: [PATCH 071/104] Changed requirements.txt
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 4ea6dc5d2..2a09d1db9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,7 +11,7 @@ typer==0.9.0
# godot==0.1.1
# google_api_python_client==2.93.0 # Used by search_engine.py
lancedb==0.4.0
-langchain==0.0.352
+langchain==0.1.8
loguru==0.6.0
meilisearch==0.21.0
numpy>=1.24.3,<1.25.0
From 2d17da28262210a6f03977e7277811fdf3d0bb10 Mon Sep 17 00:00:00 2001
From: betterwang
Date: Sat, 24 Feb 2024 15:00:45 +0800
Subject: [PATCH 072/104] fix dashscope spelling
---
metagpt/provider/dashscope_api.py | 4 ++--
metagpt/utils/token_counter.py | 4 ++--
tests/metagpt/provider/test_dashscope_api.py | 8 ++++----
3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/metagpt/provider/dashscope_api.py b/metagpt/provider/dashscope_api.py
index c02a302e4..7d7dcb7b7 100644
--- a/metagpt/provider/dashscope_api.py
+++ b/metagpt/provider/dashscope_api.py
@@ -37,7 +37,7 @@ from metagpt.provider.base_llm import BaseLLM, LLMConfig
from metagpt.provider.llm_provider_registry import LLMType, register_provider
from metagpt.provider.openai_api import log_and_reraise
from metagpt.utils.cost_manager import CostManager
-from metagpt.utils.token_counter import DashScore_TOKEN_COSTS
+from metagpt.utils.token_counter import DashScope_TOKEN_COSTS
def build_api_arequest(
@@ -167,7 +167,7 @@ class DashScopeLLM(BaseLLM):
def __init_dashscope(self):
self.model = self.config.model
self.api_key = self.config.api_key
- self.token_costs = DashScore_TOKEN_COSTS
+ self.token_costs = DashScope_TOKEN_COSTS
self.aclient: AGeneration = AGeneration
# check support system_message models
diff --git a/metagpt/utils/token_counter.py b/metagpt/utils/token_counter.py
index 53dc9e9c7..220014d8f 100644
--- a/metagpt/utils/token_counter.py
+++ b/metagpt/utils/token_counter.py
@@ -91,10 +91,10 @@ QianFan_EndPoint_TOKEN_COSTS = {
}
"""
-DashScore Token price https://help.aliyun.com/zh/dashscope/developer-reference/tongyi-thousand-questions-metering-and-billing
+DashScope Token price https://help.aliyun.com/zh/dashscope/developer-reference/tongyi-thousand-questions-metering-and-billing
Different model has different detail page. Attention, some model are free for a limited time.
"""
-DashScore_TOKEN_COSTS = {
+DashScope_TOKEN_COSTS = {
"qwen-turbo": {"prompt": 0.0011, "completion": 0.0011},
"qwen-plus": {"prompt": 0.0028, "completion": 0.0028},
"qwen-max": {"prompt": 0.0, "completion": 0.0},
diff --git a/tests/metagpt/provider/test_dashscope_api.py b/tests/metagpt/provider/test_dashscope_api.py
index df6e92af0..a6dd8f247 100644
--- a/tests/metagpt/provider/test_dashscope_api.py
+++ b/tests/metagpt/provider/test_dashscope_api.py
@@ -62,12 +62,12 @@ async def test_dashscope_acompletion(mocker):
mocker.patch("dashscope.aigc.generation.Generation.call", mock_dashscope_call)
mocker.patch("metagpt.provider.dashscope_api.AGeneration.acall", mock_dashscope_acall)
- dashscore_llm = DashScopeLLM(mock_llm_config_dashscope)
+ dashscope_llm = DashScopeLLM(mock_llm_config_dashscope)
- resp = dashscore_llm.completion(messages)
+ resp = dashscope_llm.completion(messages)
assert resp.choices[0]["message"]["content"] == resp_cont
- resp = await dashscore_llm.acompletion(messages)
+ resp = await dashscope_llm.acompletion(messages)
assert resp.choices[0]["message"]["content"] == resp_cont
- await llm_general_chat_funcs_test(dashscore_llm, prompt, messages, resp_cont)
+ await llm_general_chat_funcs_test(dashscope_llm, prompt, messages, resp_cont)
From 4f5152cae4307c91a9f8ac123b571318eaad6466 Mon Sep 17 00:00:00 2001
From: revupst
Date: Mon, 26 Feb 2024 21:59:22 +0800
Subject: [PATCH 073/104] chore: bump ipykernel to 6.27.1
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 4ea6dc5d2..e36c69971 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -54,7 +54,7 @@ rich==13.6.0
nbclient==0.9.0
nbformat==5.9.2
ipython==8.17.2
-ipykernel==6.27.0
+ipykernel==6.27.1
scikit_learn==1.3.2
typing-extensions==4.9.0
socksio~=1.0.0
From 9ec56263133f916269c04355f925ce8140217790 Mon Sep 17 00:00:00 2001
From: better629
Date: Tue, 27 Feb 2024 14:08:05 +0800
Subject: [PATCH 074/104] mock openai embed for document_store and memory UTs
---
metagpt/memory/memory_storage.py | 4 +--
.../document_store/test_faiss_store.py | 22 +++++++++++--
tests/metagpt/memory/mock_text_embed.py | 33 +++++++++++++++++++
tests/metagpt/memory/test_longterm_memory.py | 20 ++++++-----
tests/metagpt/memory/test_memory_storage.py | 32 +++++++++++-------
5 files changed, 85 insertions(+), 26 deletions(-)
create mode 100644 tests/metagpt/memory/mock_text_embed.py
diff --git a/metagpt/memory/memory_storage.py b/metagpt/memory/memory_storage.py
index c029d027b..fa04d8138 100644
--- a/metagpt/memory/memory_storage.py
+++ b/metagpt/memory/memory_storage.py
@@ -7,7 +7,6 @@
from pathlib import Path
from typing import Optional
-from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain_core.embeddings import Embeddings
@@ -15,6 +14,7 @@ from metagpt.const import DATA_PATH, MEM_TTL
from metagpt.document_store.faiss_store import FaissStore
from metagpt.logs import logger
from metagpt.schema import Message
+from metagpt.utils.embedding import get_embedding
from metagpt.utils.serialize import deserialize_message, serialize_message
@@ -30,7 +30,7 @@ class MemoryStorage(FaissStore):
self.threshold: float = 0.1 # experience value. TODO The threshold to filter similar memories
self._initialized: bool = False
- self.embedding = embedding or OpenAIEmbeddings()
+ self.embedding = embedding or get_embedding()
self.store: FAISS = None # Faiss engine
@property
diff --git a/tests/metagpt/document_store/test_faiss_store.py b/tests/metagpt/document_store/test_faiss_store.py
index 7e2979bd4..397ba6ce5 100644
--- a/tests/metagpt/document_store/test_faiss_store.py
+++ b/tests/metagpt/document_store/test_faiss_store.py
@@ -6,6 +6,9 @@
@File : test_faiss_store.py
"""
+from typing import Optional
+
+import numpy as np
import pytest
from metagpt.const import EXAMPLE_PATH
@@ -14,8 +17,17 @@ from metagpt.logs import logger
from metagpt.roles import Sales
+def mock_openai_embed_documents(self, texts: list[str], chunk_size: Optional[int] = 0) -> list[list[float]]:
+ num = len(texts)
+ embeds = np.random.randint(1, 100, size=(num, 1536)) # 1536: openai embedding dim
+ embeds = (embeds - embeds.mean(axis=0)) / (embeds.std(axis=0))
+ return embeds
+
+
@pytest.mark.asyncio
-async def test_search_json():
+async def test_search_json(mocker):
+ mocker.patch("langchain_community.embeddings.openai.OpenAIEmbeddings.embed_documents", mock_openai_embed_documents)
+
store = FaissStore(EXAMPLE_PATH / "example.json")
role = Sales(profile="Sales", store=store)
query = "Which facial cleanser is good for oily skin?"
@@ -24,7 +36,9 @@ async def test_search_json():
@pytest.mark.asyncio
-async def test_search_xlsx():
+async def test_search_xlsx(mocker):
+ mocker.patch("langchain_community.embeddings.openai.OpenAIEmbeddings.embed_documents", mock_openai_embed_documents)
+
store = FaissStore(EXAMPLE_PATH / "example.xlsx")
role = Sales(profile="Sales", store=store)
query = "Which facial cleanser is good for oily skin?"
@@ -33,7 +47,9 @@ async def test_search_xlsx():
@pytest.mark.asyncio
-async def test_write():
+async def test_write(mocker):
+ mocker.patch("langchain_community.embeddings.openai.OpenAIEmbeddings.embed_documents", mock_openai_embed_documents)
+
store = FaissStore(EXAMPLE_PATH / "example.xlsx", meta_col="Answer", content_col="Question")
_faiss_store = store.write()
assert _faiss_store.docstore
diff --git a/tests/metagpt/memory/mock_text_embed.py b/tests/metagpt/memory/mock_text_embed.py
new file mode 100644
index 000000000..897c7cf10
--- /dev/null
+++ b/tests/metagpt/memory/mock_text_embed.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Desc :
+
+from typing import Optional
+
+import numpy as np
+
+dim = 1536 # openai embedding dim
+
+text_embed_arr = [
+ {"text": "Write a cli snake game", "embed": np.zeros(shape=[1, dim])}, # mock data, same as below
+ {"text": "Write a game of cli snake", "embed": np.zeros(shape=[1, dim])},
+ {"text": "Write a 2048 web game", "embed": np.ones(shape=[1, dim])},
+ {"text": "Write a Battle City", "embed": np.ones(shape=[1, dim])},
+ {
+ "text": "The user has requested the creation of a command-line interface (CLI) snake game",
+ "embed": np.zeros(shape=[1, dim]),
+ },
+ {"text": "The request is command-line interface (CLI) snake game", "embed": np.zeros(shape=[1, dim])},
+ {
+ "text": "Incorporate basic features of a snake game such as scoring and increasing difficulty",
+ "embed": np.ones(shape=[1, dim]),
+ },
+]
+
+text_idx_dict = {item["text"]: idx for idx, item in enumerate(text_embed_arr)}
+
+
+def mock_openai_embed_documents(self, texts: list[str], chunk_size: Optional[int] = 0) -> list[list[float]]:
+ idx = text_idx_dict.get(texts[0])
+ embed = text_embed_arr[idx].get("embed")
+ return embed
diff --git a/tests/metagpt/memory/test_longterm_memory.py b/tests/metagpt/memory/test_longterm_memory.py
index 5c71ddd13..f7e652758 100644
--- a/tests/metagpt/memory/test_longterm_memory.py
+++ b/tests/metagpt/memory/test_longterm_memory.py
@@ -4,20 +4,22 @@
@Desc : unittest of `metagpt/memory/longterm_memory.py`
"""
-import os
import pytest
from metagpt.actions import UserRequirement
-from metagpt.config2 import config
from metagpt.memory.longterm_memory import LongTermMemory
from metagpt.roles.role import RoleContext
from metagpt.schema import Message
-
-os.environ.setdefault("OPENAI_API_KEY", config.get_openai_llm().api_key)
+from tests.metagpt.memory.mock_text_embed import (
+ mock_openai_embed_documents,
+ text_embed_arr,
+)
-def test_ltm_search():
+def test_ltm_search(mocker):
+ mocker.patch("langchain_community.embeddings.openai.OpenAIEmbeddings.embed_documents", mock_openai_embed_documents)
+
role_id = "UTUserLtm(Product Manager)"
from metagpt.environment import Environment
@@ -27,20 +29,20 @@ def test_ltm_search():
ltm = LongTermMemory()
ltm.recover_memory(role_id, rc)
- idea = "Write a cli snake game"
+ idea = text_embed_arr[0].get("text", "Write a cli snake game")
message = Message(role="User", content=idea, cause_by=UserRequirement)
news = ltm.find_news([message])
assert len(news) == 1
ltm.add(message)
- sim_idea = "Write a game of cli snake"
+ sim_idea = text_embed_arr[1].get("text", "Write a game of cli snake")
sim_message = Message(role="User", content=sim_idea, cause_by=UserRequirement)
news = ltm.find_news([sim_message])
assert len(news) == 0
ltm.add(sim_message)
- new_idea = "Write a 2048 web game"
+ new_idea = text_embed_arr[2].get("text", "Write a 2048 web game")
new_message = Message(role="User", content=new_idea, cause_by=UserRequirement)
news = ltm.find_news([new_message])
assert len(news) == 1
@@ -56,7 +58,7 @@ def test_ltm_search():
news = ltm_new.find_news([sim_message])
assert len(news) == 0
- new_idea = "Write a Battle City"
+ new_idea = text_embed_arr[3].get("text", "Write a Battle City")
new_message = Message(role="User", content=new_idea, cause_by=UserRequirement)
news = ltm_new.find_news([new_message])
assert len(news) == 1
diff --git a/tests/metagpt/memory/test_memory_storage.py b/tests/metagpt/memory/test_memory_storage.py
index e82a82fc8..28a73276b 100644
--- a/tests/metagpt/memory/test_memory_storage.py
+++ b/tests/metagpt/memory/test_memory_storage.py
@@ -4,23 +4,25 @@
@Desc : the unittests of metagpt/memory/memory_storage.py
"""
-import os
import shutil
from pathlib import Path
from typing import List
from metagpt.actions import UserRequirement, WritePRD
from metagpt.actions.action_node import ActionNode
-from metagpt.config2 import config
from metagpt.const import DATA_PATH
from metagpt.memory.memory_storage import MemoryStorage
from metagpt.schema import Message
-
-os.environ.setdefault("OPENAI_API_KEY", config.get_openai_llm().api_key)
+from tests.metagpt.memory.mock_text_embed import (
+ mock_openai_embed_documents,
+ text_embed_arr,
+)
-def test_idea_message():
- idea = "Write a cli snake game"
+def test_idea_message(mocker):
+ mocker.patch("langchain_community.embeddings.openai.OpenAIEmbeddings.embed_documents", mock_openai_embed_documents)
+
+ idea = text_embed_arr[0].get("text", "Write a cli snake game")
role_id = "UTUser1(Product Manager)"
message = Message(role="User", content=idea, cause_by=UserRequirement)
@@ -33,12 +35,12 @@ def test_idea_message():
memory_storage.add(message)
assert memory_storage.is_initialized is True
- sim_idea = "Write a game of cli snake"
+ sim_idea = text_embed_arr[1].get("text", "Write a game of cli snake")
sim_message = Message(role="User", content=sim_idea, cause_by=UserRequirement)
new_messages = memory_storage.search_dissimilar(sim_message)
assert len(new_messages) == 0 # similar, return []
- new_idea = "Write a 2048 web game"
+ new_idea = text_embed_arr[2].get("text", "Write a 2048 web game")
new_message = Message(role="User", content=new_idea, cause_by=UserRequirement)
new_messages = memory_storage.search_dissimilar(new_message)
assert new_messages[0].content == message.content
@@ -47,13 +49,17 @@ def test_idea_message():
assert memory_storage.is_initialized is False
-def test_actionout_message():
+def test_actionout_message(mocker):
+ mocker.patch("langchain_community.embeddings.openai.OpenAIEmbeddings.embed_documents", mock_openai_embed_documents)
+
out_mapping = {"field1": (str, ...), "field2": (List[str], ...)}
out_data = {"field1": "field1 value", "field2": ["field2 value1", "field2 value2"]}
ic_obj = ActionNode.create_model_class("prd", out_mapping)
role_id = "UTUser2(Architect)"
- content = "The user has requested the creation of a command-line interface (CLI) snake game"
+ content = text_embed_arr[4].get(
+ "text", "The user has requested the creation of a command-line interface (CLI) snake game"
+ )
message = Message(
content=content, instruct_content=ic_obj(**out_data), role="user", cause_by=WritePRD
) # WritePRD as test action
@@ -67,12 +73,14 @@ def test_actionout_message():
memory_storage.add(message)
assert memory_storage.is_initialized is True
- sim_conent = "The request is command-line interface (CLI) snake game"
+ sim_conent = text_embed_arr[5].get("text", "The request is command-line interface (CLI) snake game")
sim_message = Message(content=sim_conent, instruct_content=ic_obj(**out_data), role="user", cause_by=WritePRD)
new_messages = memory_storage.search_dissimilar(sim_message)
assert len(new_messages) == 0 # similar, return []
- new_conent = "Incorporate basic features of a snake game such as scoring and increasing difficulty"
+ new_conent = text_embed_arr[6].get(
+ "text", "Incorporate basic features of a snake game such as scoring and increasing difficulty"
+ )
new_message = Message(content=new_conent, instruct_content=ic_obj(**out_data), role="user", cause_by=WritePRD)
new_messages = memory_storage.search_dissimilar(new_message)
assert new_messages[0].content == message.content
From 51bd8a056e4a3f4ffc4bdda22018429841a793b4 Mon Sep 17 00:00:00 2001
From: betterwang
Date: Wed, 28 Feb 2024 09:36:44 +0800
Subject: [PATCH 075/104] update missing content
---
metagpt/configs/llm_config.py | 7 ++++++-
metagpt/provider/base_llm.py | 25 ++++++++++++++++++++++++-
metagpt/utils/cost_manager.py | 4 +++-
3 files changed, 33 insertions(+), 3 deletions(-)
diff --git a/metagpt/configs/llm_config.py b/metagpt/configs/llm_config.py
index 4ed741af3..36f5d7ae7 100644
--- a/metagpt/configs/llm_config.py
+++ b/metagpt/configs/llm_config.py
@@ -38,13 +38,18 @@ class LLMConfig(YamlModel):
Optional Fields in pydantic: https://docs.pydantic.dev/latest/migration/#required-optional-and-nullable-fields
"""
- api_key: str
+ api_key: str = "sk-"
api_type: LLMType = LLMType.OPENAI
base_url: str = "https://api.openai.com/v1"
api_version: Optional[str] = None
model: Optional[str] = None # also stands for DEPLOYMENT_NAME
+ # For Cloud Service Provider like Baidu/ Alibaba
+ access_key: Optional[str] = None
+ secret_key: Optional[str] = None
+ endpoint: Optional[str] = None # for self-deployed model on the cloud
+
# For Spark(Xunfei), maybe remove later
app_id: Optional[str] = None
api_secret: Optional[str] = None
diff --git a/metagpt/provider/base_llm.py b/metagpt/provider/base_llm.py
index b144471b5..2f57b15aa 100644
--- a/metagpt/provider/base_llm.py
+++ b/metagpt/provider/base_llm.py
@@ -11,11 +11,12 @@ from abc import ABC, abstractmethod
from typing import Optional, Union
from openai import AsyncOpenAI
+from pydantic import BaseModel
from metagpt.configs.llm_config import LLMConfig
from metagpt.logs import logger
from metagpt.schema import Message
-from metagpt.utils.cost_manager import CostManager
+from metagpt.utils.cost_manager import CostManager, Costs
class BaseLLM(ABC):
@@ -67,6 +68,28 @@ class BaseLLM(ABC):
def _default_system_msg(self):
return self._system_msg(self.system_prompt)
+ def _update_costs(self, usage: Union[dict, BaseModel], model: str = None, local_calc_usage: bool = True):
+ """update each request's token cost
+ Args:
+ model (str): model name or in some scenarios called endpoint
+ local_calc_usage (bool): some models don't calculate usage, it will overwrite LLMConfig.calc_usage
+ """
+ calc_usage = self.config.calc_usage and local_calc_usage
+ model = model if model else self.model
+ usage = usage.model_dump() if isinstance(usage, BaseModel) else usage
+ if calc_usage and self.cost_manager:
+ try:
+ prompt_tokens = int(usage.get("prompt_tokens", 0))
+ completion_tokens = int(usage.get("completion_tokens", 0))
+ self.cost_manager.update_cost(prompt_tokens, completion_tokens, model)
+ except Exception as e:
+ logger.error(f"{self.__class__.__name__} updats costs failed! exp: {e}")
+
+ def get_costs(self) -> Costs:
+ if not self.cost_manager:
+ return Costs(0, 0, 0, 0)
+ return self.cost_manager.get_costs()
+
async def aask(
self,
msg: str,
diff --git a/metagpt/utils/cost_manager.py b/metagpt/utils/cost_manager.py
index c3aa7323f..efff07ae1 100644
--- a/metagpt/utils/cost_manager.py
+++ b/metagpt/utils/cost_manager.py
@@ -29,6 +29,7 @@ class CostManager(BaseModel):
total_budget: float = 0
max_budget: float = 10.0
total_cost: float = 0
+ token_costs: dict[str, dict[str, float]] = TOKEN_COSTS # different model's token cost
def update_cost(self, prompt_tokens, completion_tokens, model):
"""
@@ -46,7 +47,8 @@ class CostManager(BaseModel):
return
cost = (
- prompt_tokens * TOKEN_COSTS[model]["prompt"] + completion_tokens * TOKEN_COSTS[model]["completion"]
+ prompt_tokens * self.token_costs[model]["prompt"]
+ + completion_tokens * self.token_costs[model]["completion"]
) / 1000
self.total_cost += cost
logger.info(
From 21d588ae0e793d5154b9793bb9ba1194e0fb2d07 Mon Sep 17 00:00:00 2001
From: betterwang
Date: Wed, 28 Feb 2024 09:38:12 +0800
Subject: [PATCH 076/104] update missing qianfan ut
---
tests/metagpt/provider/test_qianfan_api.py | 56 ++++++++++++++++++++++
1 file changed, 56 insertions(+)
create mode 100644 tests/metagpt/provider/test_qianfan_api.py
diff --git a/tests/metagpt/provider/test_qianfan_api.py b/tests/metagpt/provider/test_qianfan_api.py
new file mode 100644
index 000000000..28341425c
--- /dev/null
+++ b/tests/metagpt/provider/test_qianfan_api.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Desc : the unittest of qianfan api
+
+from typing import AsyncIterator, Union
+
+import pytest
+from qianfan.resources.typing import JsonBody, QfResponse
+
+from metagpt.provider.qianfan_api import QianFanLLM
+from tests.metagpt.provider.mock_llm_config import mock_llm_config_qianfan
+from tests.metagpt.provider.req_resp_const import (
+ get_qianfan_response,
+ llm_general_chat_funcs_test,
+ messages,
+ prompt,
+ resp_cont_tmpl,
+)
+
+name = "ERNIE-Bot-turbo"
+resp_cont = resp_cont_tmpl.format(name=name)
+
+
+def mock_qianfan_do(self, messages: list[dict], model: str, stream: bool = False, system: str = None) -> QfResponse:
+ return get_qianfan_response(name=name)
+
+
+async def mock_qianfan_ado(
+ self, messages: list[dict], model: str, stream: bool = True, system: str = None
+) -> Union[QfResponse, AsyncIterator[QfResponse]]:
+ resps = [get_qianfan_response(name=name)]
+ if stream:
+
+ async def aresp_iterator(resps: list[JsonBody]):
+ for resp in resps:
+ yield resp
+
+ return aresp_iterator(resps)
+ else:
+ return resps[0]
+
+
+@pytest.mark.asyncio
+async def test_qianfan_acompletion(mocker):
+ mocker.patch("qianfan.resources.llm.chat_completion.ChatCompletion.do", mock_qianfan_do)
+ mocker.patch("qianfan.resources.llm.chat_completion.ChatCompletion.ado", mock_qianfan_ado)
+
+ qianfan_llm = QianFanLLM(mock_llm_config_qianfan)
+
+ resp = qianfan_llm.completion(messages)
+ assert resp.get("result") == resp_cont
+
+ resp = await qianfan_llm.acompletion(messages)
+ assert resp.get("result") == resp_cont
+
+ await llm_general_chat_funcs_test(qianfan_llm, prompt, messages, resp_cont)
From ad88c5000d135303a23a22da56ba240867ec44fc Mon Sep 17 00:00:00 2001
From: betterwang
Date: Wed, 28 Feb 2024 12:09:33 +0800
Subject: [PATCH 077/104] fix typo
---
examples/llm_hello_world.py | 6 ++--
metagpt/provider/base_llm.py | 4 +--
metagpt/provider/dashscope_api.py | 4 +--
metagpt/provider/qianfan_api.py | 12 ++++----
metagpt/utils/token_counter.py | 48 +++++++++++++++----------------
5 files changed, 37 insertions(+), 37 deletions(-)
diff --git a/examples/llm_hello_world.py b/examples/llm_hello_world.py
index 9340f7d5a..62fc2ed68 100644
--- a/examples/llm_hello_world.py
+++ b/examples/llm_hello_world.py
@@ -14,9 +14,9 @@ from metagpt.logs import logger
async def main():
llm = LLM()
# llm type check
- id_ques = "what's your name"
- logger.info(f"{id_ques}: ")
- logger.info(await llm.aask(id_ques))
+ question = "what's your name"
+ logger.info(f"{question}: ")
+ logger.info(await llm.aask(question))
logger.info("\n\n")
logger.info(
diff --git a/metagpt/provider/base_llm.py b/metagpt/provider/base_llm.py
index 2f57b15aa..7cf3faac0 100644
--- a/metagpt/provider/base_llm.py
+++ b/metagpt/provider/base_llm.py
@@ -75,7 +75,7 @@ class BaseLLM(ABC):
local_calc_usage (bool): some models don't calculate usage, it will overwrite LLMConfig.calc_usage
"""
calc_usage = self.config.calc_usage and local_calc_usage
- model = model if model else self.model
+ model = model or self.model
usage = usage.model_dump() if isinstance(usage, BaseModel) else usage
if calc_usage and self.cost_manager:
try:
@@ -83,7 +83,7 @@ class BaseLLM(ABC):
completion_tokens = int(usage.get("completion_tokens", 0))
self.cost_manager.update_cost(prompt_tokens, completion_tokens, model)
except Exception as e:
- logger.error(f"{self.__class__.__name__} updats costs failed! exp: {e}")
+ logger.error(f"{self.__class__.__name__} updates costs failed! exp: {e}")
def get_costs(self) -> Costs:
if not self.cost_manager:
diff --git a/metagpt/provider/dashscope_api.py b/metagpt/provider/dashscope_api.py
index 7d7dcb7b7..f2b3a19a1 100644
--- a/metagpt/provider/dashscope_api.py
+++ b/metagpt/provider/dashscope_api.py
@@ -37,7 +37,7 @@ from metagpt.provider.base_llm import BaseLLM, LLMConfig
from metagpt.provider.llm_provider_registry import LLMType, register_provider
from metagpt.provider.openai_api import log_and_reraise
from metagpt.utils.cost_manager import CostManager
-from metagpt.utils.token_counter import DashScope_TOKEN_COSTS
+from metagpt.utils.token_counter import DASHSCOPE_TOKEN_COSTS
def build_api_arequest(
@@ -167,7 +167,7 @@ class DashScopeLLM(BaseLLM):
def __init_dashscope(self):
self.model = self.config.model
self.api_key = self.config.api_key
- self.token_costs = DashScope_TOKEN_COSTS
+ self.token_costs = DASHSCOPE_TOKEN_COSTS
self.aclient: AGeneration = AGeneration
# check support system_message models
diff --git a/metagpt/provider/qianfan_api.py b/metagpt/provider/qianfan_api.py
index 6f94b9cea..4cbb76566 100644
--- a/metagpt/provider/qianfan_api.py
+++ b/metagpt/provider/qianfan_api.py
@@ -22,8 +22,8 @@ from metagpt.provider.llm_provider_registry import register_provider
from metagpt.provider.openai_api import log_and_reraise
from metagpt.utils.cost_manager import CostManager
from metagpt.utils.token_counter import (
- QianFan_EndPoint_TOKEN_COSTS,
- QianFan_MODEL_TOKEN_COSTS,
+ QIANFAN_ENDPOINT_TOKEN_COSTS,
+ QIANFAN_MODEL_TOKEN_COSTS,
)
@@ -74,8 +74,8 @@ class QianFanLLM(BaseLLM):
assert not (self.config.model and self.config.endpoint), "Only set `model` or `endpoint` in the config"
assert self.config.model or self.config.endpoint, "Should set one of `model` or `endpoint` in the config"
- self.token_costs = copy.deepcopy(QianFan_MODEL_TOKEN_COSTS)
- self.token_costs.update(QianFan_EndPoint_TOKEN_COSTS)
+ self.token_costs = copy.deepcopy(QIANFAN_MODEL_TOKEN_COSTS)
+ self.token_costs.update(QIANFAN_ENDPOINT_TOKEN_COSTS)
# self deployed model on the cloud not to calculate usage, it charges resource pool rental fee
self.calc_usage = self.config.calc_usage and self.config.endpoint is None
@@ -103,8 +103,8 @@ class QianFanLLM(BaseLLM):
def _update_costs(self, usage: dict):
"""update each request's token cost"""
- model_or_endpoint = self.config.model if self.config.model else self.config.endpoint
- local_calc_usage = True if model_or_endpoint in self.token_costs else False
+ model_or_endpoint = self.config.model or self.config.endpoint
+ local_calc_usage = model_or_endpoint in self.token_costs
super()._update_costs(usage, model_or_endpoint, local_calc_usage)
def get_choice_text(self, resp: JsonBody) -> str:
diff --git a/metagpt/utils/token_counter.py b/metagpt/utils/token_counter.py
index 220014d8f..167a1d755 100644
--- a/metagpt/utils/token_counter.py
+++ b/metagpt/utils/token_counter.py
@@ -42,7 +42,7 @@ TOKEN_COSTS = {
QianFan Token Price https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7#tokens%E5%90%8E%E4%BB%98%E8%B4%B9
Due to QianFan has multi price strategies, we unify `Tokens post-payment` as a statistical method.
"""
-QianFan_MODEL_TOKEN_COSTS = {
+QIANFAN_MODEL_TOKEN_COSTS = {
"ERNIE-Bot-4": {"prompt": 0.017, "completion": 0.017},
"ERNIE-Bot-8k": {"prompt": 0.0034, "completion": 0.0067},
"ERNIE-Bot": {"prompt": 0.0017, "completion": 0.0017},
@@ -66,35 +66,35 @@ QianFan_MODEL_TOKEN_COSTS = {
"Yi-34B-Chat": {"prompt": 0.0, "completion": 0.0},
}
-QianFan_EndPoint_TOKEN_COSTS = {
- "completions_pro": QianFan_MODEL_TOKEN_COSTS["ERNIE-Bot-4"],
- "ernie_bot_8k": QianFan_MODEL_TOKEN_COSTS["ERNIE-Bot-8k"],
- "completions": QianFan_MODEL_TOKEN_COSTS["ERNIE-Bot"],
- "eb-instant": QianFan_MODEL_TOKEN_COSTS["ERNIE-Bot-turbo"],
- "ai_apaas": QianFan_MODEL_TOKEN_COSTS["EB-turbo-AppBuilder"],
- "ernie_speed": QianFan_MODEL_TOKEN_COSTS["ERNIE-Speed"],
- "bloomz_7b1": QianFan_MODEL_TOKEN_COSTS["BLOOMZ-7B"],
- "llama_2_7b": QianFan_MODEL_TOKEN_COSTS["Llama-2-7B-Chat"],
- "llama_2_13b": QianFan_MODEL_TOKEN_COSTS["Llama-2-13B-Chat"],
- "llama_2_70b": QianFan_MODEL_TOKEN_COSTS["Llama-2-70B-Chat"],
- "chatglm2_6b_32k": QianFan_MODEL_TOKEN_COSTS["ChatGLM2-6B-32K"],
- "aquilachat_7b": QianFan_MODEL_TOKEN_COSTS["AquilaChat-7B"],
- "mixtral_8x7b_instruct": QianFan_MODEL_TOKEN_COSTS["Mixtral-8x7B-Instruct"],
- "sqlcoder_7b": QianFan_MODEL_TOKEN_COSTS["SQLCoder-7B"],
- "codellama_7b_instruct": QianFan_MODEL_TOKEN_COSTS["CodeLlama-7B-Instruct"],
- "xuanyuan_70b_chat": QianFan_MODEL_TOKEN_COSTS["XuanYuan-70B-Chat-4bit"],
- "qianfan_bloomz_7b_compressed": QianFan_MODEL_TOKEN_COSTS["Qianfan-BLOOMZ-7B-compressed"],
- "qianfan_chinese_llama_2_7b": QianFan_MODEL_TOKEN_COSTS["Qianfan-Chinese-Llama-2-7B"],
- "qianfan_chinese_llama_2_13b": QianFan_MODEL_TOKEN_COSTS["Qianfan-Chinese-Llama-2-13B"],
- "chatlaw": QianFan_MODEL_TOKEN_COSTS["ChatLaw"],
- "yi_34b_chat": QianFan_MODEL_TOKEN_COSTS["Yi-34B-Chat"],
+QIANFAN_ENDPOINT_TOKEN_COSTS = {
+ "completions_pro": QIANFAN_MODEL_TOKEN_COSTS["ERNIE-Bot-4"],
+ "ernie_bot_8k": QIANFAN_MODEL_TOKEN_COSTS["ERNIE-Bot-8k"],
+ "completions": QIANFAN_MODEL_TOKEN_COSTS["ERNIE-Bot"],
+ "eb-instant": QIANFAN_MODEL_TOKEN_COSTS["ERNIE-Bot-turbo"],
+ "ai_apaas": QIANFAN_MODEL_TOKEN_COSTS["EB-turbo-AppBuilder"],
+ "ernie_speed": QIANFAN_MODEL_TOKEN_COSTS["ERNIE-Speed"],
+ "bloomz_7b1": QIANFAN_MODEL_TOKEN_COSTS["BLOOMZ-7B"],
+ "llama_2_7b": QIANFAN_MODEL_TOKEN_COSTS["Llama-2-7B-Chat"],
+ "llama_2_13b": QIANFAN_MODEL_TOKEN_COSTS["Llama-2-13B-Chat"],
+ "llama_2_70b": QIANFAN_MODEL_TOKEN_COSTS["Llama-2-70B-Chat"],
+ "chatglm2_6b_32k": QIANFAN_MODEL_TOKEN_COSTS["ChatGLM2-6B-32K"],
+ "aquilachat_7b": QIANFAN_MODEL_TOKEN_COSTS["AquilaChat-7B"],
+ "mixtral_8x7b_instruct": QIANFAN_MODEL_TOKEN_COSTS["Mixtral-8x7B-Instruct"],
+ "sqlcoder_7b": QIANFAN_MODEL_TOKEN_COSTS["SQLCoder-7B"],
+ "codellama_7b_instruct": QIANFAN_MODEL_TOKEN_COSTS["CodeLlama-7B-Instruct"],
+ "xuanyuan_70b_chat": QIANFAN_MODEL_TOKEN_COSTS["XuanYuan-70B-Chat-4bit"],
+ "qianfan_bloomz_7b_compressed": QIANFAN_MODEL_TOKEN_COSTS["Qianfan-BLOOMZ-7B-compressed"],
+ "qianfan_chinese_llama_2_7b": QIANFAN_MODEL_TOKEN_COSTS["Qianfan-Chinese-Llama-2-7B"],
+ "qianfan_chinese_llama_2_13b": QIANFAN_MODEL_TOKEN_COSTS["Qianfan-Chinese-Llama-2-13B"],
+ "chatlaw": QIANFAN_MODEL_TOKEN_COSTS["ChatLaw"],
+ "yi_34b_chat": QIANFAN_MODEL_TOKEN_COSTS["Yi-34B-Chat"],
}
"""
DashScope Token price https://help.aliyun.com/zh/dashscope/developer-reference/tongyi-thousand-questions-metering-and-billing
Different model has different detail page. Attention, some model are free for a limited time.
"""
-DashScope_TOKEN_COSTS = {
+DASHSCOPE_TOKEN_COSTS = {
"qwen-turbo": {"prompt": 0.0011, "completion": 0.0011},
"qwen-plus": {"prompt": 0.0028, "completion": 0.0028},
"qwen-max": {"prompt": 0.0, "completion": 0.0},
From 1583c0f718b61702ee4733e418266472f0e7dc67 Mon Sep 17 00:00:00 2001
From: betterwang
Date: Wed, 28 Feb 2024 14:22:58 +0800
Subject: [PATCH 078/104] simplify provider ut code
---
.github/workflows/fulltest.yaml | 1 -
.github/workflows/unittest.yaml | 2 +-
metagpt/provider/fireworks_api.py | 13 +---
metagpt/provider/google_gemini_api.py | 10 ---
metagpt/provider/ollama_api.py | 10 ---
metagpt/provider/open_llm_api.py | 13 +---
metagpt/provider/openai_api.py | 17 +----
metagpt/provider/zhipuai_api.py | 10 ---
tests/metagpt/provider/test_anthropic_api.py | 12 +--
tests/metagpt/provider/test_base_llm.py | 53 +++++--------
tests/metagpt/provider/test_fireworks_llm.py | 76 +++++--------------
.../provider/test_google_gemini_api.py | 36 ++++-----
tests/metagpt/provider/test_ollama_api.py | 30 ++++----
tests/metagpt/provider/test_open_llm_api.py | 76 +++++--------------
tests/metagpt/provider/test_spark_api.py | 44 +++++------
tests/metagpt/provider/test_zhipuai_api.py | 38 ++++------
tests/spark.yaml | 7 --
17 files changed, 134 insertions(+), 314 deletions(-)
delete mode 100644 tests/spark.yaml
diff --git a/.github/workflows/fulltest.yaml b/.github/workflows/fulltest.yaml
index f5c6049e1..70c800481 100644
--- a/.github/workflows/fulltest.yaml
+++ b/.github/workflows/fulltest.yaml
@@ -54,7 +54,6 @@ jobs:
export ALLOW_OPENAI_API_CALL=0
echo "${{ secrets.METAGPT_KEY_YAML }}" | base64 -d > config/key.yaml
mkdir -p ~/.metagpt && echo "${{ secrets.METAGPT_CONFIG2_YAML }}" | base64 -d > ~/.metagpt/config2.yaml
- echo "${{ secrets.SPARK_YAML }}" | base64 -d > ~/.metagpt/spark.yaml
pytest tests/ --doctest-modules --cov=./metagpt/ --cov-report=xml:cov.xml --cov-report=html:htmlcov --durations=20 | tee unittest.txt
- name: Show coverage report
run: |
diff --git a/.github/workflows/unittest.yaml b/.github/workflows/unittest.yaml
index 2e7e3ce2b..afa9faba7 100644
--- a/.github/workflows/unittest.yaml
+++ b/.github/workflows/unittest.yaml
@@ -31,7 +31,7 @@ jobs:
- name: Test with pytest
run: |
export ALLOW_OPENAI_API_CALL=0
- mkdir -p ~/.metagpt && cp tests/config2.yaml ~/.metagpt/config2.yaml && cp tests/spark.yaml ~/.metagpt/spark.yaml
+ mkdir -p ~/.metagpt && cp tests/config2.yaml ~/.metagpt/config2.yaml
pytest tests/ --doctest-modules --cov=./metagpt/ --cov-report=xml:cov.xml --cov-report=html:htmlcov --durations=20 | tee unittest.txt
- name: Show coverage report
run: |
diff --git a/metagpt/provider/fireworks_api.py b/metagpt/provider/fireworks_api.py
index 0c70a3cb4..f356c23c4 100644
--- a/metagpt/provider/fireworks_api.py
+++ b/metagpt/provider/fireworks_api.py
@@ -19,7 +19,7 @@ from metagpt.configs.llm_config import LLMConfig, LLMType
from metagpt.logs import log_llm_stream, logger
from metagpt.provider.llm_provider_registry import register_provider
from metagpt.provider.openai_api import OpenAILLM, log_and_reraise
-from metagpt.utils.cost_manager import CostManager, Costs
+from metagpt.utils.cost_manager import CostManager
MODEL_GRADE_TOKEN_COSTS = {
"-1": {"prompt": 0.0, "completion": 0.0}, # abnormal condition
@@ -81,17 +81,6 @@ class FireworksLLM(OpenAILLM):
kwargs = dict(api_key=self.config.api_key, base_url=self.config.base_url)
return kwargs
- def _update_costs(self, usage: CompletionUsage):
- if self.config.calc_usage and usage:
- try:
- # use FireworksCostManager not context.cost_manager
- self.cost_manager.update_cost(usage.prompt_tokens, usage.completion_tokens, self.model)
- except Exception as e:
- logger.error(f"updating costs failed!, exp: {e}")
-
- def get_costs(self) -> Costs:
- return self.cost_manager.get_costs()
-
async def _achat_completion_stream(self, messages: list[dict], timeout=3) -> str:
response: AsyncStream[ChatCompletionChunk] = await self.aclient.chat.completions.create(
**self._cons_kwargs(messages), stream=True
diff --git a/metagpt/provider/google_gemini_api.py b/metagpt/provider/google_gemini_api.py
index 2647ab16b..87ea81c80 100644
--- a/metagpt/provider/google_gemini_api.py
+++ b/metagpt/provider/google_gemini_api.py
@@ -72,16 +72,6 @@ class GeminiLLM(BaseLLM):
kwargs = {"contents": messages, "generation_config": GenerationConfig(temperature=0.3), "stream": stream}
return kwargs
- def _update_costs(self, usage: dict):
- """update each request's token cost"""
- if self.config.calc_usage:
- try:
- prompt_tokens = int(usage.get("prompt_tokens", 0))
- completion_tokens = int(usage.get("completion_tokens", 0))
- self.cost_manager.update_cost(prompt_tokens, completion_tokens, self.model)
- except Exception as e:
- logger.error(f"google gemini updats costs failed! exp: {e}")
-
def get_choice_text(self, resp: GenerateContentResponse) -> str:
return resp.text
diff --git a/metagpt/provider/ollama_api.py b/metagpt/provider/ollama_api.py
index c9103b018..52e8dbe36 100644
--- a/metagpt/provider/ollama_api.py
+++ b/metagpt/provider/ollama_api.py
@@ -46,16 +46,6 @@ class OllamaLLM(BaseLLM):
kwargs = {"model": self.model, "messages": messages, "options": {"temperature": 0.3}, "stream": stream}
return kwargs
- def _update_costs(self, usage: dict):
- """update each request's token cost"""
- if self.config.calc_usage:
- try:
- prompt_tokens = int(usage.get("prompt_tokens", 0))
- completion_tokens = int(usage.get("completion_tokens", 0))
- self._cost_manager.update_cost(prompt_tokens, completion_tokens, self.model)
- except Exception as e:
- logger.error(f"ollama updats costs failed! exp: {e}")
-
def get_choice_text(self, resp: dict) -> str:
"""get the resp content from llm response"""
assist_msg = resp.get("message", {})
diff --git a/metagpt/provider/open_llm_api.py b/metagpt/provider/open_llm_api.py
index a29b263a4..69371e379 100644
--- a/metagpt/provider/open_llm_api.py
+++ b/metagpt/provider/open_llm_api.py
@@ -8,7 +8,7 @@ from metagpt.configs.llm_config import LLMConfig, LLMType
from metagpt.logs import logger
from metagpt.provider.llm_provider_registry import register_provider
from metagpt.provider.openai_api import OpenAILLM
-from metagpt.utils.cost_manager import Costs, TokenCostManager
+from metagpt.utils.cost_manager import TokenCostManager
from metagpt.utils.token_counter import count_message_tokens, count_string_tokens
@@ -34,14 +34,3 @@ class OpenLLM(OpenAILLM):
logger.error(f"usage calculation failed!: {e}")
return usage
-
- def _update_costs(self, usage: CompletionUsage):
- if self.config.calc_usage and usage:
- try:
- # use OpenLLMCostManager not CONFIG.cost_manager
- self._cost_manager.update_cost(usage.prompt_tokens, usage.completion_tokens, self.model)
- except Exception as e:
- logger.error(f"updating costs failed!, exp: {e}")
-
- def get_costs(self) -> Costs:
- return self._cost_manager.get_costs()
diff --git a/metagpt/provider/openai_api.py b/metagpt/provider/openai_api.py
index 28abed752..90a61085f 100644
--- a/metagpt/provider/openai_api.py
+++ b/metagpt/provider/openai_api.py
@@ -30,7 +30,7 @@ from metagpt.provider.constant import GENERAL_FUNCTION_SCHEMA
from metagpt.provider.llm_provider_registry import register_provider
from metagpt.schema import Message
from metagpt.utils.common import CodeParser, decode_image
-from metagpt.utils.cost_manager import CostManager, Costs
+from metagpt.utils.cost_manager import CostManager
from metagpt.utils.exceptions import handle_exception
from metagpt.utils.token_counter import (
count_message_tokens,
@@ -56,16 +56,13 @@ class OpenAILLM(BaseLLM):
def __init__(self, config: LLMConfig):
self.config = config
- self._init_model()
self._init_client()
self.auto_max_tokens = False
self.cost_manager: Optional[CostManager] = None
- def _init_model(self):
- self.model = self.config.model # Used in _calc_usage & _cons_kwargs
-
def _init_client(self):
"""https://github.com/openai/openai-python#async-usage"""
+ self.model = self.config.model # Used in _calc_usage & _cons_kwargs
kwargs = self._make_client_kwargs()
self.aclient = AsyncOpenAI(**kwargs)
@@ -272,16 +269,6 @@ class OpenAILLM(BaseLLM):
return usage
- @handle_exception
- def _update_costs(self, usage: CompletionUsage):
- if self.config.calc_usage and usage and self.cost_manager:
- self.cost_manager.update_cost(usage.prompt_tokens, usage.completion_tokens, self.model)
-
- def get_costs(self) -> Costs:
- if not self.cost_manager:
- return Costs(0, 0, 0, 0)
- return self.cost_manager.get_costs()
-
def _get_max_tokens(self, messages: list[dict]):
if not self.auto_max_tokens:
return self.config.max_token
diff --git a/metagpt/provider/zhipuai_api.py b/metagpt/provider/zhipuai_api.py
index 9e8e5fb53..4cbee4038 100644
--- a/metagpt/provider/zhipuai_api.py
+++ b/metagpt/provider/zhipuai_api.py
@@ -53,16 +53,6 @@ class ZhiPuAILLM(BaseLLM):
kwargs = {"model": self.model, "messages": messages, "stream": stream, "temperature": 0.3}
return kwargs
- def _update_costs(self, usage: dict):
- """update each request's token cost"""
- if self.config.calc_usage:
- try:
- prompt_tokens = int(usage.get("prompt_tokens", 0))
- completion_tokens = int(usage.get("completion_tokens", 0))
- self.cost_manager.update_cost(prompt_tokens, completion_tokens, self.model)
- except Exception as e:
- logger.error(f"zhipuai updats costs failed! exp: {e}")
-
def completion(self, messages: list[dict], timeout=3) -> dict:
resp: Completion = self.llm.chat.completions.create(**self._const_kwargs(messages))
usage = resp.usage.model_dump()
diff --git a/tests/metagpt/provider/test_anthropic_api.py b/tests/metagpt/provider/test_anthropic_api.py
index 6962ab064..93cfd7dbc 100644
--- a/tests/metagpt/provider/test_anthropic_api.py
+++ b/tests/metagpt/provider/test_anthropic_api.py
@@ -8,25 +8,25 @@ from anthropic.resources.completions import Completion
from metagpt.provider.anthropic_api import Claude2
from tests.metagpt.provider.mock_llm_config import mock_llm_config
+from tests.metagpt.provider.req_resp_const import prompt, resp_cont_tmpl
-prompt = "who are you"
-resp = "I'am Claude2"
+resp_cont = resp_cont_tmpl.format(name="Claude")
def mock_anthropic_completions_create(self, model: str, prompt: str, max_tokens_to_sample: int) -> Completion:
- return Completion(id="xx", completion=resp, model="claude-2", stop_reason="stop_sequence", type="completion")
+ return Completion(id="xx", completion=resp_cont, model="claude-2", stop_reason="stop_sequence", type="completion")
async def mock_anthropic_acompletions_create(self, model: str, prompt: str, max_tokens_to_sample: int) -> Completion:
- return Completion(id="xx", completion=resp, model="claude-2", stop_reason="stop_sequence", type="completion")
+ return Completion(id="xx", completion=resp_cont, model="claude-2", stop_reason="stop_sequence", type="completion")
def test_claude2_ask(mocker):
mocker.patch("anthropic.resources.completions.Completions.create", mock_anthropic_completions_create)
- assert resp == Claude2(mock_llm_config).ask(prompt)
+ assert resp_cont == Claude2(mock_llm_config).ask(prompt)
@pytest.mark.asyncio
async def test_claude2_aask(mocker):
mocker.patch("anthropic.resources.completions.AsyncCompletions.create", mock_anthropic_acompletions_create)
- assert resp == await Claude2(mock_llm_config).aask(prompt)
+ assert resp_cont == await Claude2(mock_llm_config).aask(prompt)
diff --git a/tests/metagpt/provider/test_base_llm.py b/tests/metagpt/provider/test_base_llm.py
index cc781f78a..cf44343bc 100644
--- a/tests/metagpt/provider/test_base_llm.py
+++ b/tests/metagpt/provider/test_base_llm.py
@@ -11,21 +11,13 @@ import pytest
from metagpt.configs.llm_config import LLMConfig
from metagpt.provider.base_llm import BaseLLM
from metagpt.schema import Message
+from tests.metagpt.provider.req_resp_const import (
+ default_resp_cont,
+ get_part_chat_completion,
+ prompt,
+)
-default_chat_resp = {
- "choices": [
- {
- "index": 0,
- "message": {
- "role": "assistant",
- "content": "I'am GPT",
- },
- "finish_reason": "stop",
- }
- ]
-}
-prompt_msg = "who are you"
-resp_content = default_chat_resp["choices"][0]["message"]["content"]
+name = "GPT"
class MockBaseLLM(BaseLLM):
@@ -33,16 +25,13 @@ class MockBaseLLM(BaseLLM):
pass
def completion(self, messages: list[dict], timeout=3):
- return default_chat_resp
+ return get_part_chat_completion(name)
async def acompletion(self, messages: list[dict], timeout=3):
- return default_chat_resp
+ return get_part_chat_completion(name)
async def acompletion_text(self, messages: list[dict], stream=False, timeout=3) -> str:
- return resp_content
-
- async def close(self):
- return default_chat_resp
+ return default_resp_cont
def test_base_llm():
@@ -86,25 +75,25 @@ def test_base_llm():
choice_text = base_llm.get_choice_text(openai_funccall_resp)
assert choice_text == openai_funccall_resp["choices"][0]["message"]["content"]
- # resp = base_llm.ask(prompt_msg)
- # assert resp == resp_content
+ # resp = base_llm.ask(prompt)
+ # assert resp == default_resp_cont
- # resp = base_llm.ask_batch([prompt_msg])
- # assert resp == resp_content
+ # resp = base_llm.ask_batch([prompt])
+ # assert resp == default_resp_cont
- # resp = base_llm.ask_code([prompt_msg])
- # assert resp == resp_content
+ # resp = base_llm.ask_code([prompt])
+ # assert resp == default_resp_cont
@pytest.mark.asyncio
async def test_async_base_llm():
base_llm = MockBaseLLM()
- resp = await base_llm.aask(prompt_msg)
- assert resp == resp_content
+ resp = await base_llm.aask(prompt)
+ assert resp == default_resp_cont
- resp = await base_llm.aask_batch([prompt_msg])
- assert resp == resp_content
+ resp = await base_llm.aask_batch([prompt])
+ assert resp == default_resp_cont
- # resp = await base_llm.aask_code([prompt_msg])
- # assert resp == resp_content
+ # resp = await base_llm.aask_code([prompt])
+ # assert resp == default_resp_cont
diff --git a/tests/metagpt/provider/test_fireworks_llm.py b/tests/metagpt/provider/test_fireworks_llm.py
index 66b55e5b2..1c1aa9caa 100644
--- a/tests/metagpt/provider/test_fireworks_llm.py
+++ b/tests/metagpt/provider/test_fireworks_llm.py
@@ -3,14 +3,7 @@
# @Desc : the unittest of fireworks api
import pytest
-from openai.types.chat.chat_completion import (
- ChatCompletion,
- ChatCompletionMessage,
- Choice,
-)
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
-from openai.types.chat.chat_completion_chunk import Choice as AChoice
-from openai.types.chat.chat_completion_chunk import ChoiceDelta
from openai.types.completion_usage import CompletionUsage
from metagpt.provider.fireworks_api import (
@@ -20,42 +13,19 @@ from metagpt.provider.fireworks_api import (
)
from metagpt.utils.cost_manager import Costs
from tests.metagpt.provider.mock_llm_config import mock_llm_config
-
-resp_content = "I'm fireworks"
-default_resp = ChatCompletion(
- id="cmpl-a6652c1bb181caae8dd19ad8",
- model="accounts/fireworks/models/llama-v2-13b-chat",
- object="chat.completion",
- created=1703300855,
- choices=[
- Choice(
- finish_reason="stop",
- index=0,
- message=ChatCompletionMessage(role="assistant", content=resp_content),
- logprobs=None,
- )
- ],
- usage=CompletionUsage(completion_tokens=110, prompt_tokens=92, total_tokens=202),
+from tests.metagpt.provider.req_resp_const import (
+ get_openai_chat_completion,
+ get_openai_chat_completion_chunk,
+ llm_general_chat_funcs_test,
+ messages,
+ prompt,
+ resp_cont_tmpl,
)
-default_resp_chunk = ChatCompletionChunk(
- id=default_resp.id,
- model=default_resp.model,
- object="chat.completion.chunk",
- created=default_resp.created,
- choices=[
- AChoice(
- delta=ChoiceDelta(content=resp_content, role="assistant"),
- finish_reason="stop",
- index=0,
- logprobs=None,
- )
- ],
- usage=dict(default_resp.usage),
-)
-
-prompt_msg = "who are you"
-messages = [{"role": "user", "content": prompt_msg}]
+name = "fireworks"
+resp_cont = resp_cont_tmpl.format(name=name)
+default_resp = get_openai_chat_completion(name)
+default_resp_chunk = get_openai_chat_completion_chunk(name, usage_as_dict=True)
def test_fireworks_costmanager():
@@ -88,27 +58,17 @@ async def mock_openai_acompletions_create(self, stream: bool = False, **kwargs)
async def test_fireworks_acompletion(mocker):
mocker.patch("openai.resources.chat.completions.AsyncCompletions.create", mock_openai_acompletions_create)
- fireworks_gpt = FireworksLLM(mock_llm_config)
- fireworks_gpt.model = "llama-v2-13b-chat"
+ fireworks_llm = FireworksLLM(mock_llm_config)
+ fireworks_llm.model = "llama-v2-13b-chat"
- fireworks_gpt._update_costs(
+ fireworks_llm._update_costs(
usage=CompletionUsage(prompt_tokens=500000, completion_tokens=500000, total_tokens=1000000)
)
- assert fireworks_gpt.get_costs() == Costs(
+ assert fireworks_llm.get_costs() == Costs(
total_prompt_tokens=500000, total_completion_tokens=500000, total_cost=0.5, total_budget=0
)
- resp = await fireworks_gpt.acompletion(messages)
- assert resp.choices[0].message.content in resp_content
+ resp = await fireworks_llm.acompletion(messages)
+ assert resp.choices[0].message.content in resp_cont
- resp = await fireworks_gpt.aask(prompt_msg, stream=False)
- assert resp == resp_content
-
- resp = await fireworks_gpt.acompletion_text(messages, stream=False)
- assert resp == resp_content
-
- resp = await fireworks_gpt.acompletion_text(messages, stream=True)
- assert resp == resp_content
-
- resp = await fireworks_gpt.aask(prompt_msg)
- assert resp == resp_content
+ await llm_general_chat_funcs_test(fireworks_llm, prompt, messages, resp_cont)
diff --git a/tests/metagpt/provider/test_google_gemini_api.py b/tests/metagpt/provider/test_google_gemini_api.py
index 404ae1e90..50c15ee19 100644
--- a/tests/metagpt/provider/test_google_gemini_api.py
+++ b/tests/metagpt/provider/test_google_gemini_api.py
@@ -11,6 +11,12 @@ from google.generativeai.types import content_types
from metagpt.provider.google_gemini_api import GeminiLLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config
+from tests.metagpt.provider.req_resp_const import (
+ gemini_messages,
+ llm_general_chat_funcs_test,
+ prompt,
+ resp_cont_tmpl,
+)
@dataclass
@@ -18,10 +24,8 @@ class MockGeminiResponse(ABC):
text: str
-prompt_msg = "who are you"
-messages = [{"role": "user", "parts": prompt_msg}]
-resp_content = "I'm gemini from google"
-default_resp = MockGeminiResponse(text=resp_content)
+resp_cont = resp_cont_tmpl.format(name="gemini")
+default_resp = MockGeminiResponse(text=resp_cont)
def mock_gemini_count_tokens(self, contents: content_types.ContentsType) -> glm.CountTokensResponse:
@@ -60,28 +64,18 @@ async def test_gemini_acompletion(mocker):
mock_gemini_generate_content_async,
)
- gemini_gpt = GeminiLLM(mock_llm_config)
+ gemini_llm = GeminiLLM(mock_llm_config)
- assert gemini_gpt._user_msg(prompt_msg) == {"role": "user", "parts": [prompt_msg]}
- assert gemini_gpt._assistant_msg(prompt_msg) == {"role": "model", "parts": [prompt_msg]}
+ assert gemini_llm._user_msg(prompt) == {"role": "user", "parts": [prompt]}
+ assert gemini_llm._assistant_msg(prompt) == {"role": "model", "parts": [prompt]}
- usage = gemini_gpt.get_usage(messages, resp_content)
+ usage = gemini_llm.get_usage(gemini_messages, resp_cont)
assert usage == {"prompt_tokens": 20, "completion_tokens": 20}
- resp = gemini_gpt.completion(messages)
+ resp = gemini_llm.completion(gemini_messages)
assert resp == default_resp
- resp = await gemini_gpt.acompletion(messages)
+ resp = await gemini_llm.acompletion(gemini_messages)
assert resp.text == default_resp.text
- resp = await gemini_gpt.aask(prompt_msg, stream=False)
- assert resp == resp_content
-
- resp = await gemini_gpt.acompletion_text(messages, stream=False)
- assert resp == resp_content
-
- resp = await gemini_gpt.acompletion_text(messages, stream=True)
- assert resp == resp_content
-
- resp = await gemini_gpt.aask(prompt_msg)
- assert resp == resp_content
+ await llm_general_chat_funcs_test(gemini_llm, prompt, gemini_messages, resp_cont)
diff --git a/tests/metagpt/provider/test_ollama_api.py b/tests/metagpt/provider/test_ollama_api.py
index 5d942598b..af2e929e9 100644
--- a/tests/metagpt/provider/test_ollama_api.py
+++ b/tests/metagpt/provider/test_ollama_api.py
@@ -9,12 +9,15 @@ import pytest
from metagpt.provider.ollama_api import OllamaLLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config
+from tests.metagpt.provider.req_resp_const import (
+ llm_general_chat_funcs_test,
+ messages,
+ prompt,
+ resp_cont_tmpl,
+)
-prompt_msg = "who are you"
-messages = [{"role": "user", "content": prompt_msg}]
-
-resp_content = "I'm ollama"
-default_resp = {"message": {"role": "assistant", "content": resp_content}}
+resp_cont = resp_cont_tmpl.format(name="ollama")
+default_resp = {"message": {"role": "assistant", "content": resp_cont}}
async def mock_ollama_arequest(self, stream: bool = False, **kwargs) -> Tuple[Any, Any, bool]:
@@ -41,19 +44,12 @@ async def mock_ollama_arequest(self, stream: bool = False, **kwargs) -> Tuple[An
async def test_gemini_acompletion(mocker):
mocker.patch("metagpt.provider.general_api_requestor.GeneralAPIRequestor.arequest", mock_ollama_arequest)
- ollama_gpt = OllamaLLM(mock_llm_config)
+ ollama_llm = OllamaLLM(mock_llm_config)
- resp = await ollama_gpt.acompletion(messages)
+ resp = await ollama_llm.acompletion(messages)
assert resp["message"]["content"] == default_resp["message"]["content"]
- resp = await ollama_gpt.aask(prompt_msg, stream=False)
- assert resp == resp_content
+ resp = await ollama_llm.aask(prompt, stream=False)
+ assert resp == resp_cont
- resp = await ollama_gpt.acompletion_text(messages, stream=False)
- assert resp == resp_content
-
- resp = await ollama_gpt.acompletion_text(messages, stream=True)
- assert resp == resp_content
-
- resp = await ollama_gpt.aask(prompt_msg)
- assert resp == resp_content
+ await llm_general_chat_funcs_test(ollama_llm, prompt, messages, resp_cont)
diff --git a/tests/metagpt/provider/test_open_llm_api.py b/tests/metagpt/provider/test_open_llm_api.py
index fc7b510cc..aa38b95a6 100644
--- a/tests/metagpt/provider/test_open_llm_api.py
+++ b/tests/metagpt/provider/test_open_llm_api.py
@@ -3,53 +3,26 @@
# @Desc :
import pytest
-from openai.types.chat.chat_completion import (
- ChatCompletion,
- ChatCompletionMessage,
- Choice,
-)
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
-from openai.types.chat.chat_completion_chunk import Choice as AChoice
-from openai.types.chat.chat_completion_chunk import ChoiceDelta
from openai.types.completion_usage import CompletionUsage
from metagpt.provider.open_llm_api import OpenLLM
-from metagpt.utils.cost_manager import Costs
+from metagpt.utils.cost_manager import CostManager, Costs
from tests.metagpt.provider.mock_llm_config import mock_llm_config
-
-resp_content = "I'm llama2"
-default_resp = ChatCompletion(
- id="cmpl-a6652c1bb181caae8dd19ad8",
- model="llama-v2-13b-chat",
- object="chat.completion",
- created=1703302755,
- choices=[
- Choice(
- finish_reason="stop",
- index=0,
- message=ChatCompletionMessage(role="assistant", content=resp_content),
- logprobs=None,
- )
- ],
+from tests.metagpt.provider.req_resp_const import (
+ get_openai_chat_completion,
+ get_openai_chat_completion_chunk,
+ llm_general_chat_funcs_test,
+ messages,
+ prompt,
+ resp_cont_tmpl,
)
-default_resp_chunk = ChatCompletionChunk(
- id=default_resp.id,
- model=default_resp.model,
- object="chat.completion.chunk",
- created=default_resp.created,
- choices=[
- AChoice(
- delta=ChoiceDelta(content=resp_content, role="assistant"),
- finish_reason="stop",
- index=0,
- logprobs=None,
- )
- ],
-)
+name = "llama2-7b"
+resp_cont = resp_cont_tmpl.format(name=name)
+default_resp = get_openai_chat_completion(name)
-prompt_msg = "who are you"
-messages = [{"role": "user", "content": prompt_msg}]
+default_resp_chunk = get_openai_chat_completion_chunk(name)
async def mock_openai_acompletions_create(self, stream: bool = False, **kwargs) -> ChatCompletionChunk:
@@ -68,25 +41,16 @@ async def mock_openai_acompletions_create(self, stream: bool = False, **kwargs)
async def test_openllm_acompletion(mocker):
mocker.patch("openai.resources.chat.completions.AsyncCompletions.create", mock_openai_acompletions_create)
- openllm_gpt = OpenLLM(mock_llm_config)
- openllm_gpt.model = "llama-v2-13b-chat"
+ openllm_llm = OpenLLM(mock_llm_config)
+ openllm_llm.model = "llama-v2-13b-chat"
- openllm_gpt._update_costs(usage=CompletionUsage(prompt_tokens=100, completion_tokens=100, total_tokens=200))
- assert openllm_gpt.get_costs() == Costs(
+ openllm_llm.cost_manager = CostManager()
+ openllm_llm._update_costs(usage=CompletionUsage(prompt_tokens=100, completion_tokens=100, total_tokens=200))
+ assert openllm_llm.get_costs() == Costs(
total_prompt_tokens=100, total_completion_tokens=100, total_cost=0, total_budget=0
)
- resp = await openllm_gpt.acompletion(messages)
- assert resp.choices[0].message.content in resp_content
+ resp = await openllm_llm.acompletion(messages)
+ assert resp.choices[0].message.content in resp_cont
- resp = await openllm_gpt.aask(prompt_msg, stream=False)
- assert resp == resp_content
-
- resp = await openllm_gpt.acompletion_text(messages, stream=False)
- assert resp == resp_content
-
- resp = await openllm_gpt.acompletion_text(messages, stream=True)
- assert resp == resp_content
-
- resp = await openllm_gpt.aask(prompt_msg)
- assert resp == resp_content
+ await llm_general_chat_funcs_test(openllm_llm, prompt, messages, resp_cont)
diff --git a/tests/metagpt/provider/test_spark_api.py b/tests/metagpt/provider/test_spark_api.py
index f5a6f66fd..9c278267d 100644
--- a/tests/metagpt/provider/test_spark_api.py
+++ b/tests/metagpt/provider/test_spark_api.py
@@ -4,12 +4,18 @@
import pytest
-from metagpt.config2 import Config
from metagpt.provider.spark_api import GetMessageFromWeb, SparkLLM
-from tests.metagpt.provider.mock_llm_config import mock_llm_config
+from tests.metagpt.provider.mock_llm_config import (
+ mock_llm_config,
+ mock_llm_config_spark,
+)
+from tests.metagpt.provider.req_resp_const import (
+ llm_general_chat_funcs_test,
+ prompt,
+ resp_cont_tmpl,
+)
-prompt_msg = "who are you"
-resp_content = "I'm Spark"
+resp_cont = resp_cont_tmpl.format(name="Spark")
class MockWebSocketApp(object):
@@ -23,7 +29,7 @@ class MockWebSocketApp(object):
def test_get_msg_from_web(mocker):
mocker.patch("websocket.WebSocketApp", MockWebSocketApp)
- get_msg_from_web = GetMessageFromWeb(prompt_msg, mock_llm_config)
+ get_msg_from_web = GetMessageFromWeb(prompt, mock_llm_config)
assert get_msg_from_web.gen_params()["parameter"]["chat"]["domain"] == "mock_domain"
ret = get_msg_from_web.run()
@@ -31,34 +37,26 @@ def test_get_msg_from_web(mocker):
def mock_spark_get_msg_from_web_run(self) -> str:
- return resp_content
+ return resp_cont
@pytest.mark.asyncio
-async def test_spark_aask():
- llm = SparkLLM(Config.from_home("spark.yaml").llm)
+async def test_spark_aask(mocker):
+ mocker.patch("metagpt.provider.spark_api.GetMessageFromWeb.run", mock_spark_get_msg_from_web_run)
+
+ llm = SparkLLM(mock_llm_config_spark)
resp = await llm.aask("Hello!")
- print(resp)
+ assert resp == resp_cont
@pytest.mark.asyncio
async def test_spark_acompletion(mocker):
mocker.patch("metagpt.provider.spark_api.GetMessageFromWeb.run", mock_spark_get_msg_from_web_run)
- spark_gpt = SparkLLM(mock_llm_config)
+ spark_llm = SparkLLM(mock_llm_config)
- resp = await spark_gpt.acompletion([])
- assert resp == resp_content
+ resp = await spark_llm.acompletion([])
+ assert resp == resp_cont
- resp = await spark_gpt.aask(prompt_msg, stream=False)
- assert resp == resp_content
-
- resp = await spark_gpt.acompletion_text([], stream=False)
- assert resp == resp_content
-
- resp = await spark_gpt.acompletion_text([], stream=True)
- assert resp == resp_content
-
- resp = await spark_gpt.aask(prompt_msg)
- assert resp == resp_content
+ await llm_general_chat_funcs_test(spark_llm, prompt, prompt, resp_cont)
diff --git a/tests/metagpt/provider/test_zhipuai_api.py b/tests/metagpt/provider/test_zhipuai_api.py
index ad2ececa2..c51010122 100644
--- a/tests/metagpt/provider/test_zhipuai_api.py
+++ b/tests/metagpt/provider/test_zhipuai_api.py
@@ -6,22 +6,24 @@ import pytest
from metagpt.provider.zhipuai_api import ZhiPuAILLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config_zhipu
+from tests.metagpt.provider.req_resp_const import (
+ get_part_chat_completion,
+ llm_general_chat_funcs_test,
+ messages,
+ prompt,
+ resp_cont_tmpl,
+)
-prompt_msg = "who are you"
-messages = [{"role": "user", "content": prompt_msg}]
-
-resp_content = "I'm chatglm-turbo"
-default_resp = {
- "choices": [{"finish_reason": "stop", "index": 0, "message": {"content": resp_content, "role": "assistant"}}],
- "usage": {"completion_tokens": 22, "prompt_tokens": 19, "total_tokens": 41},
-}
+name = "ChatGLM-4"
+resp_cont = resp_cont_tmpl.format(name=name)
+default_resp = get_part_chat_completion(name)
async def mock_zhipuai_acreate_stream(self, **kwargs):
class MockResponse(object):
async def _aread(self):
class Iterator(object):
- events = [{"choices": [{"index": 0, "delta": {"content": resp_content, "role": "assistant"}}]}]
+ events = [{"choices": [{"index": 0, "delta": {"content": resp_cont, "role": "assistant"}}]}]
async def __aiter__(self):
for event in self.events:
@@ -46,22 +48,12 @@ async def test_zhipuai_acompletion(mocker):
mocker.patch("metagpt.provider.zhipuai.zhipu_model_api.ZhiPuModelAPI.acreate", mock_zhipuai_acreate)
mocker.patch("metagpt.provider.zhipuai.zhipu_model_api.ZhiPuModelAPI.acreate_stream", mock_zhipuai_acreate_stream)
- zhipu_gpt = ZhiPuAILLM(mock_llm_config_zhipu)
+ zhipu_llm = ZhiPuAILLM(mock_llm_config_zhipu)
- resp = await zhipu_gpt.acompletion(messages)
- assert resp["choices"][0]["message"]["content"] == resp_content
+ resp = await zhipu_llm.acompletion(messages)
+ assert resp["choices"][0]["message"]["content"] == resp_cont
- resp = await zhipu_gpt.aask(prompt_msg, stream=False)
- assert resp == resp_content
-
- resp = await zhipu_gpt.acompletion_text(messages, stream=False)
- assert resp == resp_content
-
- resp = await zhipu_gpt.acompletion_text(messages, stream=True)
- assert resp == resp_content
-
- resp = await zhipu_gpt.aask(prompt_msg)
- assert resp == resp_content
+ await llm_general_chat_funcs_test(zhipu_llm, prompt, messages, resp_cont)
def test_zhipuai_proxy():
diff --git a/tests/spark.yaml b/tests/spark.yaml
deleted file mode 100644
index a5bbd98bd..000000000
--- a/tests/spark.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-llm:
- api_type: "spark"
- app_id: "xxx"
- api_key: "xxx"
- api_secret: "xxx"
- domain: "generalv2"
- base_url: "wss://spark-api.xf-yun.com/v3.1/chat"
\ No newline at end of file
From ecbac6bbabc5587fdc397724290e0bb9a667b115 Mon Sep 17 00:00:00 2001
From: mannaandpoem <1580466765@qq.com>
Date: Wed, 28 Feb 2024 15:29:41 +0800
Subject: [PATCH 079/104] update prompt of ocr example
---
examples/mi/ocr_receipt.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/examples/mi/ocr_receipt.py b/examples/mi/ocr_receipt.py
index ffa5cff05..f394cccec 100644
--- a/examples/mi/ocr_receipt.py
+++ b/examples/mi/ocr_receipt.py
@@ -6,7 +6,9 @@ async def main():
image_path = "image.jpg"
language = "English"
requirement = f"""This is a {language} receipt image.
- Your goal is to perform OCR on images using PaddleOCR, then extract the total amount from ocr text results, and finally save as table. Image path: {image_path}.
+ Your goal is to perform OCR on images using PaddleOCR, output text content from the OCR results and discard
+ coordinates and confidence levels, then recognize the total amount from ocr text content, and finally save as table.
+ Image path: {image_path}.
NOTE: The environments for Paddle and PaddleOCR are all ready and has been fully installed."""
mi = Interpreter()
From b3515ca90b47a1752b2e9fc497b38e3f544d9308 Mon Sep 17 00:00:00 2001
From: YangQianli92 <108046369+YangQianli92@users.noreply.github.com>
Date: Wed, 28 Feb 2024 20:40:07 +0800
Subject: [PATCH 080/104] Add files via upload
---
tests/metagpt/provider/test_openai.py | 50 ++++++++++++++++++++++++++-
1 file changed, 49 insertions(+), 1 deletion(-)
diff --git a/tests/metagpt/provider/test_openai.py b/tests/metagpt/provider/test_openai.py
index 96c08a867..1c28fbd7b 100644
--- a/tests/metagpt/provider/test_openai.py
+++ b/tests/metagpt/provider/test_openai.py
@@ -1,10 +1,11 @@
import pytest
from openai.types.chat import (
ChatCompletion,
+ ChatCompletionChunk,
ChatCompletionMessage,
ChatCompletionMessageToolCall,
)
-from openai.types.chat.chat_completion import Choice
+from openai.types.chat.chat_completion import Choice, CompletionUsage
from openai.types.chat.chat_completion_message_tool_call import Function
from PIL import Image
@@ -16,6 +17,24 @@ from tests.metagpt.provider.mock_llm_config import (
mock_llm_config,
mock_llm_config_proxy,
)
+from tests.metagpt.provider.req_resp_const import (
+ get_openai_chat_completion,
+ get_openai_chat_completion_chunk,
+ llm_general_chat_funcs_test,
+ messages,
+ prompt,
+ resp_cont_tmpl,
+)
+
+prompt_msg = "who are you"
+messages = [{"role": "user", "content": prompt_msg}]
+name = 'AI'
+resp_cont = resp_cont_tmpl.format(name=name)
+default_resp = get_openai_chat_completion(name)
+
+default_resp_chunk = get_openai_chat_completion_chunk(name)
+
+usage = CompletionUsage(completion_tokens=110, prompt_tokens=92, total_tokens=202)
@pytest.mark.asyncio
@@ -121,3 +140,32 @@ async def test_gen_image():
images: list[Image] = await llm.gen_image(model=model, prompt=prompt, resp_format="b64_json")
assert images[0].size == (1024, 1024)
+
+
+async def mock_openai_acompletions_create(self, stream: bool = False, **kwargs) -> ChatCompletionChunk:
+ if stream:
+
+ class Iterator(object):
+ async def __aiter__(self):
+ yield default_resp_chunk
+
+ return Iterator()
+ else:
+ return default_resp
+
+
+@pytest.mark.asyncio
+async def test_openai_acompletion(mocker):
+ mocker.patch("openai.resources.chat.completions.AsyncCompletions.create", mock_openai_acompletions_create)
+
+ llm = OpenAILLM(mock_llm_config)
+
+ resp = await llm.acompletion(messages)
+ assert resp.choices[0].finish_reason == "stop"
+ assert resp.choices[0].message.content == resp_cont
+ assert resp.usage == usage
+
+ await llm_general_chat_funcs_test(llm, prompt, messages, resp_cont)
+
+
+
From fe52bf60325498d3277b6836194a4deb1ebda06f Mon Sep 17 00:00:00 2001
From: geekan
Date: Wed, 28 Feb 2024 22:59:55 +0800
Subject: [PATCH 081/104] add config in provider
---
metagpt/provider/openai_api.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/metagpt/provider/openai_api.py b/metagpt/provider/openai_api.py
index 90a61085f..36d6f6d77 100644
--- a/metagpt/provider/openai_api.py
+++ b/metagpt/provider/openai_api.py
@@ -99,7 +99,7 @@ class OpenAILLM(BaseLLM):
"max_tokens": self._get_max_tokens(messages),
"n": 1,
# "stop": None, # default it's None and gpt4-v can't have this one
- "temperature": 0.3,
+ "temperature": self.config.temperature,
"model": self.model,
"timeout": max(self.config.timeout, timeout),
}
From 10be95be1b94edd26ad599d35c1526fd76d2223d Mon Sep 17 00:00:00 2001
From: YangQianli92 <108046369+YangQianli92@users.noreply.github.com>
Date: Thu, 29 Feb 2024 10:14:15 +0800
Subject: [PATCH 082/104] Add files via upload
---
metagpt/context.py | 21 +++++++++++++++++----
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/metagpt/context.py b/metagpt/context.py
index 3dfd52d58..0add4c71a 100644
--- a/metagpt/context.py
+++ b/metagpt/context.py
@@ -12,10 +12,14 @@ from typing import Any, Optional
from pydantic import BaseModel, ConfigDict
from metagpt.config2 import Config
-from metagpt.configs.llm_config import LLMConfig
+from metagpt.configs.llm_config import LLMConfig, LLMType
from metagpt.provider.base_llm import BaseLLM
from metagpt.provider.llm_provider_registry import create_llm_instance
-from metagpt.utils.cost_manager import CostManager
+from metagpt.utils.cost_manager import (
+ CostManager,
+ FireworksCostManager,
+ TokenCostManager,
+)
from metagpt.utils.git_repository import GitRepository
from metagpt.utils.project_repo import ProjectRepo
@@ -80,12 +84,21 @@ class Context(BaseModel):
# self._llm = None
# return self._llm
+ def _select_costmanager(self, llm_config: LLMConfig) -> CostManager:
+ """Return a CostManager instance"""
+ if llm_config.api_type == LLMType.FIREWORKS:
+ return FireworksCostManager()
+ elif llm_config.api_type == LLMType.OPEN_LLM:
+ return TokenCostManager()
+ else:
+ return self.cost_manager
+
def llm(self) -> BaseLLM:
"""Return a LLM instance, fixme: support cache"""
# if self._llm is None:
self._llm = create_llm_instance(self.config.llm)
if self._llm.cost_manager is None:
- self._llm.cost_manager = self.cost_manager
+ self._llm.cost_manager = self._select_costmanager(self.config.llm)
return self._llm
def llm_with_cost_manager_from_llm_config(self, llm_config: LLMConfig) -> BaseLLM:
@@ -93,5 +106,5 @@ class Context(BaseModel):
# if self._llm is None:
llm = create_llm_instance(llm_config)
if llm.cost_manager is None:
- llm.cost_manager = self.cost_manager
+ llm.cost_manager = self._select_costmanager(llm_config)
return llm
From fbd5d65e6b8282aa0a9cc2574f22f4475803548c Mon Sep 17 00:00:00 2001
From: YangQianli92 <108046369+YangQianli92@users.noreply.github.com>
Date: Thu, 29 Feb 2024 10:14:41 +0800
Subject: [PATCH 083/104] Add files via upload
---
metagpt/configs/llm_config.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/metagpt/configs/llm_config.py b/metagpt/configs/llm_config.py
index 36f5d7ae7..5cca669cf 100644
--- a/metagpt/configs/llm_config.py
+++ b/metagpt/configs/llm_config.py
@@ -26,6 +26,7 @@ class LLMType(Enum):
OLLAMA = "ollama"
QIANFAN = "qianfan" # Baidu BCE
DASHSCOPE = "dashscope" # Aliyun LingJi DashScope
+ MOONSHOT = "moonshot"
def __missing__(self, key):
return self.OPENAI
From 8f267b33d25a5cf0ab625195921bc90508caf831 Mon Sep 17 00:00:00 2001
From: YangQianli92 <108046369+YangQianli92@users.noreply.github.com>
Date: Thu, 29 Feb 2024 10:15:09 +0800
Subject: [PATCH 084/104] Add files via upload
---
metagpt/provider/llm_provider_registry.py | 8 +++-
metagpt/provider/openai_api.py | 51 +++++++++++++----------
2 files changed, 36 insertions(+), 23 deletions(-)
diff --git a/metagpt/provider/llm_provider_registry.py b/metagpt/provider/llm_provider_registry.py
index df89d36aa..d598b678e 100644
--- a/metagpt/provider/llm_provider_registry.py
+++ b/metagpt/provider/llm_provider_registry.py
@@ -21,11 +21,15 @@ class LLMProviderRegistry:
return self.providers[enum]
-def register_provider(key):
+def register_provider(keys):
"""register provider to registry"""
def decorator(cls):
- LLM_REGISTRY.register(key, cls)
+ if isinstance(keys,list):
+ for key in keys:
+ LLM_REGISTRY.register(key, cls)
+ else:
+ LLM_REGISTRY.register(keys, cls)
return cls
return decorator
diff --git a/metagpt/provider/openai_api.py b/metagpt/provider/openai_api.py
index 36d6f6d77..5ed7168e3 100644
--- a/metagpt/provider/openai_api.py
+++ b/metagpt/provider/openai_api.py
@@ -30,7 +30,7 @@ from metagpt.provider.constant import GENERAL_FUNCTION_SCHEMA
from metagpt.provider.llm_provider_registry import register_provider
from metagpt.schema import Message
from metagpt.utils.common import CodeParser, decode_image
-from metagpt.utils.cost_manager import CostManager
+from metagpt.utils.cost_manager import CostManager, Costs, TokenCostManager
from metagpt.utils.exceptions import handle_exception
from metagpt.utils.token_counter import (
count_message_tokens,
@@ -50,7 +50,7 @@ See FAQ 5.8
raise retry_state.outcome.exception()
-@register_provider(LLMType.OPENAI)
+@register_provider([LLMType.OPENAI, LLMType.FIREWORKS, LLMType.OPEN_LLM, LLMType.MOONSHOT])
class OpenAILLM(BaseLLM):
"""Check https://platform.openai.com/examples for examples"""
@@ -84,14 +84,33 @@ class OpenAILLM(BaseLLM):
return params
- async def _achat_completion_stream(self, messages: list[dict], timeout=3) -> AsyncIterator[str]:
+ async def _achat_completion_stream(self, messages: list[dict], timeout=3) -> str:
response: AsyncStream[ChatCompletionChunk] = await self.aclient.chat.completions.create(
**self._cons_kwargs(messages, timeout=timeout), stream=True
)
-
+ usage = None
+ collected_messages = []
async for chunk in response:
chunk_message = chunk.choices[0].delta.content or "" if chunk.choices else "" # extract the message
- yield chunk_message
+ finish_reason = chunk.choices[0].finish_reason if hasattr(chunk.choices[0], "finish_reason") else None
+ log_llm_stream(chunk_message)
+ collected_messages.append(chunk_message)
+ if finish_reason:
+ if hasattr(chunk, "usage"):
+ # Some services have usage as an attribute of the chunk, such as Fireworks
+ usage = CompletionUsage(**chunk.usage)
+ elif hasattr(chunk.choices[0], "usage"):
+ # The usage of some services is an attribute of chunk.choices[0], such as Moonshot
+ usage = CompletionUsage(**chunk.choices[0].usage)
+
+ log_llm_stream("\n")
+ full_reply_content = "".join(collected_messages)
+ if not usage:
+ # Some services do not provide the usage attribute, such as OpenAI or OpenLLM
+ usage = self._calc_usage(messages, full_reply_content)
+
+ self._update_costs(usage)
+ return full_reply_content
def _cons_kwargs(self, messages: list[dict], timeout=3, **extra_kwargs) -> dict:
kwargs = {
@@ -99,7 +118,7 @@ class OpenAILLM(BaseLLM):
"max_tokens": self._get_max_tokens(messages),
"n": 1,
# "stop": None, # default it's None and gpt4-v can't have this one
- "temperature": self.config.temperature,
+ "temperature": 0.3,
"model": self.model,
"timeout": max(self.config.timeout, timeout),
}
@@ -126,18 +145,7 @@ class OpenAILLM(BaseLLM):
async def acompletion_text(self, messages: list[dict], stream=False, timeout=3) -> str:
"""when streaming, print each token in place."""
if stream:
- resp = self._achat_completion_stream(messages, timeout=timeout)
-
- collected_messages = []
- async for i in resp:
- log_llm_stream(i)
- collected_messages.append(i)
- log_llm_stream("\n")
-
- full_reply_content = "".join(collected_messages)
- usage = self._calc_usage(messages, full_reply_content)
- self._update_costs(usage)
- return full_reply_content
+ await self._achat_completion_stream(messages, timeout=timeout)
rsp = await self._achat_completion(messages, timeout=timeout)
return self.get_choice_text(rsp)
@@ -261,11 +269,12 @@ class OpenAILLM(BaseLLM):
if not self.config.calc_usage:
return usage
+ model = self.model if not isinstance(self.cost_manager, TokenCostManager) else "open-llm-model"
try:
- usage.prompt_tokens = count_message_tokens(messages, self.model)
- usage.completion_tokens = count_string_tokens(rsp, self.model)
+ usage.prompt_tokens = count_message_tokens(messages, model)
+ usage.completion_tokens = count_string_tokens(rsp, model)
except Exception as e:
- logger.warning(f"usage calculation failed: {e}")
+ logger.error(f"usage calculation failed: {e}")
return usage
From 430fe2961758f1e68aa99978da9f4d8041fa52ea Mon Sep 17 00:00:00 2001
From: YangQianli92 <108046369+YangQianli92@users.noreply.github.com>
Date: Thu, 29 Feb 2024 10:15:32 +0800
Subject: [PATCH 085/104] Add files via upload
---
metagpt/utils/cost_manager.py | 45 +++++++++++++++++++++++++++++++++-
metagpt/utils/token_counter.py | 14 +++++++++++
2 files changed, 58 insertions(+), 1 deletion(-)
diff --git a/metagpt/utils/cost_manager.py b/metagpt/utils/cost_manager.py
index efff07ae1..921fbe6cd 100644
--- a/metagpt/utils/cost_manager.py
+++ b/metagpt/utils/cost_manager.py
@@ -6,12 +6,14 @@
@Desc : mashenquan, 2023/8/28. Separate the `CostManager` class to support user-level cost accounting.
"""
+import re
from typing import NamedTuple
from pydantic import BaseModel
from metagpt.logs import logger
-from metagpt.utils.token_counter import TOKEN_COSTS
+from metagpt.utils.token_counter import FIREWORKS_GRADE_TOKEN_COSTS, TOKEN_COSTS
+
class Costs(NamedTuple):
@@ -103,3 +105,44 @@ class TokenCostManager(CostManager):
self.total_prompt_tokens += prompt_tokens
self.total_completion_tokens += completion_tokens
logger.info(f"prompt_tokens: {prompt_tokens}, completion_tokens: {completion_tokens}")
+
+
+class FireworksCostManager(CostManager):
+ def model_grade_token_costs(self, model: str) -> dict[str, float]:
+ def _get_model_size(model: str) -> float:
+ size = re.findall(".*-([0-9.]+)b", model)
+ size = float(size[0]) if len(size) > 0 else -1
+ return size
+
+ if "mixtral-8x7b" in model:
+ token_costs = FIREWORKS_GRADE_TOKEN_COSTS["mixtral-8x7b"]
+ else:
+ model_size = _get_model_size(model)
+ if 0 < model_size <= 16:
+ token_costs = FIREWORKS_GRADE_TOKEN_COSTS["16"]
+ elif 16 < model_size <= 80:
+ token_costs = FIREWORKS_GRADE_TOKEN_COSTS["80"]
+ else:
+ token_costs = FIREWORKS_GRADE_TOKEN_COSTS["-1"]
+ return token_costs
+
+ def update_cost(self, prompt_tokens: int, completion_tokens: int, model: str):
+ """
+        Refer to `https://app.fireworks.ai/pricing` under **Developer pricing**.
+ Update the total cost, prompt tokens, and completion tokens.
+
+ Args:
+ prompt_tokens (int): The number of tokens used in the prompt.
+ completion_tokens (int): The number of tokens used in the completion.
+ model (str): The model used for the API call.
+ """
+ self.total_prompt_tokens += prompt_tokens
+ self.total_completion_tokens += completion_tokens
+
+ token_costs = self.model_grade_token_costs(model)
+ cost = (prompt_tokens * token_costs["prompt"] + completion_tokens * token_costs["completion"]) / 1000000
+ self.total_cost += cost
+ logger.info(
+ f"Total running cost: ${self.total_cost:.4f}"
+ f"Current cost: ${cost:.4f}, prompt_tokens: {prompt_tokens}, completion_tokens: {completion_tokens}"
+ )
\ No newline at end of file
diff --git a/metagpt/utils/token_counter.py b/metagpt/utils/token_counter.py
index 167a1d755..9a6fddf34 100644
--- a/metagpt/utils/token_counter.py
+++ b/metagpt/utils/token_counter.py
@@ -35,6 +35,9 @@ TOKEN_COSTS = {
"glm-3-turbo": {"prompt": 0.0007, "completion": 0.0007}, # 128k version, prompt + completion tokens=0.005¥/k-tokens
"glm-4": {"prompt": 0.014, "completion": 0.014}, # 128k version, prompt + completion tokens=0.1¥/k-tokens
"gemini-pro": {"prompt": 0.00025, "completion": 0.0005},
+ "moonshot-v1-8k": {"prompt": 0.012, "completion": 0.012}, # prompt + completion tokens=0.012¥/k-tokens
+ "moonshot-v1-32k": {"prompt": 0.024, "completion": 0.024},
+ "moonshot-v1-128k": {"prompt": 0.06, "completion": 0.06},
}
@@ -120,6 +123,14 @@ DASHSCOPE_TOKEN_COSTS = {
}
+FIREWORKS_GRADE_TOKEN_COSTS = {
+ "-1": {"prompt": 0.0, "completion": 0.0}, # abnormal condition
+ "16": {"prompt": 0.2, "completion": 0.8}, # 16 means model size <= 16B; 0.2 means $0.2/1M tokens
+ "80": {"prompt": 0.7, "completion": 2.8}, # 80 means 16B < model size <= 80B
+ "mixtral-8x7b": {"prompt": 0.4, "completion": 1.6},
+}
+
+
TOKEN_MAX = {
"gpt-3.5-turbo": 4096,
"gpt-3.5-turbo-0301": 4096,
@@ -143,6 +154,9 @@ TOKEN_MAX = {
"glm-3-turbo": 128000,
"glm-4": 128000,
"gemini-pro": 32768,
+ "moonshot-v1-8k": 8192,
+ "moonshot-v1-32k": 32768,
+ "moonshot-v1-128k": 128000,
}
From 33290121ec7446c7ccd35f64a6f8d5995ffa4ee1 Mon Sep 17 00:00:00 2001
From: YangQianli92 <108046369+YangQianli92@users.noreply.github.com>
Date: Thu, 29 Feb 2024 10:16:14 +0800
Subject: [PATCH 086/104] Add files via upload
From 4a73d714f76caf1d51a04109c17c59522bd6759c Mon Sep 17 00:00:00 2001
From: YangQianli92 <108046369+YangQianli92@users.noreply.github.com>
Date: Thu, 29 Feb 2024 10:19:04 +0800
Subject: [PATCH 087/104] Add files via upload
---
metagpt/provider/openai_api.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/metagpt/provider/openai_api.py b/metagpt/provider/openai_api.py
index 5ed7168e3..f53776adb 100644
--- a/metagpt/provider/openai_api.py
+++ b/metagpt/provider/openai_api.py
@@ -118,7 +118,7 @@ class OpenAILLM(BaseLLM):
"max_tokens": self._get_max_tokens(messages),
"n": 1,
# "stop": None, # default it's None and gpt4-v can't have this one
- "temperature": 0.3,
+ "temperature": self.config.temperature,
"model": self.model,
"timeout": max(self.config.timeout, timeout),
}
@@ -274,7 +274,7 @@ class OpenAILLM(BaseLLM):
usage.prompt_tokens = count_message_tokens(messages, model)
usage.completion_tokens = count_string_tokens(rsp, model)
except Exception as e:
- logger.error(f"usage calculation failed: {e}")
+ logger.warning(f"usage calculation failed: {e}")
return usage
From b5be7a28bf1be4a5009d57fe7d3deacf740f4f83 Mon Sep 17 00:00:00 2001
From: YangQianli92 <108046369+YangQianli92@users.noreply.github.com>
Date: Thu, 29 Feb 2024 10:19:38 +0800
Subject: [PATCH 088/104] Add files via upload
---
metagpt/provider/__init__.py | 2 --
1 file changed, 2 deletions(-)
diff --git a/metagpt/provider/__init__.py b/metagpt/provider/__init__.py
index 44e6d3f3b..38ae12f75 100644
--- a/metagpt/provider/__init__.py
+++ b/metagpt/provider/__init__.py
@@ -20,9 +20,7 @@ from metagpt.provider.qianfan_api import QianFanLLM
from metagpt.provider.dashscope_api import DashScopeLLM
__all__ = [
- "FireworksLLM",
"GeminiLLM",
- "OpenLLM",
"OpenAILLM",
"ZhiPuAILLM",
"AzureOpenAILLM",
From 0776dc7c45a143636324dc28cf753b20c64bac1e Mon Sep 17 00:00:00 2001
From: YangQianli92 <108046369+YangQianli92@users.noreply.github.com>
Date: Thu, 29 Feb 2024 11:13:40 +0800
Subject: [PATCH 089/104] Add files via upload
---
tests/metagpt/provider/test_openai.py | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/tests/metagpt/provider/test_openai.py b/tests/metagpt/provider/test_openai.py
index 1c28fbd7b..3ce38d2a5 100644
--- a/tests/metagpt/provider/test_openai.py
+++ b/tests/metagpt/provider/test_openai.py
@@ -26,13 +26,11 @@ from tests.metagpt.provider.req_resp_const import (
resp_cont_tmpl,
)
-prompt_msg = "who are you"
-messages = [{"role": "user", "content": prompt_msg}]
-name = 'AI'
+name = "AI assistant"
resp_cont = resp_cont_tmpl.format(name=name)
default_resp = get_openai_chat_completion(name)
-default_resp_chunk = get_openai_chat_completion_chunk(name)
+default_resp_chunk = get_openai_chat_completion_chunk(name, usage_as_dict=True)
usage = CompletionUsage(completion_tokens=110, prompt_tokens=92, total_tokens=202)
@@ -166,6 +164,3 @@ async def test_openai_acompletion(mocker):
assert resp.usage == usage
await llm_general_chat_funcs_test(llm, prompt, messages, resp_cont)
-
-
-
From dcd4c89cc3ed6907d12871630111a09067a94be9 Mon Sep 17 00:00:00 2001
From: YangQianli92 <108046369+YangQianli92@users.noreply.github.com>
Date: Thu, 29 Feb 2024 11:13:59 +0800
Subject: [PATCH 090/104] Add files via upload
---
metagpt/utils/cost_manager.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/metagpt/utils/cost_manager.py b/metagpt/utils/cost_manager.py
index 921fbe6cd..b871cef3b 100644
--- a/metagpt/utils/cost_manager.py
+++ b/metagpt/utils/cost_manager.py
@@ -15,7 +15,6 @@ from metagpt.logs import logger
from metagpt.utils.token_counter import FIREWORKS_GRADE_TOKEN_COSTS, TOKEN_COSTS
-
class Costs(NamedTuple):
total_prompt_tokens: int
total_completion_tokens: int
@@ -145,4 +144,4 @@ class FireworksCostManager(CostManager):
logger.info(
f"Total running cost: ${self.total_cost:.4f}"
f"Current cost: ${cost:.4f}, prompt_tokens: {prompt_tokens}, completion_tokens: {completion_tokens}"
- )
\ No newline at end of file
+ )
From ae1bc78af6828948be48497146ce9e6813f8d773 Mon Sep 17 00:00:00 2001
From: YangQianli92 <108046369+YangQianli92@users.noreply.github.com>
Date: Thu, 29 Feb 2024 11:14:20 +0800
Subject: [PATCH 091/104] Add files via upload
From 4123feae78c18ea287accf7f82eeca984649feff Mon Sep 17 00:00:00 2001
From: YangQianli92 <108046369+YangQianli92@users.noreply.github.com>
Date: Thu, 29 Feb 2024 11:17:06 +0800
Subject: [PATCH 092/104] Delete metagpt/provider/open_llm_api.py
---
metagpt/provider/open_llm_api.py | 36 --------------------------------
1 file changed, 36 deletions(-)
delete mode 100644 metagpt/provider/open_llm_api.py
diff --git a/metagpt/provider/open_llm_api.py b/metagpt/provider/open_llm_api.py
deleted file mode 100644
index 69371e379..000000000
--- a/metagpt/provider/open_llm_api.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# @Desc : self-host open llm model with openai-compatible interface
-
-from openai.types import CompletionUsage
-
-from metagpt.configs.llm_config import LLMConfig, LLMType
-from metagpt.logs import logger
-from metagpt.provider.llm_provider_registry import register_provider
-from metagpt.provider.openai_api import OpenAILLM
-from metagpt.utils.cost_manager import TokenCostManager
-from metagpt.utils.token_counter import count_message_tokens, count_string_tokens
-
-
-@register_provider(LLMType.OPEN_LLM)
-class OpenLLM(OpenAILLM):
- def __init__(self, config: LLMConfig):
- super().__init__(config)
- self._cost_manager = TokenCostManager()
-
- def _make_client_kwargs(self) -> dict:
- kwargs = dict(api_key="sk-xxx", base_url=self.config.base_url)
- return kwargs
-
- def _calc_usage(self, messages: list[dict], rsp: str) -> CompletionUsage:
- usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
- if not self.config.calc_usage:
- return usage
-
- try:
- usage.prompt_tokens = count_message_tokens(messages, "open-llm-model")
- usage.completion_tokens = count_string_tokens(rsp, "open-llm-model")
- except Exception as e:
- logger.error(f"usage calculation failed!: {e}")
-
- return usage
From e2cb117a4ea4d30f0171e1948f96ea2e74c9125c Mon Sep 17 00:00:00 2001
From: YangQianli92 <108046369+YangQianli92@users.noreply.github.com>
Date: Thu, 29 Feb 2024 11:17:18 +0800
Subject: [PATCH 093/104] Delete metagpt/provider/fireworks_api.py
---
metagpt/provider/fireworks_api.py | 121 ------------------------------
1 file changed, 121 deletions(-)
delete mode 100644 metagpt/provider/fireworks_api.py
diff --git a/metagpt/provider/fireworks_api.py b/metagpt/provider/fireworks_api.py
deleted file mode 100644
index f356c23c4..000000000
--- a/metagpt/provider/fireworks_api.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# @Desc : fireworks.ai's api
-
-import re
-
-from openai import APIConnectionError, AsyncStream
-from openai.types import CompletionUsage
-from openai.types.chat import ChatCompletionChunk
-from tenacity import (
- after_log,
- retry,
- retry_if_exception_type,
- stop_after_attempt,
- wait_random_exponential,
-)
-
-from metagpt.configs.llm_config import LLMConfig, LLMType
-from metagpt.logs import log_llm_stream, logger
-from metagpt.provider.llm_provider_registry import register_provider
-from metagpt.provider.openai_api import OpenAILLM, log_and_reraise
-from metagpt.utils.cost_manager import CostManager
-
-MODEL_GRADE_TOKEN_COSTS = {
- "-1": {"prompt": 0.0, "completion": 0.0}, # abnormal condition
- "16": {"prompt": 0.2, "completion": 0.8}, # 16 means model size <= 16B; 0.2 means $0.2/1M tokens
- "80": {"prompt": 0.7, "completion": 2.8}, # 80 means 16B < model size <= 80B
- "mixtral-8x7b": {"prompt": 0.4, "completion": 1.6},
-}
-
-
-class FireworksCostManager(CostManager):
- def model_grade_token_costs(self, model: str) -> dict[str, float]:
- def _get_model_size(model: str) -> float:
- size = re.findall(".*-([0-9.]+)b", model)
- size = float(size[0]) if len(size) > 0 else -1
- return size
-
- if "mixtral-8x7b" in model:
- token_costs = MODEL_GRADE_TOKEN_COSTS["mixtral-8x7b"]
- else:
- model_size = _get_model_size(model)
- if 0 < model_size <= 16:
- token_costs = MODEL_GRADE_TOKEN_COSTS["16"]
- elif 16 < model_size <= 80:
- token_costs = MODEL_GRADE_TOKEN_COSTS["80"]
- else:
- token_costs = MODEL_GRADE_TOKEN_COSTS["-1"]
- return token_costs
-
- def update_cost(self, prompt_tokens: int, completion_tokens: int, model: str):
- """
- Refs to `https://app.fireworks.ai/pricing` **Developer pricing**
- Update the total cost, prompt tokens, and completion tokens.
-
- Args:
- prompt_tokens (int): The number of tokens used in the prompt.
- completion_tokens (int): The number of tokens used in the completion.
- model (str): The model used for the API call.
- """
- self.total_prompt_tokens += prompt_tokens
- self.total_completion_tokens += completion_tokens
-
- token_costs = self.model_grade_token_costs(model)
- cost = (prompt_tokens * token_costs["prompt"] + completion_tokens * token_costs["completion"]) / 1000000
- self.total_cost += cost
- logger.info(
- f"Total running cost: ${self.total_cost:.4f}"
- f"Current cost: ${cost:.4f}, prompt_tokens: {prompt_tokens}, completion_tokens: {completion_tokens}"
- )
-
-
-@register_provider(LLMType.FIREWORKS)
-class FireworksLLM(OpenAILLM):
- def __init__(self, config: LLMConfig):
- super().__init__(config=config)
- self.auto_max_tokens = False
- self.cost_manager = FireworksCostManager()
-
- def _make_client_kwargs(self) -> dict:
- kwargs = dict(api_key=self.config.api_key, base_url=self.config.base_url)
- return kwargs
-
- async def _achat_completion_stream(self, messages: list[dict], timeout=3) -> str:
- response: AsyncStream[ChatCompletionChunk] = await self.aclient.chat.completions.create(
- **self._cons_kwargs(messages), stream=True
- )
-
- collected_content = []
- usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
- # iterate through the stream of events
- async for chunk in response:
- if chunk.choices:
- choice = chunk.choices[0]
- choice_delta = choice.delta
- finish_reason = choice.finish_reason if hasattr(choice, "finish_reason") else None
- if choice_delta.content:
- collected_content.append(choice_delta.content)
- log_llm_stream(choice_delta.content)
- if finish_reason:
- # fireworks api return usage when finish_reason is not None
- usage = CompletionUsage(**chunk.usage)
- log_llm_stream("\n")
-
- full_content = "".join(collected_content)
- self._update_costs(usage)
- return full_content
-
- @retry(
- wait=wait_random_exponential(min=1, max=60),
- stop=stop_after_attempt(6),
- after=after_log(logger, logger.level("WARNING").name),
- retry=retry_if_exception_type(APIConnectionError),
- retry_error_callback=log_and_reraise,
- )
- async def acompletion_text(self, messages: list[dict], stream=False, timeout: int = 3) -> str:
- """when streaming, print each token in place."""
- if stream:
- return await self._achat_completion_stream(messages)
- rsp = await self._achat_completion(messages)
- return self.get_choice_text(rsp)
From d3f07759b608529c0422f80bc71d4ec20822f9a2 Mon Sep 17 00:00:00 2001
From: YangQianli92 <108046369+YangQianli92@users.noreply.github.com>
Date: Thu, 29 Feb 2024 11:17:38 +0800
Subject: [PATCH 094/104] Delete tests/metagpt/provider/test_open_llm_api.py
---
tests/metagpt/provider/test_open_llm_api.py | 56 ---------------------
1 file changed, 56 deletions(-)
delete mode 100644 tests/metagpt/provider/test_open_llm_api.py
diff --git a/tests/metagpt/provider/test_open_llm_api.py b/tests/metagpt/provider/test_open_llm_api.py
deleted file mode 100644
index aa38b95a6..000000000
--- a/tests/metagpt/provider/test_open_llm_api.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# @Desc :
-
-import pytest
-from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
-from openai.types.completion_usage import CompletionUsage
-
-from metagpt.provider.open_llm_api import OpenLLM
-from metagpt.utils.cost_manager import CostManager, Costs
-from tests.metagpt.provider.mock_llm_config import mock_llm_config
-from tests.metagpt.provider.req_resp_const import (
- get_openai_chat_completion,
- get_openai_chat_completion_chunk,
- llm_general_chat_funcs_test,
- messages,
- prompt,
- resp_cont_tmpl,
-)
-
-name = "llama2-7b"
-resp_cont = resp_cont_tmpl.format(name=name)
-default_resp = get_openai_chat_completion(name)
-
-default_resp_chunk = get_openai_chat_completion_chunk(name)
-
-
-async def mock_openai_acompletions_create(self, stream: bool = False, **kwargs) -> ChatCompletionChunk:
- if stream:
-
- class Iterator(object):
- async def __aiter__(self):
- yield default_resp_chunk
-
- return Iterator()
- else:
- return default_resp
-
-
-@pytest.mark.asyncio
-async def test_openllm_acompletion(mocker):
- mocker.patch("openai.resources.chat.completions.AsyncCompletions.create", mock_openai_acompletions_create)
-
- openllm_llm = OpenLLM(mock_llm_config)
- openllm_llm.model = "llama-v2-13b-chat"
-
- openllm_llm.cost_manager = CostManager()
- openllm_llm._update_costs(usage=CompletionUsage(prompt_tokens=100, completion_tokens=100, total_tokens=200))
- assert openllm_llm.get_costs() == Costs(
- total_prompt_tokens=100, total_completion_tokens=100, total_cost=0, total_budget=0
- )
-
- resp = await openllm_llm.acompletion(messages)
- assert resp.choices[0].message.content in resp_cont
-
- await llm_general_chat_funcs_test(openllm_llm, prompt, messages, resp_cont)
From c17cfbee44e8e8975c94ed155d376015735fe200 Mon Sep 17 00:00:00 2001
From: YangQianli92 <108046369+YangQianli92@users.noreply.github.com>
Date: Thu, 29 Feb 2024 11:17:49 +0800
Subject: [PATCH 095/104] Delete tests/metagpt/provider/test_fireworks_llm.py
---
tests/metagpt/provider/test_fireworks_llm.py | 74 --------------------
1 file changed, 74 deletions(-)
delete mode 100644 tests/metagpt/provider/test_fireworks_llm.py
diff --git a/tests/metagpt/provider/test_fireworks_llm.py b/tests/metagpt/provider/test_fireworks_llm.py
deleted file mode 100644
index 1c1aa9caa..000000000
--- a/tests/metagpt/provider/test_fireworks_llm.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# @Desc : the unittest of fireworks api
-
-import pytest
-from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
-from openai.types.completion_usage import CompletionUsage
-
-from metagpt.provider.fireworks_api import (
- MODEL_GRADE_TOKEN_COSTS,
- FireworksCostManager,
- FireworksLLM,
-)
-from metagpt.utils.cost_manager import Costs
-from tests.metagpt.provider.mock_llm_config import mock_llm_config
-from tests.metagpt.provider.req_resp_const import (
- get_openai_chat_completion,
- get_openai_chat_completion_chunk,
- llm_general_chat_funcs_test,
- messages,
- prompt,
- resp_cont_tmpl,
-)
-
-name = "fireworks"
-resp_cont = resp_cont_tmpl.format(name=name)
-default_resp = get_openai_chat_completion(name)
-default_resp_chunk = get_openai_chat_completion_chunk(name, usage_as_dict=True)
-
-
-def test_fireworks_costmanager():
- cost_manager = FireworksCostManager()
- assert MODEL_GRADE_TOKEN_COSTS["-1"] == cost_manager.model_grade_token_costs("test")
- assert MODEL_GRADE_TOKEN_COSTS["-1"] == cost_manager.model_grade_token_costs("xxx-81b-chat")
- assert MODEL_GRADE_TOKEN_COSTS["16"] == cost_manager.model_grade_token_costs("llama-v2-13b-chat")
- assert MODEL_GRADE_TOKEN_COSTS["16"] == cost_manager.model_grade_token_costs("xxx-15.5b-chat")
- assert MODEL_GRADE_TOKEN_COSTS["16"] == cost_manager.model_grade_token_costs("xxx-16b-chat")
- assert MODEL_GRADE_TOKEN_COSTS["80"] == cost_manager.model_grade_token_costs("xxx-80b-chat")
- assert MODEL_GRADE_TOKEN_COSTS["mixtral-8x7b"] == cost_manager.model_grade_token_costs("mixtral-8x7b-chat")
-
- cost_manager.update_cost(prompt_tokens=500000, completion_tokens=500000, model="llama-v2-13b-chat")
- assert cost_manager.total_cost == 0.5
-
-
-async def mock_openai_acompletions_create(self, stream: bool = False, **kwargs) -> ChatCompletionChunk:
- if stream:
-
- class Iterator(object):
- async def __aiter__(self):
- yield default_resp_chunk
-
- return Iterator()
- else:
- return default_resp
-
-
-@pytest.mark.asyncio
-async def test_fireworks_acompletion(mocker):
- mocker.patch("openai.resources.chat.completions.AsyncCompletions.create", mock_openai_acompletions_create)
-
- fireworks_llm = FireworksLLM(mock_llm_config)
- fireworks_llm.model = "llama-v2-13b-chat"
-
- fireworks_llm._update_costs(
- usage=CompletionUsage(prompt_tokens=500000, completion_tokens=500000, total_tokens=1000000)
- )
- assert fireworks_llm.get_costs() == Costs(
- total_prompt_tokens=500000, total_completion_tokens=500000, total_cost=0.5, total_budget=0
- )
-
- resp = await fireworks_llm.acompletion(messages)
- assert resp.choices[0].message.content in resp_cont
-
- await llm_general_chat_funcs_test(fireworks_llm, prompt, messages, resp_cont)
From be76b007b1901431773c3c0f4f492e565b69da74 Mon Sep 17 00:00:00 2001
From: YangQianli92 <108046369+YangQianli92@users.noreply.github.com>
Date: Thu, 29 Feb 2024 11:20:09 +0800
Subject: [PATCH 096/104] Add files via upload
---
metagpt/provider/llm_provider_registry.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/metagpt/provider/llm_provider_registry.py b/metagpt/provider/llm_provider_registry.py
index d598b678e..4fd2b1978 100644
--- a/metagpt/provider/llm_provider_registry.py
+++ b/metagpt/provider/llm_provider_registry.py
@@ -25,7 +25,7 @@ def register_provider(keys):
"""register provider to registry"""
def decorator(cls):
- if isinstance(keys,list):
+ if isinstance(keys, list):
for key in keys:
LLM_REGISTRY.register(key, cls)
else:
From 1ad134faf475d97a29b9eeafad0d55d9f1280084 Mon Sep 17 00:00:00 2001
From: YangQianli92 <108046369+YangQianli92@users.noreply.github.com>
Date: Thu, 29 Feb 2024 11:23:06 +0800
Subject: [PATCH 097/104] Add files via upload
---
metagpt/provider/__init__.py | 2 --
1 file changed, 2 deletions(-)
diff --git a/metagpt/provider/__init__.py b/metagpt/provider/__init__.py
index 38ae12f75..ed49d01c9 100644
--- a/metagpt/provider/__init__.py
+++ b/metagpt/provider/__init__.py
@@ -6,10 +6,8 @@
@File : __init__.py
"""
-from metagpt.provider.fireworks_api import FireworksLLM
from metagpt.provider.google_gemini_api import GeminiLLM
from metagpt.provider.ollama_api import OllamaLLM
-from metagpt.provider.open_llm_api import OpenLLM
from metagpt.provider.openai_api import OpenAILLM
from metagpt.provider.zhipuai_api import ZhiPuAILLM
from metagpt.provider.azure_openai_api import AzureOpenAILLM
From 638c84a9bcb5e78ea0f2c101583b11c9e37afea7 Mon Sep 17 00:00:00 2001
From: YangQianli92 <108046369+YangQianli92@users.noreply.github.com>
Date: Thu, 29 Feb 2024 11:32:13 +0800
Subject: [PATCH 098/104] Update llm_config.py
---
metagpt/configs/llm_config.py | 2 --
1 file changed, 2 deletions(-)
diff --git a/metagpt/configs/llm_config.py b/metagpt/configs/llm_config.py
index 5cca669cf..4ce50c57e 100644
--- a/metagpt/configs/llm_config.py
+++ b/metagpt/configs/llm_config.py
@@ -18,8 +18,6 @@ class LLMType(Enum):
ANTHROPIC = "anthropic"
SPARK = "spark"
ZHIPUAI = "zhipuai"
- FIREWORKS = "fireworks"
- OPEN_LLM = "open_llm"
GEMINI = "gemini"
METAGPT = "metagpt"
AZURE = "azure"
From 379b7f2f2c2c4f374b390bfd441ab3cae07d5e24 Mon Sep 17 00:00:00 2001
From: voidking
Date: Thu, 29 Feb 2024 17:36:16 +0800
Subject: [PATCH 099/104] doc: add docker installation section and fold it
---
README.md | 39 +++++++++++++++++++++++++++++++++++++++
1 file changed, 39 insertions(+)
diff --git a/README.md b/README.md
index 42718129f..4e2c3280d 100644
--- a/README.md
+++ b/README.md
@@ -97,6 +97,45 @@ ### Usage
detail installation please refer to [cli_install](https://docs.deepwisdom.ai/main/en/guide/get_started/installation.html#install-stable-version)
or [docker_install](https://docs.deepwisdom.ai/main/en/guide/get_started/installation.html#install-with-docker)
+### Docker installation
+⏬ Step 1: Download metagpt image and prepare config2.yaml :: click to expand ::
+