Merge pull request #1515 from voidking/main

删除codecov
This commit is contained in:
Alexander Wu 2024-10-20 15:13:36 +08:00 committed by GitHub
commit ae4e5d3b49
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 25 additions and 39 deletions

View file

@@ -79,8 +79,8 @@ jobs:
./tests/data/rsp_cache_new.json
retention-days: 3
if: ${{ always() }}
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
if: ${{ always() }}
# - name: Upload coverage reports to Codecov
# uses: codecov/codecov-action@v3
# env:
# CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
# if: ${{ always() }}

View file

@@ -91,8 +91,8 @@ jobs:
./tests/data/rsp_cache_new.json
retention-days: 3
if: ${{ always() }}
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
if: ${{ always() }}
# - name: Upload coverage reports to Codecov
# uses: codecov/codecov-action@v3
# env:
# CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
# if: ${{ always() }}

View file

@@ -69,19 +69,9 @@ class CohereProvider(BaseBedrockProvider):
def messages_to_prompt(self, messages: list[dict]) -> str:
if "command-r" in self.model_name:
role_map = {
"user": "USER",
"assistant": "CHATBOT",
"system": "USER"
}
role_map = {"user": "USER", "assistant": "CHATBOT", "system": "USER"}
messages = list(
map(
lambda message: {
"role": role_map[message["role"]],
"message": message["content"]
},
messages
)
map(lambda message: {"role": role_map[message["role"]], "message": message["content"]}, messages)
)
return messages
else:
@@ -92,17 +82,9 @@ class CohereProvider(BaseBedrockProvider):
prompt = self.messages_to_prompt(messages)
if "command-r" in self.model_name:
chat_history, message = prompt[:-1], prompt[-1]["message"]
body = json.dumps({
"message": message,
"chat_history": chat_history,
**generate_kwargs
})
body = json.dumps({"message": message, "chat_history": chat_history, **generate_kwargs})
else:
body = json.dumps({
"prompt": prompt,
"stream": kwargs.get("stream", False),
**generate_kwargs
})
body = json.dumps({"prompt": prompt, "stream": kwargs.get("stream", False), **generate_kwargs})
return body
def get_choice_text_from_stream(self, event) -> str:

View file

@@ -91,7 +91,7 @@ SUPPORT_STREAM_MODELS = {
# Mistral Large (24.02)
"mistral.mistral-large-2402-v1:0": 8192,
# Mistral Large 2 (24.07)
"mistral.mistral-large-2407-v1:0": 8192
"mistral.mistral-large-2407-v1:0": 8192,
}
# TODO:use a more general function for constructing chat templates.

View file

@@ -1,6 +1,6 @@
import os
import asyncio
import json
import os
from functools import partial
from typing import List, Literal

View file

@@ -30,7 +30,7 @@ class RAGIndexFactory(ConfigBasedFactory):
BM25IndexConfig: self._create_bm25,
ElasticsearchIndexConfig: self._create_es,
ElasticsearchKeywordIndexConfig: self._create_es,
MilvusIndexConfig: self._create_milvus
MilvusIndexConfig: self._create_milvus,
}
super().__init__(creators)

View file

@@ -139,7 +139,9 @@ class RetrieverFactory(ConfigBasedFactory):
@get_or_build_index
def _build_milvus_index(self, config: MilvusRetrieverConfig, **kwargs) -> VectorStoreIndex:
vector_store = MilvusVectorStore(uri=config.uri, collection_name=config.collection_name, token=config.token, dim=config.dimensions)
vector_store = MilvusVectorStore(
uri=config.uri, collection_name=config.collection_name, token=config.token, dim=config.dimensions
)
return self._build_index_from_vector_store(config, vector_store, **kwargs)

View file

@@ -14,4 +14,4 @@ class MilvusRetriever(VectorIndexRetriever):
def persist(self, persist_dir: str, **kwargs) -> None:
"""Support persist.
Milvus automatically saves, so there is no need to implement."""
Milvus automatically saves, so there is no need to implement."""

View file

@@ -8,7 +8,7 @@ from llama_index.core.embeddings import BaseEmbedding
from llama_index.core.indices.base import BaseIndex
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import VectorStoreQueryMode
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr, model_validator, validator
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr, model_validator
from metagpt.config2 import config
from metagpt.configs.embedding_config import EmbeddingType
@@ -199,6 +199,7 @@ class ChromaIndexConfig(VectorIndexConfig):
default=None, description="Optional metadata to associate with the collection"
)
class MilvusIndexConfig(VectorIndexConfig):
"""Config for milvus-based index."""

BIN
milvus_local.db Normal file

Binary file not shown.

View file

@@ -7,7 +7,8 @@ from metagpt.rag.schema import (
ChromaIndexConfig,
ElasticsearchIndexConfig,
ElasticsearchStoreConfig,
FAISSIndexConfig, MilvusIndexConfig,
FAISSIndexConfig,
MilvusIndexConfig,
)