2023-11-20 11:24:46 +08:00
|
|
|
#!/usr/bin/env python
|
|
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
|
"""
|
|
|
|
|
@Time : 2023/6/8 14:03
|
|
|
|
|
@Author : alexanderwu
|
|
|
|
|
@File : document.py
|
2023-12-15 12:17:26 +08:00
|
|
|
@Desc : Classes and Operations Related to Files in the File System.
|
2023-11-20 11:24:46 +08:00
|
|
|
"""
|
2023-11-27 15:36:50 +08:00
|
|
|
from enum import Enum
|
2023-11-20 11:24:46 +08:00
|
|
|
from pathlib import Path
|
2023-11-28 18:16:50 +08:00
|
|
|
from typing import Optional, Union
|
|
|
|
|
|
2023-11-20 11:24:46 +08:00
|
|
|
import pandas as pd
|
|
|
|
|
from langchain.document_loaders import (
|
|
|
|
|
TextLoader,
|
|
|
|
|
UnstructuredPDFLoader,
|
|
|
|
|
UnstructuredWordDocumentLoader,
|
|
|
|
|
)
|
|
|
|
|
from langchain.text_splitter import CharacterTextSplitter
|
2023-12-26 14:44:09 +08:00
|
|
|
from pydantic import BaseModel, ConfigDict, Field
|
2023-11-20 11:24:46 +08:00
|
|
|
from tqdm import tqdm
|
|
|
|
|
|
2023-11-27 15:36:50 +08:00
|
|
|
from metagpt.repo_parser import RepoParser
|
2023-11-20 11:24:46 +08:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def validate_cols(content_col: str, df: pd.DataFrame):
    """Ensure that *content_col* exists as a column of *df*.

    Args:
        content_col: Name of the column expected to hold document content.
        df: The DataFrame to validate.

    Raises:
        ValueError: If the column is missing. The message names the missing
            column so the caller can tell which one was expected.
    """
    if content_col not in df.columns:
        raise ValueError(f"Content column '{content_col}' not found in DataFrame.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def read_data(data_path: Path) -> Union[pd.DataFrame, list]:
    """Load a document from *data_path*, dispatching on the file suffix.

    Tabular formats (.xlsx / .csv / .json) are parsed with pandas and
    returned as a DataFrame. Text-like formats (.docx / .doc / .txt / .pdf)
    are loaded through the corresponding langchain loader and returned as a
    list of langchain Documents; .txt content is additionally split into
    256-character chunks.

    Args:
        data_path: Path of the file to read.

    Returns:
        A pandas DataFrame for tabular files, or a list of langchain
        Documents for text-like files.

    Raises:
        NotImplementedError: If the suffix is not a supported format.
    """
    # Normalize case so e.g. ".CSV" and ".csv" are treated identically.
    suffix = data_path.suffix.lower()
    if suffix == ".xlsx":
        data = pd.read_excel(data_path)
    elif suffix == ".csv":
        data = pd.read_csv(data_path)
    elif suffix == ".json":
        data = pd.read_json(data_path)
    elif suffix in (".docx", ".doc"):
        data = UnstructuredWordDocumentLoader(str(data_path), mode="elements").load()
    elif suffix == ".txt":
        data = TextLoader(str(data_path)).load()
        text_splitter = CharacterTextSplitter(separator="\n", chunk_size=256, chunk_overlap=0)
        data = text_splitter.split_documents(data)
    elif suffix == ".pdf":
        data = UnstructuredPDFLoader(str(data_path), mode="elements").load()
    else:
        raise NotImplementedError("File format not supported.")
    return data
|
|
|
|
|
|
|
|
|
|
|
2023-11-27 15:36:50 +08:00
|
|
|
class DocumentStatus(Enum):
    """Lifecycle stage of a document, loosely modelled on the RFC/PEP process."""

    DRAFT = "draft"  # initial authoring stage
    UNDERREVIEW = "underreview"  # submitted and awaiting review
    APPROVED = "approved"  # review passed, content accepted
    DONE = "done"  # finalized; no further edits expected
|
|
|
|
|
|
|
|
|
|
|
2023-11-20 11:24:46 +08:00
|
|
|
class Document(BaseModel):
    """
    Document: Handles operations related to document files.

    ``path`` may be None for in-memory documents created via
    :meth:`from_text`; persisting such a document requires supplying a path.
    """

    # Location on disk; Optional so that from_text(path=None) validates
    # cleanly under pydantic v2 (a bare `Path` annotation rejects None).
    path: Optional[Path] = Field(default=None)
    name: str = Field(default="")
    content: str = Field(default="")

    # metadata? in content perhaps.
    author: str = Field(default="")
    status: DocumentStatus = Field(default=DocumentStatus.DRAFT)
    reviews: list = Field(default_factory=list)

    @classmethod
    def from_path(cls, path: Path):
        """
        Create a Document instance from a file path.

        Raises:
            FileNotFoundError: If *path* does not exist.
        """
        if not path.exists():
            raise FileNotFoundError(f"File {path} not found.")
        # Read with explicit utf-8 to mirror to_path(), which writes utf-8;
        # relying on the platform default encoding breaks round-tripping.
        content = path.read_text(encoding="utf-8")
        return cls(content=content, path=path)

    @classmethod
    def from_text(cls, text: str, path: Optional[Path] = None):
        """
        Create a Document from a text string, optionally bound to *path*.
        """
        return cls(content=text, path=path)

    def to_path(self, path: Optional[Path] = None):
        """
        Save content to the specified file path (or the stored ``self.path``).

        Raises:
            ValueError: If neither *path* nor ``self.path`` is set.
        """
        if path is not None:
            self.path = path

        if self.path is None:
            raise ValueError("File path is not set.")

        # Ensure parent directories exist before writing.
        self.path.parent.mkdir(parents=True, exist_ok=True)
        # TODO: excel, csv, json, etc.
        self.path.write_text(self.content, encoding="utf-8")

    def persist(self):
        """
        Persist document to disk.
        """
        return self.to_path()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class IndexableDocument(Document):
    """
    Advanced document handling: For vector databases or search engines.

    Wraps raw ``data`` (a pandas DataFrame for tabular sources, or a list of
    langchain Documents for text-like sources) and exposes it as parallel
    lists of document strings and metadata dicts.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    data: Union[pd.DataFrame, list]
    # DataFrame column holding the text content (tabular sources only).
    content_col: Optional[str] = Field(default="")
    # DataFrame column holding per-row metadata (tabular sources only).
    meta_col: Optional[str] = Field(default="")

    @classmethod
    def from_path(cls, data_path: Path, content_col="content", meta_col="metadata"):
        """Load *data_path* via read_data() and wrap it as an IndexableDocument.

        Raises:
            FileNotFoundError: If *data_path* does not exist.
            ValueError: If tabular data lacks *content_col*.
        """
        if not data_path.exists():
            raise FileNotFoundError(f"File {data_path} not found.")
        data = read_data(data_path)
        if isinstance(data, pd.DataFrame):
            validate_cols(content_col, data)
            return cls(data=data, content=str(data), content_col=content_col, meta_col=meta_col)
        else:
            # Langchain-loaded data: fall back to the raw file text for `content`.
            # NOTE(review): read_text() on a binary format (e.g. .pdf) may fail;
            # presumably only text-like files reach this branch — confirm.
            content = data_path.read_text()
            return cls(data=data, content=content, content_col=content_col, meta_col=meta_col)

    def _get_docs_and_metadatas_by_df(self) -> (list, list):
        """Extract (docs, metadatas) from a DataFrame, row by row."""
        df = self.data
        docs = []
        metadatas = []
        # Iterate column values directly instead of range(len(df)) with a
        # repeated .iloc[i] positional lookup per row; the meta_col check is
        # hoisted out of the loop since it is loop-invariant.
        if self.meta_col:
            rows = zip(df[self.content_col], df[self.meta_col])
            for doc, meta in tqdm(rows, total=len(df)):
                docs.append(doc)
                metadatas.append({self.meta_col: meta})
        else:
            for doc in tqdm(df[self.content_col], total=len(df)):
                docs.append(doc)
                metadatas.append({})
        return docs, metadatas

    def _get_docs_and_metadatas_by_langchain(self) -> (list, list):
        """Extract (docs, metadatas) from a list of langchain Documents."""
        data = self.data
        docs = [i.page_content for i in data]
        metadatas = [i.metadata for i in data]
        return docs, metadatas

    def get_docs_and_metadatas(self) -> (list, list):
        """Return (docs, metadatas), dispatching on the type of ``self.data``.

        Raises:
            NotImplementedError: If ``data`` is neither a DataFrame nor a list.
        """
        if isinstance(self.data, pd.DataFrame):
            return self._get_docs_and_metadatas_by_df()
        elif isinstance(self.data, list):
            return self._get_docs_and_metadatas_by_langchain()
        else:
            raise NotImplementedError("Data type not supported for metadata extraction.")
|
|
|
|
|
|
|
|
|
|
|
2023-11-27 15:36:50 +08:00
|
|
|
class RepoMetadata(BaseModel):
    """Summary statistics for a Repo, as produced by Repo.eda()."""

    # Repo name (mirrors Repo.name).
    name: str = Field(default="")
    # Total number of documents across docs/codes/assets.
    n_docs: int = Field(default=0)
    # Total character count over all document contents.
    n_chars: int = Field(default=0)
    # Code symbols extracted by RepoParser.generate_symbols().
    symbols: list = Field(default_factory=list)
|
|
|
|
|
|
|
|
|
|
|
2023-11-20 11:24:46 +08:00
|
|
|
class Repo(BaseModel):
    """In-memory view of a repository: markdown docs, code files and other assets."""

    # Name of this repo.
    name: str = Field(default="")
    # metadata: RepoMetadata = Field(default=RepoMetadata)
    docs: dict[Path, Document] = Field(default_factory=dict)
    codes: dict[Path, Document] = Field(default_factory=dict)
    assets: dict[Path, Document] = Field(default_factory=dict)
    # Root directory of the repo on disk; Optional so pydantic v2 accepts None.
    path: Optional[Path] = Field(default=None)

    def _path(self, filename):
        """Resolve *filename* relative to the repo root."""
        return self.path / filename

    @classmethod
    def from_path(cls, path: Path):
        """Load documents, code, and assets from a repository path."""
        path.mkdir(parents=True, exist_ok=True)
        # Use cls (not Repo) so subclasses get instances of themselves.
        repo = cls(path=path, name=path.name)
        for file_path in path.rglob("*"):
            # FIXME: These judgments are difficult to support multiple programming languages and need to be more general
            if file_path.is_file() and file_path.suffix in [".json", ".txt", ".md", ".py", ".js", ".css", ".html"]:
                # Explicit utf-8 mirrors Document.to_path(), which writes utf-8.
                repo._set(file_path.read_text(encoding="utf-8"), file_path)
        return repo

    def to_path(self):
        """Persist all documents, code, and assets to the given repository path."""
        for doc in self.docs.values():
            doc.to_path()
        for code in self.codes.values():
            code.to_path()
        for asset in self.assets.values():
            asset.to_path()

    def _set(self, content: str, path: Path):
        """Add a document to the appropriate category based on its file extension."""
        suffix = path.suffix
        doc = Document(content=content, path=path, name=str(path.relative_to(self.path)))

        # FIXME: These judgments are difficult to support multiple programming languages and need to be more general
        if suffix.lower() == ".md":
            self.docs[path] = doc
        elif suffix.lower() in [".py", ".js", ".css", ".html"]:
            self.codes[path] = doc
        else:
            self.assets[path] = doc
        return doc

    def set(self, filename: str, content: str):
        """Set a document and persist it to disk.

        Returns:
            The created Document (also written to ``self.path / filename``).
        """
        path = self._path(filename)
        doc = self._set(content, path)
        doc.to_path()
        return doc

    def get(self, filename: str) -> Optional[Document]:
        """Get a document by its filename, searching docs, codes, then assets."""
        path = self._path(filename)
        return self.docs.get(path) or self.codes.get(path) or self.assets.get(path)

    def get_text_documents(self) -> list[Document]:
        """Return all text-bearing documents (markdown docs plus code files)."""
        return list(self.docs.values()) + list(self.codes.values())

    def eda(self) -> RepoMetadata:
        """Compute summary statistics (counts, sizes, symbols) over the repo."""
        n_docs = sum(len(i) for i in [self.docs, self.codes, self.assets])
        n_chars = sum(sum(len(j.content) for j in i.values()) for i in [self.docs, self.codes, self.assets])
        symbols = RepoParser(base_directory=self.path).generate_symbols()
        return RepoMetadata(name=self.name, n_docs=n_docs, n_chars=n_chars, symbols=symbols)
|