feat: dbgpt api 0.3.0 (#319)

1. EmbeddingEngine: provide knowledge_embedding() and similar_search()
2. Multi-source SourceEmbedding
3. Docs for installation
4. Fix Chroma exit bug
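A minimal usage sketch of the new EmbeddingEngine API. Import paths, config keys, and method signatures follow the diff below; the model name, document path, and persist path are placeholder values, not part of the commit:

from pilot.embedding_engine.embedding_engine import EmbeddingEngine
from pilot.embedding_engine.knowledge_type import KnowledgeType

# Placeholder values; the call sites in this commit pass LLM_MODEL_CONFIG
# entries and KNOWLEDGE_UPLOAD_ROOT_PATH instead.
vector_store_config = {
    "vector_store_name": "my_space",
    "vector_store_type": "Chroma",
    "chroma_persist_path": "/tmp/knowledge_upload",
}

engine = EmbeddingEngine(
    model_name="text2vec",                       # placeholder embedding model
    vector_store_config=vector_store_config,
    knowledge_type=KnowledgeType.DOCUMENT.value,
    knowledge_source="./docs/install.md",        # placeholder document
)
engine.knowledge_embedding()  # chain: read -> text_split -> data_process -> index_store
docs = engine.similar_search("how to install", 5)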
@@ -1,3 +1,4 @@
 from pilot.embedding_engine import SourceEmbedding, register
+from pilot.embedding_engine import EmbeddingEngine, KnowledgeType
 
-__all__ = ["SourceEmbedding", "register"]
+__all__ = ["SourceEmbedding", "register", "EmbeddingEngine", "KnowledgeType"]
@@ -344,7 +344,14 @@ class Database:
         return [
             d[0]
             for d in results
-            if d[0] not in ["information_schema", "performance_schema", "sys", "mysql"]
+            if d[0]
+            not in [
+                "information_schema",
+                "performance_schema",
+                "sys",
+                "mysql",
+                "knowledge_management",
+            ]
         ]
 
     def convert_sql_write_to_select(self, write_sql):
@@ -421,7 +428,13 @@ class Database:
         session = self._db_sessions()
         cursor = session.execute(text(f"SHOW CREATE TABLE {table_name}"))
         ans = cursor.fetchall()
-        return ans[0][1]
+        res = ans[0][1]
+        res = re.sub(r"\s*ENGINE\s*=\s*InnoDB\s*", " ", res, flags=re.IGNORECASE)
+        res = re.sub(
+            r"\s*DEFAULT\s*CHARSET\s*=\s*\w+\s*", " ", res, flags=re.IGNORECASE
+        )
+        res = re.sub(r"\s*COLLATE\s*=\s*\w+\s*", " ", res, flags=re.IGNORECASE)
+        return res
 
     def get_fields(self, table_name):
         """Get column fields about specified table."""
@@ -1,3 +1,5 @@
 from pilot.embedding_engine.source_embedding import SourceEmbedding, register
+from pilot.embedding_engine.embedding_engine import EmbeddingEngine
+from pilot.embedding_engine.knowledge_type import KnowledgeType
 
-__all__ = ["SourceEmbedding", "register"]
+__all__ = ["SourceEmbedding", "register", "EmbeddingEngine", "KnowledgeType"]
@@ -2,6 +2,11 @@ from typing import Dict, List, Optional
 
 from langchain.document_loaders import CSVLoader
 from langchain.schema import Document
+from langchain.text_splitter import (
+    TextSplitter,
+    SpacyTextSplitter,
+    RecursiveCharacterTextSplitter,
+)
 
 from pilot.embedding_engine import SourceEmbedding, register
@@ -13,19 +18,36 @@ class CSVEmbedding(SourceEmbedding):
         self,
         file_path,
         vector_store_config,
         embedding_args: Optional[Dict] = None,
+        source_reader: Optional = None,
+        text_splitter: Optional[TextSplitter] = None,
     ):
         """Initialize with csv path."""
-        super().__init__(file_path, vector_store_config)
+        super().__init__(
+            file_path, vector_store_config, source_reader=None, text_splitter=None
+        )
         self.file_path = file_path
         self.vector_store_config = vector_store_config
         self.embedding_args = embedding_args
+        self.source_reader = source_reader or None
+        self.text_splitter = text_splitter or None
 
     @register
     def read(self):
         """Load from csv path."""
-        loader = CSVLoader(file_path=self.file_path)
-        return loader.load()
+        if self.source_reader is None:
+            self.source_reader = CSVLoader(self.file_path)
+        if self.text_splitter is None:
+            try:
+                self.text_splitter = SpacyTextSplitter(
+                    pipeline="zh_core_web_sm",
+                    chunk_size=100,
+                    chunk_overlap=100,
+                )
+            except Exception:
+                self.text_splitter = RecursiveCharacterTextSplitter(
+                    chunk_size=100, chunk_overlap=50
+                )
+
+        return self.source_reader.load_and_split(self.text_splitter)
 
     @register
     def data_process(self, documents: List[Document]):
@@ -2,21 +2,28 @@ from typing import Optional
 
 from chromadb.errors import NotEnoughElementsException
 from langchain.embeddings import HuggingFaceEmbeddings
+from langchain.text_splitter import TextSplitter
 
 from pilot.configs.config import Config
 from pilot.embedding_engine.knowledge_type import get_knowledge_embedding, KnowledgeType
 from pilot.vector_store.connector import VectorStoreConnector
 
 CFG = Config()
 
 
-class KnowledgeEmbedding:
+class EmbeddingEngine:
+    """EmbeddingEngine provide a chain process include(read->text_split->data_process->index_store) for knowledge document embedding into vector store.
+    1.knowledge_embedding:knowledge document source into vector store.(Chroma, Milvus, Weaviate)
+    2.similar_search: similarity search from vector_store
+    how to use reference:https://db-gpt.readthedocs.io/en/latest/modules/knowledge.html
+    how to integrate:https://db-gpt.readthedocs.io/en/latest/modules/knowledge/pdf/pdf_embedding.html
+    """
+
     def __init__(
         self,
         model_name,
         vector_store_config,
         knowledge_type: Optional[str] = KnowledgeType.DOCUMENT.value,
         knowledge_source: Optional[str] = None,
+        source_reader: Optional = None,
+        text_splitter: Optional[TextSplitter] = None,
     ):
         """Initialize with knowledge embedding client, model_name, vector_store_config, knowledge_type, knowledge_source"""
         self.knowledge_source = knowledge_source
@@ -25,27 +32,36 @@ class KnowledgeEmbedding:
         self.knowledge_type = knowledge_type
         self.embeddings = HuggingFaceEmbeddings(model_name=self.model_name)
         self.vector_store_config["embeddings"] = self.embeddings
+        self.source_reader = source_reader
+        self.text_splitter = text_splitter
 
     def knowledge_embedding(self):
+        """source embedding is chain process.read->text_split->data_process->index_store"""
         self.knowledge_embedding_client = self.init_knowledge_embedding()
         self.knowledge_embedding_client.source_embedding()
 
     def knowledge_embedding_batch(self, docs):
+        """Deprecation"""
         # docs = self.knowledge_embedding_client.read_batch()
        return self.knowledge_embedding_client.index_to_store(docs)
 
     def read(self):
+        """Deprecation"""
         self.knowledge_embedding_client = self.init_knowledge_embedding()
         return self.knowledge_embedding_client.read_batch()
 
     def init_knowledge_embedding(self):
         return get_knowledge_embedding(
-            self.knowledge_type, self.knowledge_source, self.vector_store_config
+            self.knowledge_type,
+            self.knowledge_source,
+            self.vector_store_config,
+            self.source_reader,
+            self.text_splitter,
         )
 
     def similar_search(self, text, topk):
         vector_client = VectorStoreConnector(
-            CFG.VECTOR_STORE_TYPE, self.vector_store_config
+            self.vector_store_config["vector_store_type"], self.vector_store_config
        )
         try:
             ans = vector_client.similar_search(text, topk)
@@ -55,12 +71,12 @@ class KnowledgeEmbedding:
 
     def vector_exist(self):
         vector_client = VectorStoreConnector(
-            CFG.VECTOR_STORE_TYPE, self.vector_store_config
+            self.vector_store_config["vector_store_type"], self.vector_store_config
         )
         return vector_client.vector_name_exists()
 
     def delete_by_ids(self, ids):
         vector_client = VectorStoreConnector(
-            CFG.VECTOR_STORE_TYPE, self.vector_store_config
+            self.vector_store_config["vector_store_type"], self.vector_store_config
         )
         vector_client.delete_by_ids(ids=ids)
@@ -11,6 +11,7 @@ from pilot.embedding_engine.word_embedding import WordEmbedding
 DocumentEmbeddingType = {
     ".txt": (MarkdownEmbedding, {}),
     ".md": (MarkdownEmbedding, {}),
+    ".html": (MarkdownEmbedding, {}),
     ".pdf": (PDFEmbedding, {}),
     ".doc": (WordEmbedding, {}),
     ".docx": (WordEmbedding, {}),
@@ -25,10 +26,23 @@ class KnowledgeType(Enum):
     URL = "URL"
     TEXT = "TEXT"
     OSS = "OSS"
     S3 = "S3"
     NOTION = "NOTION"
+    MYSQL = "MYSQL"
+    TIDB = "TIDB"
+    CLICKHOUSE = "CLICKHOUSE"
+    OCEANBASE = "OCEANBASE"
+    ELASTICSEARCH = "ELASTICSEARCH"
+    HIVE = "HIVE"
+    PRESTO = "PRESTO"
+    KAFKA = "KAFKA"
+    SPARK = "SPARK"
+    YOUTUBE = "YOUTUBE"
 
 
-def get_knowledge_embedding(knowledge_type, knowledge_source, vector_store_config):
+def get_knowledge_embedding(
+    knowledge_type, knowledge_source, vector_store_config, source_reader, text_splitter
+):
     match knowledge_type:
         case KnowledgeType.DOCUMENT.value:
             extension = "." + knowledge_source.rsplit(".", 1)[-1]
@@ -37,6 +51,8 @@ def get_knowledge_embedding(knowledge_type, knowledge_source, vector_store_confi
             embedding = knowledge_class(
                 knowledge_source,
                 vector_store_config=vector_store_config,
+                source_reader=source_reader,
+                text_splitter=text_splitter,
                 **knowledge_args,
             )
             return embedding
@@ -45,18 +61,43 @@ def get_knowledge_embedding(knowledge_type, knowledge_source, vector_store_confi
             embedding = URLEmbedding(
                 file_path=knowledge_source,
                 vector_store_config=vector_store_config,
+                source_reader=source_reader,
+                text_splitter=text_splitter,
             )
             return embedding
         case KnowledgeType.TEXT.value:
             embedding = StringEmbedding(
                 file_path=knowledge_source,
                 vector_store_config=vector_store_config,
+                source_reader=source_reader,
+                text_splitter=text_splitter,
             )
             return embedding
         case KnowledgeType.OSS.value:
             raise Exception("OSS have not integrate")
         case KnowledgeType.S3.value:
             raise Exception("S3 have not integrate")
         case KnowledgeType.NOTION.value:
             raise Exception("NOTION have not integrate")
+
+        case KnowledgeType.MYSQL.value:
+            raise Exception("MYSQL have not integrate")
+        case KnowledgeType.TIDB.value:
+            raise Exception("TIDB have not integrate")
+        case KnowledgeType.CLICKHOUSE.value:
+            raise Exception("CLICKHOUSE have not integrate")
+        case KnowledgeType.OCEANBASE.value:
+            raise Exception("OCEANBASE have not integrate")
+        case KnowledgeType.ELASTICSEARCH.value:
+            raise Exception("ELASTICSEARCH have not integrate")
+        case KnowledgeType.HIVE.value:
+            raise Exception("HIVE have not integrate")
+        case KnowledgeType.PRESTO.value:
+            raise Exception("PRESTO have not integrate")
+        case KnowledgeType.KAFKA.value:
+            raise Exception("KAFKA have not integrate")
+        case KnowledgeType.SPARK.value:
+            raise Exception("SPARK have not integrate")
+        case KnowledgeType.YOUTUBE.value:
+            raise Exception("YOUTUBE have not integrate")
         case _:
             raise Exception("unknown knowledge type")
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 import os
-from typing import List
+from typing import List, Optional
 
 import markdown
 from bs4 import BeautifulSoup
@@ -10,48 +10,50 @@ from langchain.text_splitter import (
     SpacyTextSplitter,
     CharacterTextSplitter,
     RecursiveCharacterTextSplitter,
+    TextSplitter,
 )
 
 from pilot.configs.config import Config
 from pilot.embedding_engine import SourceEmbedding, register
 from pilot.embedding_engine.EncodeTextLoader import EncodeTextLoader
 
 CFG = Config()
 
 
 class MarkdownEmbedding(SourceEmbedding):
     """markdown embedding for read markdown document."""
 
-    def __init__(self, file_path, vector_store_config):
-        """Initialize with markdown path."""
-        super().__init__(file_path, vector_store_config)
+    def __init__(
+        self,
+        file_path,
+        vector_store_config,
+        source_reader: Optional = None,
+        text_splitter: Optional[TextSplitter] = None,
+    ):
+        """Initialize raw text word path."""
+        super().__init__(
+            file_path, vector_store_config, source_reader=None, text_splitter=None
+        )
         self.file_path = file_path
         self.vector_store_config = vector_store_config
         # self.encoding = encoding
+        self.source_reader = source_reader or None
+        self.text_splitter = text_splitter or None
 
     @register
     def read(self):
         """Load from markdown path."""
-        loader = EncodeTextLoader(self.file_path)
-
-        if CFG.LANGUAGE == "en":
-            text_splitter = RecursiveCharacterTextSplitter(
-                chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
-                chunk_overlap=20,
-                length_function=len,
-            )
-        else:
+        if self.source_reader is None:
+            self.source_reader = EncodeTextLoader(self.file_path)
+        if self.text_splitter is None:
             try:
-                text_splitter = SpacyTextSplitter(
+                self.text_splitter = SpacyTextSplitter(
                     pipeline="zh_core_web_sm",
-                    chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+                    chunk_size=100,
                     chunk_overlap=100,
                 )
             except Exception:
-                text_splitter = RecursiveCharacterTextSplitter(
-                    chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE, chunk_overlap=50
+                self.text_splitter = RecursiveCharacterTextSplitter(
+                    chunk_size=100, chunk_overlap=50
                 )
-        return loader.load_and_split(text_splitter)
+
+        return self.source_reader.load_and_split(self.text_splitter)
 
     @register
     def data_process(self, documents: List[Document]):
@@ -1,56 +1,55 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-from typing import List
+from typing import List, Optional
 
 from langchain.document_loaders import PyPDFLoader
 from langchain.schema import Document
-from langchain.text_splitter import SpacyTextSplitter, RecursiveCharacterTextSplitter
+from langchain.text_splitter import (
+    SpacyTextSplitter,
+    RecursiveCharacterTextSplitter,
+    TextSplitter,
+)
 
 from pilot.configs.config import Config
 from pilot.embedding_engine import SourceEmbedding, register
 
 CFG = Config()
 
 
 class PDFEmbedding(SourceEmbedding):
     """pdf embedding for read pdf document."""
 
-    def __init__(self, file_path, vector_store_config):
-        """Initialize with pdf path."""
-        super().__init__(file_path, vector_store_config)
+    def __init__(
+        self,
+        file_path,
+        vector_store_config,
+        source_reader: Optional = None,
+        text_splitter: Optional[TextSplitter] = None,
+    ):
+        """Initialize pdf word path."""
+        super().__init__(
+            file_path, vector_store_config, source_reader=None, text_splitter=None
+        )
         self.file_path = file_path
         self.vector_store_config = vector_store_config
+        self.source_reader = source_reader or None
+        self.text_splitter = text_splitter or None
 
     @register
     def read(self):
         """Load from pdf path."""
-        loader = PyPDFLoader(self.file_path)
-        # textsplitter = CHNDocumentSplitter(
-        #     pdf=True, sentence_size=CFG.KNOWLEDGE_CHUNK_SIZE
-        # )
-        # textsplitter = SpacyTextSplitter(
-        #     pipeline="zh_core_web_sm",
-        #     chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
-        #     chunk_overlap=100,
-        # )
-        if CFG.LANGUAGE == "en":
-            text_splitter = RecursiveCharacterTextSplitter(
-                chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
-                chunk_overlap=20,
-                length_function=len,
-            )
-        else:
+        if self.source_reader is None:
+            self.source_reader = PyPDFLoader(self.file_path)
+        if self.text_splitter is None:
             try:
-                text_splitter = SpacyTextSplitter(
+                self.text_splitter = SpacyTextSplitter(
                     pipeline="zh_core_web_sm",
-                    chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+                    chunk_size=100,
                     chunk_overlap=100,
                 )
             except Exception:
-                text_splitter = RecursiveCharacterTextSplitter(
-                    chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE, chunk_overlap=50
+                self.text_splitter = RecursiveCharacterTextSplitter(
+                    chunk_size=100, chunk_overlap=50
                 )
-        return loader.load_and_split(text_splitter)
+
+        return self.source_reader.load_and_split(self.text_splitter)
 
     @register
     def data_process(self, documents: List[Document]):
@@ -1,54 +1,55 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-from typing import List
+from typing import List, Optional
 
 from langchain.document_loaders import UnstructuredPowerPointLoader
 from langchain.schema import Document
-from langchain.text_splitter import SpacyTextSplitter, RecursiveCharacterTextSplitter
+from langchain.text_splitter import (
+    SpacyTextSplitter,
+    RecursiveCharacterTextSplitter,
+    TextSplitter,
+)
 
 from pilot.configs.config import Config
 from pilot.embedding_engine import SourceEmbedding, register
 from pilot.embedding_engine.chn_document_splitter import CHNDocumentSplitter
 
 CFG = Config()
 
 
 class PPTEmbedding(SourceEmbedding):
     """ppt embedding for read ppt document."""
 
-    def __init__(self, file_path, vector_store_config):
-        """Initialize with pdf path."""
-        super().__init__(file_path, vector_store_config)
+    def __init__(
+        self,
+        file_path,
+        vector_store_config,
+        source_reader: Optional = None,
+        text_splitter: Optional[TextSplitter] = None,
+    ):
+        """Initialize ppt word path."""
+        super().__init__(
+            file_path, vector_store_config, source_reader=None, text_splitter=None
+        )
         self.file_path = file_path
         self.vector_store_config = vector_store_config
+        self.source_reader = source_reader or None
+        self.text_splitter = text_splitter or None
 
     @register
     def read(self):
         """Load from ppt path."""
-        loader = UnstructuredPowerPointLoader(self.file_path)
-        # textsplitter = SpacyTextSplitter(
-        #     pipeline="zh_core_web_sm",
-        #     chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
-        #     chunk_overlap=200,
-        # )
-        if CFG.LANGUAGE == "en":
-            text_splitter = RecursiveCharacterTextSplitter(
-                chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
-                chunk_overlap=20,
-                length_function=len,
-            )
-        else:
+        if self.source_reader is None:
+            self.source_reader = UnstructuredPowerPointLoader(self.file_path)
+        if self.text_splitter is None:
             try:
-                text_splitter = SpacyTextSplitter(
+                self.text_splitter = SpacyTextSplitter(
                     pipeline="zh_core_web_sm",
-                    chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+                    chunk_size=100,
                     chunk_overlap=100,
                 )
             except Exception:
-                text_splitter = RecursiveCharacterTextSplitter(
-                    chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE, chunk_overlap=50
+                self.text_splitter = RecursiveCharacterTextSplitter(
+                    chunk_size=100, chunk_overlap=50
                )
-        return loader.load_and_split(text_splitter)
+
+        return self.source_reader.load_and_split(self.text_splitter)
 
     @register
     def data_process(self, documents: List[Document]):
@@ -4,11 +4,11 @@ from abc import ABC, abstractmethod
 from typing import Dict, List, Optional
 
 from chromadb.errors import NotEnoughElementsException
-from pilot.configs.config import Config
+from langchain.text_splitter import TextSplitter
 
 from pilot.vector_store.connector import VectorStoreConnector
 
 registered_methods = []
-CFG = Config()
 
 
 def register(method):
@@ -25,12 +25,16 @@ class SourceEmbedding(ABC):
     def __init__(
         self,
         file_path,
-        vector_store_config,
+        vector_store_config: {},
+        source_reader: Optional = None,
+        text_splitter: Optional[TextSplitter] = None,
         embedding_args: Optional[Dict] = None,
     ):
         """Initialize with Loader url, model_name, vector_store_config"""
         self.file_path = file_path
         self.vector_store_config = vector_store_config
+        self.source_reader = source_reader or None
+        self.text_splitter = text_splitter or None
         self.embedding_args = embedding_args
         self.embeddings = vector_store_config["embeddings"]
@@ -44,8 +48,8 @@ class SourceEmbedding(ABC):
         """pre process data."""
 
     @register
-    def text_split(self, text):
-        """text split chunk"""
+    def text_splitter(self, text_splitter: TextSplitter):
+        """add text split chunk"""
         pass
 
     @register
@@ -57,7 +61,7 @@ class SourceEmbedding(ABC):
     def index_to_store(self, docs):
         """index to vector store"""
         self.vector_client = VectorStoreConnector(
-            CFG.VECTOR_STORE_TYPE, self.vector_store_config
+            self.vector_store_config["vector_store_type"], self.vector_store_config
         )
         return self.vector_client.load_document(docs)
 
@@ -65,7 +69,7 @@ class SourceEmbedding(ABC):
     def similar_search(self, doc, topk):
         """vector store similarity_search"""
         self.vector_client = VectorStoreConnector(
-            CFG.VECTOR_STORE_TYPE, self.vector_store_config
+            self.vector_store_config["vector_store_type"], self.vector_store_config
        )
         try:
             ans = self.vector_client.similar_search(doc, topk)
@@ -75,7 +79,7 @@ class SourceEmbedding(ABC):
 
     def vector_name_exist(self):
         self.vector_client = VectorStoreConnector(
-            CFG.VECTOR_STORE_TYPE, self.vector_store_config
+            self.vector_store_config["vector_store_type"], self.vector_store_config
         )
         return self.vector_client.vector_name_exists()
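SourceEmbedding drives its @register chain as read -> text_split -> data_process -> index_to_store. A hedged sketch of a custom source following the same pattern as the CSV/PDF/URL embeddings in this commit; the PlainTextEmbedding class, the TextLoader choice, and the splitter parameters are illustrative, not part of the diff:

from typing import List, Optional

from langchain.document_loaders import TextLoader
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter

from pilot.embedding_engine import SourceEmbedding, register


class PlainTextEmbedding(SourceEmbedding):
    """Hypothetical source; vector_store_config must carry an 'embeddings'
    entry, which SourceEmbedding.__init__ reads above."""

    def __init__(
        self,
        file_path,
        vector_store_config,
        source_reader: Optional = None,
        text_splitter: Optional[TextSplitter] = None,
    ):
        super().__init__(
            file_path, vector_store_config, source_reader=None, text_splitter=None
        )
        self.file_path = file_path
        self.vector_store_config = vector_store_config
        self.source_reader = source_reader or None
        self.text_splitter = text_splitter or None

    @register
    def read(self):
        # Fall back to defaults when the caller injected no reader/splitter,
        # mirroring the embeddings in this commit.
        if self.source_reader is None:
            self.source_reader = TextLoader(self.file_path)
        if self.text_splitter is None:
            self.text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=100, chunk_overlap=50
            )
        return self.source_reader.load_and_split(self.text_splitter)

    @register
    def data_process(self, documents: List[Document]):
        # No-op cleanup step; real sources strip whitespace or HTML here.
        return documents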
@@ -1,24 +1,55 @@
-from typing import List
+from typing import List, Optional
 
 from langchain.schema import Document
+from langchain.text_splitter import (
+    TextSplitter,
+    SpacyTextSplitter,
+    RecursiveCharacterTextSplitter,
+)
 
-from pilot import SourceEmbedding, register
+from pilot.embedding_engine import SourceEmbedding, register
 
 
 class StringEmbedding(SourceEmbedding):
     """string embedding for read string document."""
 
-    def __init__(self, file_path, vector_store_config):
-        """Initialize with pdf path."""
-        super().__init__(file_path, vector_store_config)
+    def __init__(
+        self,
+        file_path,
+        vector_store_config,
+        source_reader: Optional = None,
+        text_splitter: Optional[TextSplitter] = None,
+    ):
+        """Initialize raw text word path."""
+        super().__init__(
+            file_path=file_path,
+            vector_store_config=vector_store_config,
+            source_reader=None,
+            text_splitter=None,
+        )
         self.file_path = file_path
         self.vector_store_config = vector_store_config
+        self.source_reader = source_reader or None
+        self.text_splitter = text_splitter or None
 
     @register
     def read(self):
         """Load from String path."""
-        metadata = {"source": "db_summary"}
-        return [Document(page_content=self.file_path, metadata=metadata)]
+        metadata = {"source": "raw text"}
+        docs = [Document(page_content=self.file_path, metadata=metadata)]
+        if self.text_splitter is None:
+            try:
+                self.text_splitter = SpacyTextSplitter(
+                    pipeline="zh_core_web_sm",
+                    chunk_size=500,
+                    chunk_overlap=100,
+                )
+            except Exception:
+                self.text_splitter = RecursiveCharacterTextSplitter(
+                    chunk_size=100, chunk_overlap=50
+                )
+            return self.text_splitter.split_documents(docs)
+        return docs
 
     @register
     def data_process(self, documents: List[Document]):
@@ -1,49 +1,54 @@
-from typing import List
+from typing import List, Optional
 
 from bs4 import BeautifulSoup
 from langchain.document_loaders import WebBaseLoader
 from langchain.schema import Document
-from langchain.text_splitter import SpacyTextSplitter, RecursiveCharacterTextSplitter
+from langchain.text_splitter import (
+    SpacyTextSplitter,
+    RecursiveCharacterTextSplitter,
+    TextSplitter,
+)
 
 from pilot.configs.config import Config
 from pilot.configs.model_config import KNOWLEDGE_CHUNK_SPLIT_SIZE
 from pilot.embedding_engine import SourceEmbedding, register
 from pilot.embedding_engine.chn_document_splitter import CHNDocumentSplitter
 
 CFG = Config()
 
 
 class URLEmbedding(SourceEmbedding):
     """url embedding for read url document."""
 
-    def __init__(self, file_path, vector_store_config):
-        """Initialize with url path."""
-        super().__init__(file_path, vector_store_config)
+    def __init__(
+        self,
+        file_path,
+        vector_store_config,
+        source_reader: Optional = None,
+        text_splitter: Optional[TextSplitter] = None,
+    ):
+        """Initialize url word path."""
+        super().__init__(
+            file_path, vector_store_config, source_reader=None, text_splitter=None
+        )
         self.file_path = file_path
         self.vector_store_config = vector_store_config
+        self.source_reader = source_reader or None
+        self.text_splitter = text_splitter or None
 
     @register
     def read(self):
         """Load from url path."""
-        loader = WebBaseLoader(web_path=self.file_path)
-        if CFG.LANGUAGE == "en":
-            text_splitter = RecursiveCharacterTextSplitter(
-                chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
-                chunk_overlap=20,
-                length_function=len,
-            )
-        else:
+        if self.source_reader is None:
+            self.source_reader = WebBaseLoader(web_path=self.file_path)
+        if self.text_splitter is None:
             try:
-                text_splitter = SpacyTextSplitter(
+                self.text_splitter = SpacyTextSplitter(
                     pipeline="zh_core_web_sm",
-                    chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+                    chunk_size=100,
                     chunk_overlap=100,
                 )
             except Exception:
-                text_splitter = RecursiveCharacterTextSplitter(
-                    chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE, chunk_overlap=50
+                self.text_splitter = RecursiveCharacterTextSplitter(
+                    chunk_size=100, chunk_overlap=50
                 )
-        return loader.load_and_split(text_splitter)
+
+        return self.source_reader.load_and_split(self.text_splitter)
 
     @register
     def data_process(self, documents: List[Document]):
@@ -1,48 +1,55 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-from typing import List
+from typing import List, Optional
 
 from langchain.document_loaders import UnstructuredWordDocumentLoader
 from langchain.schema import Document
-from langchain.text_splitter import SpacyTextSplitter, RecursiveCharacterTextSplitter
+from langchain.text_splitter import (
+    SpacyTextSplitter,
+    RecursiveCharacterTextSplitter,
+    TextSplitter,
+)
 
 from pilot.configs.config import Config
 from pilot.embedding_engine import SourceEmbedding, register
 
 CFG = Config()
 
 
 class WordEmbedding(SourceEmbedding):
     """word embedding for read word document."""
 
-    def __init__(self, file_path, vector_store_config):
+    def __init__(
+        self,
+        file_path,
+        vector_store_config,
+        source_reader: Optional = None,
+        text_splitter: Optional[TextSplitter] = None,
+    ):
         """Initialize with word path."""
-        super().__init__(file_path, vector_store_config)
+        super().__init__(
+            file_path, vector_store_config, source_reader=None, text_splitter=None
+        )
         self.file_path = file_path
         self.vector_store_config = vector_store_config
+        self.source_reader = source_reader or None
+        self.text_splitter = text_splitter or None
 
     @register
     def read(self):
         """Load from word path."""
-        loader = UnstructuredWordDocumentLoader(self.file_path)
-        if CFG.LANGUAGE == "en":
-            text_splitter = RecursiveCharacterTextSplitter(
-                chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
-                chunk_overlap=20,
-                length_function=len,
-            )
-        else:
+        if self.source_reader is None:
+            self.source_reader = UnstructuredWordDocumentLoader(self.file_path)
+        if self.text_splitter is None:
             try:
-                text_splitter = SpacyTextSplitter(
+                self.text_splitter = SpacyTextSplitter(
                     pipeline="zh_core_web_sm",
-                    chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+                    chunk_size=100,
                     chunk_overlap=100,
                 )
             except Exception:
-                text_splitter = RecursiveCharacterTextSplitter(
-                    chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE, chunk_overlap=50
+                self.text_splitter = RecursiveCharacterTextSplitter(
+                    chunk_size=100, chunk_overlap=50
                 )
-        return loader.load_and_split(text_splitter)
+
+        return self.source_reader.load_and_split(self.text_splitter)
 
     @register
     def data_process(self, documents: List[Document]):
@@ -50,7 +50,7 @@ prompt = PromptTemplate(
     output_parser=DbChatOutputParser(
         sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
     ),
-    example_selector=sql_data_example,
+    # example_selector=sql_data_example,
     temperature=PROMPT_TEMPERATURE,
 )
 CFG.prompt_templates.update({prompt.template_scene: prompt})
@@ -17,7 +17,7 @@ from pilot.configs.model_config import (
 )
 
 from pilot.scene.chat_knowledge.custom.prompt import prompt
-from pilot.embedding_engine.knowledge_embedding import KnowledgeEmbedding
+from pilot.embedding_engine.embedding_engine import EmbeddingEngine
 
 CFG = Config()
 
@@ -37,10 +37,10 @@ class ChatNewKnowledge(BaseChat):
         self.knowledge_name = knowledge_name
         vector_store_config = {
             "vector_store_name": knowledge_name,
-            "text_field": "content",
-            "vector_store_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
+            "vector_store_type": CFG.VECTOR_STORE_TYPE,
+            "chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
         }
-        self.knowledge_embedding_client = KnowledgeEmbedding(
+        self.knowledge_embedding_client = EmbeddingEngine(
             model_name=LLM_MODEL_CONFIG["text2vec"],
             vector_store_config=vector_store_config,
         )
@@ -19,7 +19,7 @@ from pilot.configs.model_config import (
 )
 
 from pilot.scene.chat_knowledge.default.prompt import prompt
-from pilot.embedding_engine.knowledge_embedding import KnowledgeEmbedding
+from pilot.embedding_engine.embedding_engine import EmbeddingEngine
 
 CFG = Config()
 
@@ -38,9 +38,10 @@ class ChatDefaultKnowledge(BaseChat):
         )
         vector_store_config = {
             "vector_store_name": "default",
-            "vector_store_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
+            "vector_store_type": CFG.VECTOR_STORE_TYPE,
+            "chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
         }
-        self.knowledge_embedding_client = KnowledgeEmbedding(
+        self.knowledge_embedding_client = EmbeddingEngine(
             model_name=LLM_MODEL_CONFIG["text2vec"],
             vector_store_config=vector_store_config,
         )
@@ -18,7 +18,7 @@ from pilot.configs.model_config import (
 )
 
 from pilot.scene.chat_knowledge.url.prompt import prompt
-from pilot.embedding_engine.knowledge_embedding import KnowledgeEmbedding
+from pilot.embedding_engine.embedding_engine import EmbeddingEngine
 
 CFG = Config()
 
@@ -38,9 +38,10 @@ class ChatUrlKnowledge(BaseChat):
         self.url = url
         vector_store_config = {
             "vector_store_name": url.replace(":", ""),
-            "vector_store_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
+            "vector_store_type": CFG.VECTOR_STORE_TYPE,
+            "chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
         }
-        self.knowledge_embedding_client = KnowledgeEmbedding(
+        self.knowledge_embedding_client = EmbeddingEngine(
             model_name=LLM_MODEL_CONFIG[CFG.EMBEDDING_MODEL],
             vector_store_config=vector_store_config,
             knowledge_type=KnowledgeType.URL.value,
@@ -19,7 +19,7 @@ from pilot.configs.model_config import (
 )
 
 from pilot.scene.chat_knowledge.v1.prompt import prompt
-from pilot.embedding_engine.knowledge_embedding import KnowledgeEmbedding
+from pilot.embedding_engine.embedding_engine import EmbeddingEngine
 
 CFG = Config()
 
@@ -38,9 +38,10 @@ class ChatKnowledge(BaseChat):
         )
         vector_store_config = {
             "vector_store_name": knowledge_space,
-            "vector_store_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
+            "vector_store_type": CFG.VECTOR_STORE_TYPE,
+            "chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
         }
-        self.knowledge_embedding_client = KnowledgeEmbedding(
+        self.knowledge_embedding_client = EmbeddingEngine(
             model_name=LLM_MODEL_CONFIG[CFG.EMBEDDING_MODEL],
             vector_store_config=vector_store_config,
         )
@@ -1,3 +1,4 @@
 import atexit
+import traceback
 import os
 import shutil
@@ -36,7 +37,7 @@ CFG = Config()
 logger = build_logger("webserver", LOGDIR + "webserver.log")
 
 
-def signal_handler(sig, frame):
+def signal_handler():
     print("in order to avoid chroma db atexit problem")
     os._exit(0)
 
@@ -96,7 +97,6 @@ if __name__ == "__main__":
         action="store_true",
         help="enable light mode",
     )
-    signal.signal(signal.SIGINT, signal_handler)
 
     # init server config
     args = parser.parse_args()
@@ -114,3 +114,4 @@ if __name__ == "__main__":
     import uvicorn
 
     uvicorn.run(app, host="0.0.0.0", port=args.port)
+    signal.signal(signal.SIGINT, signal_handler())
@@ -10,7 +10,7 @@ from pilot.configs.config import Config
 from pilot.configs.model_config import LLM_MODEL_CONFIG, KNOWLEDGE_UPLOAD_ROOT_PATH
 
 from pilot.openapi.api_v1.api_view_model import Result
-from pilot.embedding_engine.knowledge_embedding import KnowledgeEmbedding
+from pilot.embedding_engine.embedding_engine import EmbeddingEngine
 
 from pilot.server.knowledge.service import KnowledgeService
 from pilot.server.knowledge.request.request import (
@@ -143,7 +143,7 @@ def document_list(space_name: str, query_request: ChunkQueryRequest):
 @router.post("/knowledge/{vector_name}/query")
 def similar_query(space_name: str, query_request: KnowledgeQueryRequest):
     print(f"Received params: {space_name}, {query_request}")
-    client = KnowledgeEmbedding(
+    client = EmbeddingEngine(
         model_name=embeddings, vector_store_config={"vector_store_name": space_name}
     )
     docs = client.similar_search(query_request.query, query_request.top_k)
@@ -1,9 +1,11 @@
 import threading
 from datetime import datetime
 
+from langchain.text_splitter import RecursiveCharacterTextSplitter, SpacyTextSplitter
+
 from pilot.configs.config import Config
-from pilot.configs.model_config import LLM_MODEL_CONFIG
-from pilot.embedding_engine.knowledge_embedding import KnowledgeEmbedding
+from pilot.configs.model_config import LLM_MODEL_CONFIG, KNOWLEDGE_UPLOAD_ROOT_PATH
+from pilot.embedding_engine.embedding_engine import EmbeddingEngine
 from pilot.logs import logger
 from pilot.server.knowledge.chunk_db import (
     DocumentChunkEntity,
@@ -122,13 +124,34 @@ class KnowledgeService:
             raise Exception(
                 f" doc:{doc.doc_name} status is {doc.status}, can not sync"
             )
-        client = KnowledgeEmbedding(
+
+        if CFG.LANGUAGE == "en":
+            text_splitter = RecursiveCharacterTextSplitter(
+                chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+                chunk_overlap=20,
+                length_function=len,
+            )
+        else:
+            try:
+                text_splitter = SpacyTextSplitter(
+                    pipeline="zh_core_web_sm",
+                    chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+                    chunk_overlap=100,
+                )
+            except Exception:
+                text_splitter = RecursiveCharacterTextSplitter(
+                    chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE, chunk_overlap=50
+                )
+        client = EmbeddingEngine(
             knowledge_source=doc.content,
             knowledge_type=doc.doc_type.upper(),
             model_name=LLM_MODEL_CONFIG[CFG.EMBEDDING_MODEL],
             vector_store_config={
                 "vector_store_name": space_name,
+                "vector_store_type": CFG.VECTOR_STORE_TYPE,
+                "chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
             },
+            text_splitter=text_splitter,
         )
         chunk_docs = client.read()
         # update document status
@@ -37,7 +37,7 @@ from pilot.conversation import (
 
 from pilot.server.gradio_css import code_highlight_css
 from pilot.server.gradio_patch import Chatbot as grChatbot
-from pilot.embedding_engine.knowledge_embedding import KnowledgeEmbedding
+from pilot.embedding_engine.embedding_engine import EmbeddingEngine
 from pilot.utils import build_logger
 from pilot.vector_store.extract_tovec import (
     get_vector_storelist,
@@ -659,13 +659,14 @@ def knowledge_embedding_store(vs_id, files):
         shutil.move(
             file.name, os.path.join(KNOWLEDGE_UPLOAD_ROOT_PATH, vs_id, filename)
         )
-    knowledge_embedding_client = KnowledgeEmbedding(
+    knowledge_embedding_client = EmbeddingEngine(
         knowledge_source=os.path.join(KNOWLEDGE_UPLOAD_ROOT_PATH, vs_id, filename),
         knowledge_type=KnowledgeType.DOCUMENT.value,
         model_name=LLM_MODEL_CONFIG["text2vec"],
        vector_store_config={
             "vector_store_name": vector_store_name["vs_name"],
-            "vector_store_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
+            "vector_store_type": CFG.VECTOR_STORE_TYPE,
+            "chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
         },
     )
     knowledge_embedding_client.knowledge_embedding()
@@ -4,10 +4,10 @@ import uuid
 from langchain.embeddings import HuggingFaceEmbeddings, logger
 
 from pilot.configs.config import Config
-from pilot.configs.model_config import LLM_MODEL_CONFIG
+from pilot.configs.model_config import LLM_MODEL_CONFIG, KNOWLEDGE_UPLOAD_ROOT_PATH
 from pilot.scene.base import ChatScene
 from pilot.scene.base_chat import BaseChat
-from pilot.embedding_engine.knowledge_embedding import KnowledgeEmbedding
+from pilot.embedding_engine.embedding_engine import EmbeddingEngine
 from pilot.embedding_engine.string_embedding import StringEmbedding
 from pilot.summary.mysql_db_summary import MysqlSummary
 from pilot.scene.chat_factory import ChatFactory
@@ -33,6 +33,8 @@ class DBSummaryClient:
         )
         vector_store_config = {
             "vector_store_name": dbname + "_summary",
+            "vector_store_type": CFG.VECTOR_STORE_TYPE,
+            "chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
             "embeddings": embeddings,
         }
         embedding = StringEmbedding(
@@ -60,6 +62,8 @@ class DBSummaryClient:
         ) in db_summary_client.get_table_summary().items():
             table_vector_store_config = {
                 "vector_store_name": dbname + "_" + table_name + "_ts",
+                "vector_store_type": CFG.VECTOR_STORE_TYPE,
+                "chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
                 "embeddings": embeddings,
             }
             embedding = StringEmbedding(
@@ -73,8 +77,10 @@ class DBSummaryClient:
     def get_db_summary(self, dbname, query, topk):
         vector_store_config = {
             "vector_store_name": dbname + "_profile",
+            "vector_store_type": CFG.VECTOR_STORE_TYPE,
+            "chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
         }
-        knowledge_embedding_client = KnowledgeEmbedding(
+        knowledge_embedding_client = EmbeddingEngine(
             model_name=LLM_MODEL_CONFIG[CFG.EMBEDDING_MODEL],
             vector_store_config=vector_store_config,
         )
@@ -86,8 +92,11 @@ class DBSummaryClient:
         """get user query related tables info"""
         vector_store_config = {
             "vector_store_name": dbname + "_summary",
-            "chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
+            "vector_store_type": CFG.VECTOR_STORE_TYPE,
+            "chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
         }
-        knowledge_embedding_client = KnowledgeEmbedding(
+        knowledge_embedding_client = EmbeddingEngine(
             model_name=LLM_MODEL_CONFIG[CFG.EMBEDDING_MODEL],
             vector_store_config=vector_store_config,
         )
@@ -109,9 +118,11 @@ class DBSummaryClient:
         for table in related_tables:
             vector_store_config = {
                 "vector_store_name": dbname + "_" + table + "_ts",
-                "chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
+                "vector_store_type": CFG.VECTOR_STORE_TYPE,
+                "chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
             }
-            knowledge_embedding_client = KnowledgeEmbedding(
-                file_path="",
+            knowledge_embedding_client = EmbeddingEngine(
                 model_name=LLM_MODEL_CONFIG[CFG.EMBEDDING_MODEL],
                 vector_store_config=vector_store_config,
             )
@@ -128,6 +139,8 @@ class DBSummaryClient:
     def init_db_profile(self, db_summary_client, dbname, embeddings):
         profile_store_config = {
             "vector_store_name": dbname + "_profile",
+            "chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
+            "vector_store_type": CFG.VECTOR_STORE_TYPE,
             "embeddings": embeddings,
         }
         embedding = StringEmbedding(
@@ -1,7 +1,6 @@
 import os
 
 from langchain.vectorstores import Chroma
-from pilot.configs.model_config import KNOWLEDGE_UPLOAD_ROOT_PATH
 from pilot.logs import logger
 from pilot.vector_store.vector_store_base import VectorStoreBase
 
@@ -13,7 +12,7 @@ class ChromaStore(VectorStoreBase):
         self.ctx = ctx
         self.embeddings = ctx["embeddings"]
         self.persist_dir = os.path.join(
-            KNOWLEDGE_UPLOAD_ROOT_PATH, ctx["vector_store_name"] + ".vectordb"
+            ctx["chroma_persist_path"], ctx["vector_store_name"] + ".vectordb"
         )
         self.vector_store_client = Chroma(
             persist_directory=self.persist_dir, embedding_function=self.embeddings
@@ -1,12 +1,18 @@
 from pilot.vector_store.chroma_store import ChromaStore
 
-# from pilot.vector_store.milvus_store import MilvusStore
+from pilot.vector_store.milvus_store import MilvusStore
 
-connector = {"Chroma": ChromaStore, "Milvus": None}
+connector = {"Chroma": ChromaStore, "Milvus": MilvusStore}
 
 
 class VectorStoreConnector:
-    """vector store connector, can connect different vector db provided load document api_v1 and similar search api_v1."""
+    """VectorStoreConnector, can connect different vector db provided load document api_v1 and similar search api_v1.
+    1.load_document:knowledge document source into vector store.(Chroma, Milvus, Weaviate)
+    2.similar_search: similarity search from vector_store
+    how to use reference:https://db-gpt.readthedocs.io/en/latest/modules/vector.html
+    how to integrate:https://db-gpt.readthedocs.io/en/latest/modules/vector/milvus/milvus.html
+
+    """
 
     def __init__(self, vector_store_type, ctx: {}) -> None:
         """initialize vector store connector."""
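A hedged sketch of using VectorStoreConnector directly, with the ctx keys that ChromaStore reads in this diff (vector_store_name, chroma_persist_path, embeddings); the model name and persist path are placeholders:

from langchain.embeddings import HuggingFaceEmbeddings
from langchain.schema import Document

from pilot.vector_store.connector import VectorStoreConnector

ctx = {
    "vector_store_name": "default",
    "vector_store_type": "Chroma",
    "chroma_persist_path": "/tmp/knowledge_upload",  # placeholder path
    "embeddings": HuggingFaceEmbeddings(model_name="text2vec"),  # placeholder model
}

vector_client = VectorStoreConnector(ctx["vector_store_type"], ctx)
vector_client.load_document(
    [Document(page_content="hello vector store", metadata={"source": "raw text"})]
)
related_docs = vector_client.similar_search("hello", 1)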
@@ -3,13 +3,9 @@ from typing import Any, Iterable, List, Optional, Tuple
 from langchain.docstore.document import Document
 from pymilvus import Collection, DataType, connections, utility
 
-from pilot.configs.config import Config
 from pilot.vector_store.vector_store_base import VectorStoreBase
 
-
-CFG = Config()
-
 
 class MilvusStore(VectorStoreBase):
     """Milvus database"""
 
@@ -22,10 +18,10 @@ class MilvusStore(VectorStoreBase):
         # self.configure(cfg)
 
         connect_kwargs = {}
-        self.uri = CFG.MILVUS_URL
-        self.port = CFG.MILVUS_PORT
-        self.username = CFG.MILVUS_USERNAME
-        self.password = CFG.MILVUS_PASSWORD
+        self.uri = ctx.get("milvus_url", None)
+        self.port = ctx.get("milvus_port", None)
+        self.username = ctx.get("milvus_username", None)
+        self.password = ctx.get("milvus_password", None)
         self.collection_name = ctx.get("vector_store_name", None)
         self.secure = ctx.get("secure", None)
         self.embedding = ctx.get("embeddings", None)
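Since MilvusStore now pulls its connection settings from ctx instead of the global Config, a Milvus-backed connector would presumably be configured with keys like these; host and port are placeholders, and a real embedding function must be supplied as in the Chroma sketch above:

from pilot.vector_store.connector import VectorStoreConnector

milvus_ctx = {
    "vector_store_name": "default",
    "vector_store_type": "Milvus",
    "milvus_url": "127.0.0.1",   # placeholder host
    "milvus_port": "19530",      # placeholder port
    "milvus_username": None,
    "milvus_password": None,
    "secure": None,
    "embeddings": None,          # supply a real embedding function here
}

vector_client = VectorStoreConnector(milvus_ctx["vector_store_type"], milvus_ctx)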