diff --git a/docs/modules/knowledge/markdown/markdown_embedding.md b/docs/modules/knowledge/markdown/markdown_embedding.md
index dfb42c7ff..72a5d2c38 100644
--- a/docs/modules/knowledge/markdown/markdown_embedding.md
+++ b/docs/modules/knowledge/markdown/markdown_embedding.md
@@ -6,13 +6,14 @@ inheriting the SourceEmbedding
 ```
 class MarkdownEmbedding(SourceEmbedding):
-    """pdf embedding for read pdf document."""
+    """markdown embedding for read markdown document."""
 
-    def __init__(self, file_path, vector_store_config):
-        """Initialize with pdf path."""
-        super().__init__(file_path, vector_store_config)
+    def __init__(self, file_path, vector_store_config, text_splitter):
+        """Initialize with markdown path."""
+        super().__init__(file_path, vector_store_config, text_splitter)
         self.file_path = file_path
         self.vector_store_config = vector_store_config
+        self.text_splitter = text_splitter or None
 ```
 implement read() and data_process()
 read() method allows you to read data and split data into chunk
@@ -22,12 +23,19 @@ read() method allows you to read data and split data into chunk
 def read(self):
     """Load from markdown path."""
     loader = EncodeTextLoader(self.file_path)
-    textsplitter = SpacyTextSplitter(
-        pipeline="zh_core_web_sm",
-        chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
-        chunk_overlap=100,
-    )
-    return loader.load_and_split(textsplitter)
+    if self.text_splitter is None:
+        try:
+            self.text_splitter = SpacyTextSplitter(
+                pipeline="zh_core_web_sm",
+                chunk_size=100,
+                chunk_overlap=100,
+            )
+        except Exception:
+            self.text_splitter = RecursiveCharacterTextSplitter(
+                chunk_size=100, chunk_overlap=50
+            )
+
+    return loader.load_and_split(self.text_splitter)
 ```
 data_process() method allows you to pre processing your ways
diff --git a/docs/modules/knowledge/pdf/pdf_embedding.md b/docs/modules/knowledge/pdf/pdf_embedding.md
index 198a5297c..0b349b406 100644
--- a/docs/modules/knowledge/pdf/pdf_embedding.md
+++ b/docs/modules/knowledge/pdf/pdf_embedding.md
@@ -7,11 +7,12 @@ inheriting the SourceEmbedding
 class PDFEmbedding(SourceEmbedding):
     """pdf embedding for read pdf document."""
 
-    def __init__(self, file_path, vector_store_config):
+    def __init__(self, file_path, vector_store_config, text_splitter):
         """Initialize with pdf path."""
-        super().__init__(file_path, vector_store_config)
+        super().__init__(file_path, vector_store_config, text_splitter)
         self.file_path = file_path
         self.vector_store_config = vector_store_config
+        self.text_splitter = text_splitter or None
 ```
 implement read() and data_process()
@@ -21,15 +22,19 @@ read() method allows you to read data and split data into chunk
 def read(self):
     """Load from pdf path."""
     loader = PyPDFLoader(self.file_path)
-    # textsplitter = CHNDocumentSplitter(
-    #     pdf=True, sentence_size=CFG.KNOWLEDGE_CHUNK_SIZE
-    # )
-    textsplitter = SpacyTextSplitter(
-        pipeline="zh_core_web_sm",
-        chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
-        chunk_overlap=100,
-    )
-    return loader.load_and_split(textsplitter)
+    if self.text_splitter is None:
+        try:
+            self.text_splitter = SpacyTextSplitter(
+                pipeline="zh_core_web_sm",
+                chunk_size=100,
+                chunk_overlap=100,
+            )
+        except Exception:
+            self.text_splitter = RecursiveCharacterTextSplitter(
+                chunk_size=100, chunk_overlap=50
+            )
+
+    return loader.load_and_split(self.text_splitter)
 ```
 data_process() method allows you to pre processing your ways
 ```
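With the new constructor parameter, a caller can inject its own splitter and bypass the built-in fallback entirely. A minimal usage sketch, assuming LangChain's `RecursiveCharacterTextSplitter`, a `vector_store_config` dict prepared elsewhere, and that `source_embedding()` is inherited from `SourceEmbedding` as the embedding_engine.py diff below suggests; the file path is a placeholder:

```python
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Pass a custom splitter so read() never has to build one itself.
splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
embedding = PDFEmbedding(
    file_path="./example.pdf",  # placeholder path
    vector_store_config=vector_store_config,  # assumed to be built elsewhere
    text_splitter=splitter,
)
embedding.source_embedding()  # chain: read -> data_process -> index_to_store
```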
diff --git a/docs/modules/knowledge/ppt/ppt_embedding.md b/docs/modules/knowledge/ppt/ppt_embedding.md
index e1d7754b9..c23b25a78 100644
--- a/docs/modules/knowledge/ppt/ppt_embedding.md
+++ b/docs/modules/knowledge/ppt/ppt_embedding.md
@@ -7,11 +7,17 @@ inheriting the SourceEmbedding
 class PPTEmbedding(SourceEmbedding):
     """ppt embedding for read ppt document."""
 
-    def __init__(self, file_path, vector_store_config):
-        """Initialize with pdf path."""
-        super().__init__(file_path, vector_store_config)
+    def __init__(
+        self,
+        file_path,
+        vector_store_config,
+        text_splitter: Optional[TextSplitter] = None,
+    ):
+        """Initialize with ppt path."""
+        super().__init__(file_path, vector_store_config, text_splitter=text_splitter)
         self.file_path = file_path
         self.vector_store_config = vector_store_config
+        self.text_splitter = text_splitter or None
 ```
 implement read() and data_process()
@@ -21,12 +27,19 @@ read() method allows you to read data and split data into chunk
 def read(self):
     """Load from ppt path."""
     loader = UnstructuredPowerPointLoader(self.file_path)
-    textsplitter = SpacyTextSplitter(
-        pipeline="zh_core_web_sm",
-        chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
-        chunk_overlap=200,
-    )
-    return loader.load_and_split(textsplitter)
+    if self.text_splitter is None:
+        try:
+            self.text_splitter = SpacyTextSplitter(
+                pipeline="zh_core_web_sm",
+                chunk_size=100,
+                chunk_overlap=100,
+            )
+        except Exception:
+            self.text_splitter = RecursiveCharacterTextSplitter(
+                chunk_size=100, chunk_overlap=50
+            )
+
+    return loader.load_and_split(self.text_splitter)
 ```
 data_process() method allows you to pre processing your ways
 ```
diff --git a/docs/modules/knowledge/url/url_embedding.md b/docs/modules/knowledge/url/url_embedding.md
index 637fa88c0..d485d30b1 100644
--- a/docs/modules/knowledge/url/url_embedding.md
+++ b/docs/modules/knowledge/url/url_embedding.md
@@ -7,11 +7,17 @@ inheriting the SourceEmbedding
 class URLEmbedding(SourceEmbedding):
     """url embedding for read url document."""
 
-    def __init__(self, file_path, vector_store_config):
-        """Initialize with url path."""
-        super().__init__(file_path, vector_store_config)
+    def __init__(
+        self,
+        file_path,
+        vector_store_config,
+        text_splitter: Optional[TextSplitter] = None,
+    ):
+        """Initialize with url path."""
+        super().__init__(file_path, vector_store_config, text_splitter=text_splitter)
         self.file_path = file_path
         self.vector_store_config = vector_store_config
+        self.text_splitter = text_splitter or None
 ```
 implement read() and data_process()
@@ -21,15 +27,19 @@ read() method allows you to read data and split data into chunk
 def read(self):
     """Load from url path."""
     loader = WebBaseLoader(web_path=self.file_path)
-    if CFG.LANGUAGE == "en":
-        text_splitter = CharacterTextSplitter(
-            chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
-            chunk_overlap=20,
-            length_function=len,
-        )
-    else:
-        text_splitter = CHNDocumentSplitter(pdf=True, sentence_size=1000)
-    return loader.load_and_split(text_splitter)
+    if self.text_splitter is None:
+        try:
+            self.text_splitter = SpacyTextSplitter(
+                pipeline="zh_core_web_sm",
+                chunk_size=100,
+                chunk_overlap=100,
+            )
+        except Exception:
+            self.text_splitter = RecursiveCharacterTextSplitter(
+                chunk_size=100, chunk_overlap=50
+            )
+
+    return loader.load_and_split(self.text_splitter)
 ```
 data_process() method allows you to pre processing your ways
 ```
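The same try/except selection now appears verbatim in every `read()` implementation above. As a standalone sketch, the shared pattern could be factored into a helper like the hypothetical `build_default_text_splitter` below (not part of the codebase; the `zh_core_web_sm` spaCy model must be installed for the first branch to succeed):

```python
from langchain.text_splitter import (
    RecursiveCharacterTextSplitter,
    SpacyTextSplitter,
    TextSplitter,
)


def build_default_text_splitter() -> TextSplitter:
    """Hypothetical helper mirroring the fallback used by the read() methods."""
    try:
        # Preferred: sentence-aware splitting via the zh_core_web_sm spaCy model.
        return SpacyTextSplitter(
            pipeline="zh_core_web_sm", chunk_size=100, chunk_overlap=100
        )
    except Exception:
        # Fallback when spaCy or the model is unavailable: character-based splitting.
        return RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=50)
```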
diff --git a/docs/modules/knowledge/word/word_embedding.md b/docs/modules/knowledge/word/word_embedding.md
index d978bf860..38a966f0e 100644
--- a/docs/modules/knowledge/word/word_embedding.md
+++ b/docs/modules/knowledge/word/word_embedding.md
@@ -7,11 +7,12 @@ inheriting the SourceEmbedding
 class WordEmbedding(SourceEmbedding):
     """word embedding for read word document."""
 
-    def __init__(self, file_path, vector_store_config):
-        """Initialize with word path."""
-        super().__init__(file_path, vector_store_config)
+    def __init__(self, file_path, vector_store_config, text_splitter):
+        """Initialize with word path."""
+        super().__init__(file_path, vector_store_config, text_splitter)
         self.file_path = file_path
         self.vector_store_config = vector_store_config
+        self.text_splitter = text_splitter or None
 ```
 implement read() and data_process()
@@ -21,10 +22,19 @@ read() method allows you to read data and split data into chunk
 def read(self):
     """Load from word path."""
     loader = UnstructuredWordDocumentLoader(self.file_path)
-    textsplitter = CHNDocumentSplitter(
-        pdf=True, sentence_size=CFG.KNOWLEDGE_CHUNK_SIZE
-    )
-    return loader.load_and_split(textsplitter)
+    if self.text_splitter is None:
+        try:
+            self.text_splitter = SpacyTextSplitter(
+                pipeline="zh_core_web_sm",
+                chunk_size=100,
+                chunk_overlap=100,
+            )
+        except Exception:
+            self.text_splitter = RecursiveCharacterTextSplitter(
+                chunk_size=100, chunk_overlap=50
+            )
+
+    return loader.load_and_split(self.text_splitter)
 ```
 data_process() method allows you to pre processing your ways
 ```
diff --git a/pilot/configs/config.py b/pilot/configs/config.py
index 1c8026df5..ee407e6a2 100644
--- a/pilot/configs/config.py
+++ b/pilot/configs/config.py
@@ -28,7 +28,7 @@ class Config(metaclass=Singleton):
         self.skip_reprompt = False
         self.temperature = float(os.getenv("TEMPERATURE", 0.7))
 
-        self.NUM_GPUS = int(os.getenv("NUM_GPUS",1))
+        self.NUM_GPUS = int(os.getenv("NUM_GPUS", 1))
 
         self.execute_local_commands = (
             os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
diff --git a/pilot/embedding_engine/embedding_engine.py b/pilot/embedding_engine/embedding_engine.py
index 4d42fc3d0..ba059808d 100644
--- a/pilot/embedding_engine/embedding_engine.py
+++ b/pilot/embedding_engine/embedding_engine.py
@@ -8,6 +8,13 @@ from pilot.vector_store.connector import VectorStoreConnector
 
 
 class EmbeddingEngine:
+    """EmbeddingEngine provides a chained process (read -> text_split -> data_process -> index_store) for embedding knowledge documents into a vector store.
+    1. knowledge_embedding: embed a knowledge document source into the vector store (Chroma, Milvus, Weaviate).
+    2. similar_search: similarity search from the vector store.
+    how to use reference: https://db-gpt.readthedocs.io/en/latest/modules/knowledge.html
+    how to integrate: https://db-gpt.readthedocs.io/en/latest/modules/knowledge/pdf/pdf_embedding.html
+    """
+
     def __init__(
         self,
         model_name,
@@ -24,14 +31,17 @@ class EmbeddingEngine:
         self.vector_store_config["embeddings"] = self.embeddings
 
     def knowledge_embedding(self):
+        """Source embedding is a chained process: read -> text_split -> data_process -> index_store."""
         self.knowledge_embedding_client = self.init_knowledge_embedding()
         self.knowledge_embedding_client.source_embedding()
 
     def knowledge_embedding_batch(self, docs):
+        """Deprecated."""
         # docs = self.knowledge_embedding_client.read_batch()
         return self.knowledge_embedding_client.index_to_store(docs)
 
     def read(self):
+        """Deprecated."""
        self.knowledge_embedding_client = self.init_knowledge_embedding()
        return self.knowledge_embedding_client.read_batch()
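A usage sketch of the chain the new docstring describes. Only `model_name` is visible in this hunk, so the remaining constructor arguments and all values are assumptions for illustration:

```python
# Hypothetical end-to-end usage; argument names beyond model_name are assumed.
engine = EmbeddingEngine(
    model_name="text2vec-large-chinese",  # placeholder embedding model
    vector_store_config={"vector_store_name": "my_docs"},  # assumed config keys
    knowledge_source="./example.pdf",  # assumed parameter; placeholder path
)
engine.knowledge_embedding()  # read -> text_split -> data_process -> index_store
```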
diff --git a/pilot/model/adapter.py b/pilot/model/adapter.py
index 16179af6b..2d420c02f 100644
--- a/pilot/model/adapter.py
+++ b/pilot/model/adapter.py
@@ -73,6 +73,7 @@ class VicunaLLMAdapater(BaseLLMAdaper):
         )
         return model, tokenizer
 
+
 def auto_configure_device_map(num_gpus):
     """handling multi gpu calls"""
     # transformer.word_embeddings occupying 1 floors
@@ -81,18 +82,18 @@ def auto_configure_device_map(num_gpus):
     # Allocate a total of 30 layers to number On gpus cards
     num_trans_layers = 28
     per_gpu_layers = 30 / num_gpus
-    #Bugfix: call torch.embedding in Linux and the incoming weight and input are not on the same device, resulting in a RuntimeError
-    #Under Windows, model. device will be set to transformer. word_ Embeddings. device
-    #Under Linux, model. device will be set to lm_ Head.device
-    #When calling chat or stream_ During chat, input_ IDS will be placed on model. device
-    #If transformer. word_ If embeddings. device and model. device are different, it will cause a RuntimeError
-    #Therefore, here we will transform. word_ Embeddings, transformer. final_ Layernorm, lm_ Put all the heads on the first card
+    # Bugfix: calling torch.embedding on Linux with the weight and input on different devices raises a RuntimeError
+    # On Windows, model.device will be set to transformer.word_embeddings.device
+    # On Linux, model.device will be set to lm_head.device
+    # When calling chat or stream_chat, input_ids will be placed on model.device
+    # If transformer.word_embeddings.device and model.device differ, a RuntimeError is raised
+    # Therefore, we put transformer.word_embeddings, transformer.final_layernorm, and lm_head all on the first card
     device_map = {
-        'transformer.embedding.word_embeddings': 0,
-        'transformer.encoder.final_layernorm': 0,
-        'transformer.output_layer': 0,
-        'transformer.rotary_pos_emb': 0,
-        'lm_head': 0
+        "transformer.embedding.word_embeddings": 0,
+        "transformer.encoder.final_layernorm": 0,
+        "transformer.output_layer": 0,
+        "transformer.rotary_pos_emb": 0,
+        "lm_head": 0,
     }
 
     used = 2
@@ -102,7 +103,7 @@ def auto_configure_device_map(num_gpus):
             gpu_target += 1
             used = 0
         assert gpu_target < num_gpus
-        device_map[f'transformer.encoder.layers.{i}'] = gpu_target
+        device_map[f"transformer.encoder.layers.{i}"] = gpu_target
         used += 1
 
     return device_map
@@ -114,7 +115,13 @@ class ChatGLMAdapater(BaseLLMAdaper):
     def match(self, model_path: str):
         return "chatglm" in model_path
 
-    def loader(self, model_path: str, from_pretrained_kwargs: dict, device_map=None, num_gpus=CFG.NUM_GPUS):
+    def loader(
+        self,
+        model_path: str,
+        from_pretrained_kwargs: dict,
+        device_map=None,
+        num_gpus=CFG.NUM_GPUS,
+    ):
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
 
         if DEVICE != "cuda":
@@ -125,10 +132,8 @@ class ChatGLMAdapater(BaseLLMAdaper):
         else:
             model = (
                 AutoModel.from_pretrained(
-                    model_path, trust_remote_code=True,
-                    **from_pretrained_kwargs
-                )
-                .half()
+                    model_path, trust_remote_code=True, **from_pretrained_kwargs
+                ).half()
                 # .cuda()
             )
             from accelerate import dispatch_model
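To make the allocation concrete: with `num_gpus=2`, `per_gpu_layers` is 15.0 and `used` starts at 2 to account for the embedding/output layers pinned to the first card, so transformer layers 0-12 land on GPU 0 and layers 13-27 on GPU 1. A small check, assuming the function exactly as shown above:

```python
device_map = auto_configure_device_map(2)

assert device_map["transformer.embedding.word_embeddings"] == 0
assert device_map["lm_head"] == 0
assert device_map["transformer.encoder.layers.12"] == 0  # last layer on GPU 0
assert device_map["transformer.encoder.layers.13"] == 1  # first layer on GPU 1
assert device_map["transformer.encoder.layers.27"] == 1
```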
diff --git a/pilot/vector_store/connector.py b/pilot/vector_store/connector.py
index c8d7d3f6f..9b4e3b9e8 100644
--- a/pilot/vector_store/connector.py
+++ b/pilot/vector_store/connector.py
@@ -6,7 +6,13 @@ connector = {"Chroma": ChromaStore, "Milvus": MilvusStore}
 
 
 class VectorStoreConnector:
-    """vector store connector, can connect different vector db provided load document api_v1 and similar search api_v1."""
+    """VectorStoreConnector can connect to different vector databases, providing a load document API and a similar search API.
+    1. load_document: load a knowledge document source into the vector store (Chroma, Milvus, Weaviate).
+    2. similar_search: similarity search from the vector store.
+    how to use reference: https://db-gpt.readthedocs.io/en/latest/modules/vector.html
+    how to integrate: https://db-gpt.readthedocs.io/en/latest/modules/vector/milvus/milvus.html
+    """
 
     def __init__(self, vector_store_type, ctx: {}) -> None:
         """initialize vector store connector."""
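A usage sketch following the connector docstring. Only the `embeddings` key is visible in these diffs (set by `EmbeddingEngine`), so the other `ctx` keys and the client attribute and method names are assumptions:

```python
# Hypothetical usage; ctx keys other than "embeddings" are assumed.
ctx = {"vector_store_name": "my_docs", "embeddings": embeddings}
connector = VectorStoreConnector("Chroma", ctx)  # key into the connector registry

connector.vector_store_client.load_document(docs)  # assumed client attribute
results = connector.vector_store_client.similar_search("query text", 5)
```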