From 3a7d2cf443d5c52ee68f43d4b1c0c8c8e49df2f3 Mon Sep 17 00:00:00 2001
From: yuwenzho
Date: Wed, 27 Mar 2024 00:22:06 -0700
Subject: [PATCH] community[minor]: Add ITREX optimized Embeddings (#18474)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Introduction

[Intel® Extension for Transformers](https://github.com/intel/intel-extension-for-transformers) is an innovative toolkit designed to accelerate GenAI/LLM everywhere with the optimal performance of Transformer-based models on various Intel platforms.

Description

Adding ITREX runtime embeddings using intel-extension-for-transformers:
- added mdx documentation and example notebooks
- added embedding import testing

---------

Signed-off-by: yuwenzho
Co-authored-by: Bagatur
---
 docs/docs/integrations/providers/intel.mdx    |  60 +++++
 .../integrations/providers/optimum_intel.mdx  |  26 ---
 .../integrations/text_embedding/itrex.ipynb   |  85 +++++++
 docs/vercel.json                              |   4 +
 .../embeddings/__init__.py                    |   1 +
 .../langchain_community/embeddings/itrex.py   | 214 ++++++++++++++++++
 .../unit_tests/embeddings/test_imports.py     |   1 +
 7 files changed, 365 insertions(+), 26 deletions(-)
 create mode 100644 docs/docs/integrations/providers/intel.mdx
 delete mode 100644 docs/docs/integrations/providers/optimum_intel.mdx
 create mode 100644 docs/docs/integrations/text_embedding/itrex.ipynb
 create mode 100644 libs/community/langchain_community/embeddings/itrex.py

diff --git a/docs/docs/integrations/providers/intel.mdx b/docs/docs/integrations/providers/intel.mdx
new file mode 100644
index 00000000000..9338e7e2632
--- /dev/null
+++ b/docs/docs/integrations/providers/intel.mdx
@@ -0,0 +1,60 @@
+# Intel
+
+>[Optimum Intel](https://github.com/huggingface/optimum-intel?tab=readme-ov-file#optimum-intel) is the interface between the 🤗 Transformers and Diffusers libraries and the different tools and libraries provided by Intel to accelerate end-to-end pipelines on Intel architectures.
+
+>[Intel® Extension for Transformers](https://github.com/intel/intel-extension-for-transformers?tab=readme-ov-file#intel-extension-for-transformers) (ITREX) is an innovative toolkit designed to accelerate GenAI/LLM everywhere with the optimal performance of Transformer-based models on various Intel platforms, including Intel Gaudi2, Intel CPU, and Intel GPU.
+
+This page covers how to use optimum-intel and ITREX with LangChain.
+
+## Optimum-intel
+
+All functionality related to [optimum-intel](https://github.com/huggingface/optimum-intel.git) and [IPEX](https://github.com/intel/intel-extension-for-pytorch).
+
+### Installation
+
+Install optimum-intel and IPEX using:
+
+```bash
+pip install optimum[neural-compressor]
+pip install intel_extension_for_pytorch
+```
+
+Please follow the installation instructions as specified below:
+
+* Install optimum-intel as shown [here](https://github.com/huggingface/optimum-intel).
+* Install IPEX as shown [here](https://intel.github.io/intel-extension-for-pytorch/index.html#installation?platform=cpu&version=v2.2.0%2Bcpu).
+
+### Embedding Models
+
+See a [usage example](/docs/integrations/text_embedding/optimum_intel).
+We also offer a full tutorial notebook, `rag_with_quantized_embeddings.ipynb`, on using the embedder in a RAG pipeline, in the cookbook directory.
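+
+The snippet below is a minimal instantiation sketch; the model id and query instruction are assumptions borrowed from the cookbook notebook, and any compatible quantized bi-encoder from the Hugging Face Hub should work:
+
+```python
+from langchain_community.embeddings import QuantizedBiEncoderEmbeddings
+
+model = QuantizedBiEncoderEmbeddings(
+    model_name="Intel/bge-small-en-v1.5-rag-int8-static",  # assumed model id
+    encode_kwargs={"normalize_embeddings": True},
+    query_instruction="Represent this query for retrieval: ",  # assumed instruction
+)
+
+# Standard LangChain Embeddings interface.
+doc_vectors = model.embed_documents(["first document", "second document"])
+query_vector = model.embed_query("a question about the documents")
+```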
+
+## Intel® Extension for Transformers (ITREX)
+
+All functionality related to [intel-extension-for-transformers](https://github.com/intel/intel-extension-for-transformers).
+
+### Installation
+
+Install intel-extension-for-transformers. For system requirements and other installation tips, please refer to the [Installation Guide](https://github.com/intel/intel-extension-for-transformers/blob/main/docs/installation.md).
+
+```bash
+pip install intel-extension-for-transformers
+```
+
+Install the other required packages:
+
+```bash
+pip install -U torch onnx accelerate datasets
+```
+
+### Embedding Models
+
+See a [usage example](/docs/integrations/text_embedding/itrex).
+
+```python
+from langchain_community.embeddings import QuantizedBgeEmbeddings
+```
diff --git a/docs/docs/integrations/providers/optimum_intel.mdx b/docs/docs/integrations/providers/optimum_intel.mdx
deleted file mode 100644
index 9d112a9d2a9..00000000000
--- a/docs/docs/integrations/providers/optimum_intel.mdx
+++ /dev/null
@@ -1,26 +0,0 @@
-# Optimum-intel
-
-All functionality related to the [optimum-intel](https://github.com/huggingface/optimum-intel.git) and [IPEX](https://github.com/intel/intel-extension-for-pytorch).
-
-## Installation
-
-Install using optimum-intel and ipex using:
-
-```bash
-pip install optimum[neural-compressor]
-pip install intel_extension_for_pytorch
-```
-
-Please follow the installation instructions as specified below:
-
-* Install optimum-intel as shown [here](https://github.com/huggingface/optimum-intel).
-* Install IPEX as shown [here](https://intel.github.io/intel-extension-for-pytorch/index.html#installation?platform=cpu&version=v2.2.0%2Bcpu).
-
-## Embedding Models
-
-See a [usage example](/docs/integrations/text_embedding/optimum_intel).
-We also offer a full tutorial notebook "rag_with_quantized_embeddings.ipynb" for using the embedder in a RAG pipeline in the cookbook dir.
-
-```python
-from langchain_community.embeddings import QuantizedBiEncoderEmbeddings
-```
\ No newline at end of file
diff --git a/docs/docs/integrations/text_embedding/itrex.ipynb b/docs/docs/integrations/text_embedding/itrex.ipynb
new file mode 100644
index 00000000000..ec94ca4f97f
--- /dev/null
+++ b/docs/docs/integrations/text_embedding/itrex.ipynb
@@ -0,0 +1,85 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Intel® Extension for Transformers Quantized Text Embeddings\n",
+    "\n",
+    "Load quantized BGE embedding models generated by [Intel® Extension for Transformers](https://github.com/intel/intel-extension-for-transformers) (ITREX) and use ITREX [Neural Engine](https://github.com/intel/intel-extension-for-transformers/blob/main/intel_extension_for_transformers/llm/runtime/deprecated/docs/Installation.md), a high-performance NLP backend, to accelerate the inference of models without compromising accuracy.\n",
+    "\n",
+    "Refer to our blog post [Efficient Natural Language Embedding Models with Intel Extension for Transformers](https://medium.com/intel-analytics-software/efficient-natural-language-embedding-models-with-intel-extension-for-transformers-2b6fcd0f8f34) and the [BGE optimization example](https://github.com/intel/intel-extension-for-transformers/tree/main/examples/huggingface/pytorch/text-embedding/deployment/mteb/bge) for more details."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/yuwenzho/.conda/envs/bge/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+      "  from .autonotebook import tqdm as notebook_tqdm\n",
+      "2024-03-04 10:17:17 [INFO] Start to extarct onnx model ops...\n",
+      "2024-03-04 10:17:17 [INFO] Extract onnxruntime model done...\n",
+      "2024-03-04 10:17:17 [INFO] Start to implement Sub-Graph matching and replacing...\n",
+      "2024-03-04 10:17:18 [INFO] Sub-Graph match and replace done...\n"
+     ]
+    }
+   ],
+   "source": [
+    "from langchain_community.embeddings import QuantizedBgeEmbeddings\n",
+    "\n",
+    "model_name = \"Intel/bge-small-en-v1.5-sts-int8-static-inc\"\n",
+    "encode_kwargs = {\"normalize_embeddings\": True}  # set True to compute cosine similarity\n",
+    "\n",
+    "model = QuantizedBgeEmbeddings(\n",
+    "    model_name=model_name,\n",
+    "    encode_kwargs=encode_kwargs,\n",
+    "    query_instruction=\"Represent this sentence for searching relevant passages: \",\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Usage"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "text = \"This is a test document.\"\n",
+    "query_result = model.embed_query(text)\n",
+    "doc_result = model.embed_documents([text])"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "yuwen",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.0"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/vercel.json b/docs/vercel.json
index 38de5f571bc..6171ec3a762 100644
--- a/docs/vercel.json
+++ b/docs/vercel.json
@@ -1,5 +1,9 @@
 {
   "redirects": [
+    {
+      "source": "/docs/integrations/providers/optimum_intel",
+      "destination": "/docs/integrations/providers/intel"
+    },
     {
       "source": "/docs/integrations/providers/facebook_chat",
       "destination": "/docs/integrations/providers/facebook"
diff --git a/libs/community/langchain_community/embeddings/__init__.py b/libs/community/langchain_community/embeddings/__init__.py
index b68c5115599..208d62a9f01 100644
--- a/libs/community/langchain_community/embeddings/__init__.py
+++ b/libs/community/langchain_community/embeddings/__init__.py
@@ -67,6 +67,7 @@ _module_lookup = {
     "OllamaEmbeddings": "langchain_community.embeddings.ollama",
     "OpenAIEmbeddings": "langchain_community.embeddings.openai",
     "QianfanEmbeddingsEndpoint": "langchain_community.embeddings.baidu_qianfan_endpoint",  # noqa: E501
+    "QuantizedBgeEmbeddings": "langchain_community.embeddings.itrex",
     "QuantizedBiEncoderEmbeddings": "langchain_community.embeddings.optimum_intel",
     "SagemakerEndpointEmbeddings": "langchain_community.embeddings.sagemaker_endpoint",
     "SelfHostedEmbeddings": "langchain_community.embeddings.self_hosted",
diff --git a/libs/community/langchain_community/embeddings/itrex.py b/libs/community/langchain_community/embeddings/itrex.py
new file mode 100644
index 00000000000..d3528fb70b9
--- /dev/null
+++ b/libs/community/langchain_community/embeddings/itrex.py
@@ -0,0 +1,214 @@
+import importlib.util
+import os
+from typing import Any, Dict, List, Optional
+
+from langchain_core.embeddings import Embeddings
+from langchain_core.pydantic_v1 import BaseModel, Extra
+
+
+class QuantizedBgeEmbeddings(BaseModel, Embeddings):
+    """Leverage the ITREX runtime to unlock the performance of compressed NLP models.
+
+    Please ensure that you have installed intel-extension-for-transformers.
+
+    Input:
+        model_name: str = Model name.
+        max_seq_len: int = The maximum sequence length for tokenization. (default 512)
+        pooling_strategy: str =
+            "mean" or "cls", pooling strategy for the final layer. (default "mean")
+        query_instruction: Optional[str] =
+            An instruction to add to the query before embedding. (default None)
+        document_instruction: Optional[str] =
+            An instruction to add to each document before embedding. (default None)
+        padding: Optional[bool] =
+            Whether to add padding during tokenization or not. (default True)
+        model_kwargs: Optional[Dict] =
+            Parameters to add to the model during initialization. (default {})
+        encode_kwargs: Optional[Dict] =
+            Parameters to add during the embedding forward pass. (default {})
+        onnx_file_name: Optional[str] =
+            File name of the ONNX-optimized model exported by ITREX.
+            (default "int8-model.onnx")
+
+    Example:
+        .. code-block:: python
+
+            from langchain_community.embeddings import QuantizedBgeEmbeddings
+
+            model_name = "Intel/bge-small-en-v1.5-sts-int8-static-inc"
+            encode_kwargs = {'normalize_embeddings': True}
+            hf = QuantizedBgeEmbeddings(
+                model_name,
+                encode_kwargs=encode_kwargs,
+                query_instruction="Represent this sentence for searching relevant passages: "
+            )
+    """  # noqa: E501
+
+    def __init__(
+        self,
+        model_name: str,
+        *,
+        max_seq_len: int = 512,
+        pooling_strategy: str = "mean",  # "mean" or "cls"
+        query_instruction: Optional[str] = None,
+        document_instruction: Optional[str] = None,
+        padding: bool = True,
+        model_kwargs: Optional[Dict] = None,
+        encode_kwargs: Optional[Dict] = None,
+        onnx_file_name: Optional[str] = "int8-model.onnx",
+        **kwargs: Any,
+    ) -> None:
+        super().__init__(**kwargs)
+
+        # check intel_extension_for_transformers python package
+        if importlib.util.find_spec("intel_extension_for_transformers") is None:
+            raise ImportError(
+                "Could not import intel_extension_for_transformers python package. "
+                "Please install it with "
+                "`pip install -U intel-extension-for-transformers`."
+            )
+
+        # check torch python package
+        if importlib.util.find_spec("torch") is None:
+            raise ImportError(
+                "Could not import torch python package. "
+                "Please install it with `pip install -U torch`."
+            )
+
+        # check onnx python package
+        if importlib.util.find_spec("onnx") is None:
+            raise ImportError(
+                "Could not import onnx python package. "
+                "Please install it with `pip install -U onnx`."
+            )
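+
+        # Persist tokenizer and runtime settings; `encode_kwargs` carries the
+        # normalization flag and batch size used during the embedding pass.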
+        self.model_name_or_path = model_name
+        self.max_seq_len = max_seq_len
+        self.pooling = pooling_strategy
+        self.padding = padding
+        self.encode_kwargs = encode_kwargs or {}
+        self.model_kwargs = model_kwargs or {}
+
+        self.normalize = self.encode_kwargs.get("normalize_embeddings", False)
+        self.batch_size = self.encode_kwargs.get("batch_size", 32)
+
+        self.query_instruction = query_instruction
+        self.document_instruction = document_instruction
+        self.onnx_file_name = onnx_file_name
+
+        self.load_model()
+
+    def load_model(self) -> None:
+        from huggingface_hub import hf_hub_download
+        from intel_extension_for_transformers.transformers import AutoModel
+        from transformers import AutoConfig, AutoTokenizer
+
+        self.hidden_size = AutoConfig.from_pretrained(
+            self.model_name_or_path
+        ).hidden_size
+        self.transformer_tokenizer = AutoTokenizer.from_pretrained(
+            self.model_name_or_path,
+        )
+        # Prefer a local ONNX file; fall back to the Hugging Face Hub.
+        onnx_model_path = os.path.join(self.model_name_or_path, self.onnx_file_name)  # type: ignore[arg-type]
+        if not os.path.exists(onnx_model_path):
+            onnx_model_path = hf_hub_download(
+                self.model_name_or_path, filename=self.onnx_file_name
+            )
+        self.transformer_model = AutoModel.from_pretrained(
+            onnx_model_path, use_embedding_runtime=True
+        )
+
+    class Config:
+        """Configuration for this pydantic object."""
+
+        extra = Extra.allow
+
+    def _embed(self, inputs: Any) -> Any:
+        import torch
+
+        # Run the ITREX Neural Engine on the tokenized inputs.
+        engine_input = [value for value in inputs.values()]
+        outputs = self.transformer_model.generate(engine_input)
+        if "last_hidden_state:0" in outputs:
+            last_hidden_state = outputs["last_hidden_state:0"]
+        else:
+            last_hidden_state = [out for out in outputs.values()][0]
+        last_hidden_state = torch.tensor(last_hidden_state).reshape(
+            inputs["input_ids"].shape[0], inputs["input_ids"].shape[1], self.hidden_size
+        )
+        # Pool token embeddings into one vector per input text.
+        if self.pooling == "mean":
+            emb = self._mean_pooling(last_hidden_state, inputs["attention_mask"])
+        elif self.pooling == "cls":
+            emb = self._cls_pooling(last_hidden_state)
+        else:
+            raise ValueError("pooling method not supported")
+
+        if self.normalize:
+            emb = torch.nn.functional.normalize(emb, p=2, dim=1)
+        return emb
+
+    @staticmethod
+    def _cls_pooling(last_hidden_state: Any) -> Any:
+        return last_hidden_state[:, 0]
+
+    @staticmethod
+    def _mean_pooling(last_hidden_state: Any, attention_mask: Any) -> Any:
+        try:
+            import torch
+        except ImportError as e:
+            raise ImportError(
+                "Unable to import torch, please install with `pip install -U torch`."
+            ) from e
+        # Average token embeddings, ignoring padding positions.
+        input_mask_expanded = (
+            attention_mask.unsqueeze(-1).expand(last_hidden_state.size()).float()
+        )
+        sum_embeddings = torch.sum(last_hidden_state * input_mask_expanded, 1)
+        sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)
+        return sum_embeddings / sum_mask
+
+    def _embed_text(self, texts: List[str]) -> List[List[float]]:
+        inputs = self.transformer_tokenizer(
+            texts,
+            max_length=self.max_seq_len,
+            truncation=True,
+            padding=self.padding,
+            return_tensors="pt",
+        )
+        return self._embed(inputs).tolist()
+
+    def embed_documents(self, texts: List[str]) -> List[List[float]]:
+        """Embed a list of text documents using the Optimized Embedder model.
+
+        Input:
+            texts: List[str] = List of text documents to embed.
+        Output:
+            List[List[float]] = The embeddings of each text document.
+        """
+        try:
+            import pandas as pd
+        except ImportError as e:
+            raise ImportError(
+                "Unable to import pandas, please install with `pip install -U pandas`."
+            ) from e
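+        # pandas is used only to split the inputs into batches of
+        # `self.batch_size` before they are fed to the engine.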
+        docs = [
+            self.document_instruction + d if self.document_instruction else d
+            for d in texts
+        ]
+
+        # group into batches
+        text_list_df = pd.DataFrame(docs, columns=["texts"]).reset_index()
+
+        # assign each example with its batch
+        text_list_df["batch_index"] = text_list_df["index"] // self.batch_size
+
+        # create groups
+        batches = list(text_list_df.groupby(["batch_index"])["texts"].apply(list))
+
+        vectors = []
+        for batch in batches:
+            vectors += self._embed_text(batch)
+        return vectors
+
+    def embed_query(self, text: str) -> List[float]:
+        """Embed a query, prepending `query_instruction` when it is set."""
+        if self.query_instruction:
+            text = self.query_instruction + text
+        return self._embed_text([text])[0]
diff --git a/libs/community/tests/unit_tests/embeddings/test_imports.py b/libs/community/tests/unit_tests/embeddings/test_imports.py
index 48e3b6cf65e..a41cdc5dadb 100644
--- a/libs/community/tests/unit_tests/embeddings/test_imports.py
+++ b/libs/community/tests/unit_tests/embeddings/test_imports.py
@@ -65,6 +65,7 @@ EXPECTED_ALL = [
     "QuantizedBiEncoderEmbeddings",
     "NeMoEmbeddings",
     "SparkLLMTextEmbeddings",
+    "QuantizedBgeEmbeddings",
     "PremAIEmbeddings",
     "YandexGPTEmbeddings",
 ]
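
As a quick sanity check beyond the import test, a usage sketch like the following should work once intel-extension-for-transformers, torch, and onnx are installed; the model id and query instruction are taken from the notebook above, and numpy is assumed only for the similarity arithmetic:

```python
import numpy as np

from langchain_community.embeddings import QuantizedBgeEmbeddings

model = QuantizedBgeEmbeddings(
    model_name="Intel/bge-small-en-v1.5-sts-int8-static-inc",
    encode_kwargs={"normalize_embeddings": True},
    query_instruction="Represent this sentence for searching relevant passages: ",
)

docs = [
    "ITREX accelerates Transformer models on Intel platforms.",
    "Bananas are yellow.",
]
doc_vectors = np.array(model.embed_documents(docs))
query_vector = np.array(model.embed_query("How can I speed up LLM inference on a CPU?"))

# With normalize_embeddings=True the vectors are unit length, so the dot
# product equals cosine similarity.
scores = doc_vectors @ query_vector
for score, doc in sorted(zip(scores, docs), reverse=True):
    print(f"{score:.3f}  {doc}")
```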