community[minor]: weight only quantization with intel-extension-for-transformers. (#14504)

Support weight-only quantization with intel-extension-for-transformers.
[Intel® Extension for Transformers](https://github.com/intel/intel-extension-for-transformers) is an innovative toolkit to accelerate Transformer-based models on Intel platforms, particularly effective on the 4th Gen Intel Xeon Scalable processor, codenamed [Sapphire Rapids](https://www.intel.com/content/www/us/en/products/docs/processors/xeon-accelerated/4th-gen-xeon-scalable-processors.html). The toolkit provides the following key features:

* Seamless user experience of model compression on Transformer-based models by extending [Hugging Face transformers](https://github.com/huggingface/transformers) APIs and leveraging [Intel® Neural Compressor](https://github.com/intel/neural-compressor).
* Advanced software optimizations and a unique compression-aware runtime.
* Optimized Transformer-based model packages.
* [NeuralChat](https://github.com/intel/intel-extension-for-transformers/blob/main/intel_extension_for_transformers/neural_chat), a customizable chatbot framework to create your own chatbot within minutes by leveraging a rich set of plugins and SOTA optimizations.
* [Inference](https://github.com/intel/intel-extension-for-transformers/blob/main/intel_extension_for_transformers/llm/runtime/graph) of Large Language Models (LLMs) in pure C/C++ with weight-only quantization kernels.

This PR integrates the weight-only quantization feature of intel-extension-for-transformers into LangChain.
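
A minimal usage sketch, mirroring the unit tests added in this PR (the model id and prompt are illustrative; `WeightOnlyQuantConfig` is provided by intel-extension-for-transformers):

```python
from intel_extension_for_transformers.transformers import WeightOnlyQuantConfig
from langchain_community.llms.weight_only_quantization import WeightOnlyQuantPipeline

# Quantize the model weights to NF4 while loading it through the new pipeline wrapper.
conf = WeightOnlyQuantConfig(weight_dtype="nf4")
llm = WeightOnlyQuantPipeline.from_model_id(
    model_id="google/flan-t5-large",
    task="text2text-generation",
    quantization_config=conf,
)
print(llm("Translate English to German: How old are you?"))
```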

* Unit test: lib/langchain/tests/integration_tests/llm/test_weight_only_quantization.py
* Notebook: docs/docs/integrations/llms/weight_only_quantization.ipynb
* Documentation: docs/docs/integrations/providers/weight_only_quantization.mdx
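
As additional context, here is a hedged sketch of how the wrapper could be composed into a LangChain chain; this illustrates typical usage rather than reproducing the notebook, and `PromptTemplate` plus the runnable `|` interface come from langchain-core:

```python
from langchain_core.prompts import PromptTemplate
from langchain_community.llms.weight_only_quantization import WeightOnlyQuantPipeline

# Load the model with 4-bit weight-only quantization, as in the tests below.
llm = WeightOnlyQuantPipeline.from_model_id(
    model_id="google/flan-t5-large",
    task="text2text-generation",
    load_in_4bit=True,
)

# Compose a prompt with the quantized LLM and run it once.
prompt = PromptTemplate.from_template("Summarize the following text:\n{text}")
chain = prompt | llm
print(chain.invoke({"text": "Intel Extension for Transformers accelerates Transformer models on Intel platforms."}))
```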

---------

Signed-off-by: Cheng, Penghui <penghui.cheng@intel.com>
Co-authored-by: Bagatur <22008038+baskaryan@users.noreply.github.com>
Co-authored-by: Bagatur <baskaryan@gmail.com>
commit cc407e8a1b (parent d6d843ec24)
Author: Cheng, Penghui
Date: 2024-04-04 00:21:34 +08:00 (committed by GitHub)
6 changed files with 632 additions and 1 deletion


@@ -0,0 +1,62 @@
"""Test HuggingFace Pipeline wrapper."""
from langchain_community.llms.weight_only_quantization import WeightOnlyQuantPipeline
model_id = "google/flan-t5-large"
def test_weight_only_quantization_with_config() -> None:
"""Test valid call to HuggingFace text2text model."""
from intel_extension_for_transformers.transformers import WeightOnlyQuantConfig
conf = WeightOnlyQuantConfig(weight_dtype="nf4")
llm = WeightOnlyQuantPipeline.from_model_id(
model_id=model_id, task="text2text-generation", quantization_config=conf
)
output = llm("Say foo:")
assert isinstance(output, str)
def test_weight_only_quantization_4bit() -> None:
"""Test valid call to HuggingFace text2text model."""
llm = WeightOnlyQuantPipeline.from_model_id(
model_id=model_id, task="text2text-generation", load_in_4bit=True
)
output = llm("Say foo:")
assert isinstance(output, str)
def test_weight_only_quantization_8bit() -> None:
"""Test valid call to HuggingFace text2text model."""
llm = WeightOnlyQuantPipeline.from_model_id(
model_id=model_id, task="text2text-generation", load_in_8bit=True
)
output = llm("Say foo:")
assert isinstance(output, str)
def test_init_with_pipeline() -> None:
"""Test initialization with a HF pipeline."""
from intel_extension_for_transformers.transformers import AutoModelForSeq2SeqLM
from transformers import AutoTokenizer, pipeline
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(
model_id, load_in_4bit=True, use_llm_runtime=False
)
pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
llm = WeightOnlyQuantPipeline(pipeline=pipe)
output = llm("Say foo:")
assert isinstance(output, str)
def text_weight_only_pipeline_summarization() -> None:
"""Test valid call to HuggingFace summarization model."""
from intel_extension_for_transformers.transformers import WeightOnlyQuantConfig
conf = WeightOnlyQuantConfig()
llm = WeightOnlyQuantPipeline.from_model_id(
model_id=model_id, task="summarization", quantization_config=conf
)
output = llm("Say foo:")
assert isinstance(output, str)