From d9163e7afa0e5e975d36b7482c6a101e5c5dc375 Mon Sep 17 00:00:00 2001 From: Changyong Um Date: Fri, 1 Nov 2024 01:44:35 +0900 Subject: [PATCH] community[docs]: Add content for the LoRA adapter in the vLLM page. (#27788) **Description:** I added code for lora_request in the community package, but I forgot to add content to the vLLM page. So, I will do that now. #27731 --------- Co-authored-by: Um Changyong --- docs/docs/integrations/llms/vllm.ipynb | 29 ++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/docs/docs/integrations/llms/vllm.ipynb b/docs/docs/integrations/llms/vllm.ipynb index 6d45b102dc1..1e1baff9631 100644 --- a/docs/docs/integrations/llms/vllm.ipynb +++ b/docs/docs/integrations/llms/vllm.ipynb @@ -246,6 +246,35 @@ ")\n", "print(llm.invoke(\"Rome is\"))" ] + }, + { + "cell_type": "markdown", + "id": "bd3f0f51", + "metadata": {}, + "source": [ + "## LoRA adapter\n", + "LoRA adapters can be used with any vLLM model that implements `SupportsLoRA`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2682ca6c", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_community.llms import VLLM\n", + "from vllm.lora.request import LoRARequest\n", + "\n", + "llm = VLLM(model=\"meta-llama/Llama-2-7b-hf\", enable_lora=True)\n", + "\n", + "LoRA_ADAPTER_PATH = \"path/to/adapter\"\n", + "lora_adapter = LoRARequest(\"lora_adapter\", 1, LoRA_ADAPTER_PATH)\n", + "\n", + "print(\n", + " llm.invoke(\"What are some popular Korean street foods?\", lora_request=lora_adapter)\n", + ")" + ] } ], "metadata": {