{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# TextGen\n",
    "\n",
    "[GitHub:oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui) is a Gradio web UI for running Large Language Models such as LLaMA, llama.cpp, GPT-J, Pythia, OPT, and GALACTICA.\n",
    "\n",
    "This example goes over how to use LangChain to interact with LLMs via the `text-generation-webui` API integration.\n",
    "\n",
    "Please ensure that you have `text-generation-webui` configured and an LLM installed. Installation via the [one-click installer](https://github.com/oobabooga/text-generation-webui#one-click-installers) appropriate for your OS is recommended.\n",
    "\n",
    "Once `text-generation-webui` is installed and confirmed working via the web interface, enable the `api` option either through the web model configuration tab or by adding the runtime argument `--api` to your start command."
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Set model_url and run the example"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "model_url = \"http://localhost:5000\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "import langchain\n",
    "from langchain import PromptTemplate, LLMChain\n",
    "from langchain.llms import TextGen\n",
    "\n",
    "langchain.debug = True\n",
    "\n",
    "template = \"\"\"Question: {question}\n",
    "\n",
    "Answer: Let's think step by step.\"\"\"\n",
    "\n",
    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
    "llm = TextGen(model_url=model_url)\n",
    "llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
    "question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
    "\n",
    "llm_chain.run(question)"
   ]
  },
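  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "You can also call the model directly, without wrapping it in an `LLMChain`. The cell below is a minimal sketch (assuming the blocking API configured above is reachable at `model_url`): like any LangChain LLM, `TextGen` can be called on a single prompt, and `generate` accepts a batch of prompts."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A minimal sketch, assuming the blocking API above is reachable at model_url.\n",
    "llm = TextGen(model_url=model_url)\n",
    "\n",
    "# Call the LLM directly on a single prompt.\n",
    "print(llm(\"Name three NFL teams.\"))\n",
    "\n",
    "# generate() accepts a batch of prompts and returns an LLMResult.\n",
    "result = llm.generate([\"Name one US state.\", \"Name one Canadian province.\"])\n",
    "for generations in result.generations:\n",
    "    print(generations[0].text)"
   ]
  },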
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Streaming Version"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "You will need to install `websocket-client` to use this feature:\n",
    "`pip install websocket-client`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model_url = \"ws://localhost:5005\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import langchain\n",
    "from langchain import PromptTemplate, LLMChain\n",
    "from langchain.llms import TextGen\n",
    "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
    "\n",
    "langchain.debug = True\n",
    "\n",
    "template = \"\"\"Question: {question}\n",
    "\n",
    "Answer: Let's think step by step.\"\"\"\n",
    "\n",
    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
    "llm = TextGen(model_url=model_url, streaming=True, callbacks=[StreamingStdOutCallbackHandler()])\n",
    "llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
    "question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
    "\n",
    "llm_chain.run(question)"
   ]
  },
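  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Streaming is driven through LangChain's callback system, so you are not limited to printing tokens to stdout. The sketch below defines a hypothetical `TokenCollector` handler (not part of LangChain) that subclasses `BaseCallbackHandler` and collects tokens into a list as they arrive."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.callbacks.base import BaseCallbackHandler\n",
    "\n",
    "\n",
    "class TokenCollector(BaseCallbackHandler):\n",
    "    \"\"\"Hypothetical example handler: accumulates streamed tokens in a list.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        self.tokens = []\n",
    "\n",
    "    def on_llm_new_token(self, token: str, **kwargs) -> None:\n",
    "        # Called once per generated token when streaming=True.\n",
    "        self.tokens.append(token)\n",
    "\n",
    "\n",
    "collector = TokenCollector()\n",
    "llm = TextGen(model_url=model_url, streaming=True, callbacks=[collector])\n",
    "llm(\"Name three NFL teams.\")\n",
    "print(len(collector.tokens), \"tokens received\")"
   ]
  },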
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "llm = TextGen(model_url=model_url, streaming=True)\n",
    "for chunk in llm.stream(\"Ask 'Hi, how are you?' like a pirate:'\", stop=[\"'\", \"\\n\"]):\n",
    "    print(chunk, end='', flush=True)"
   ]
  },
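  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The chunks yielded by `stream` do not have to go to stdout. As a small sketch (assuming each chunk is a text fragment, as the cell above suggests), they can be accumulated into the full completion string."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Accumulate streamed chunks instead of printing them -- assumes each chunk\n",
    "# is a text fragment, as in the cell above.\n",
    "chunks = []\n",
    "for chunk in llm.stream(\"Ask 'Hi, how are you?' like a pirate:'\", stop=[\"'\", \"\\n\"]):\n",
    "    chunks.append(chunk)\n",
    "\n",
    "full_response = \"\".join(chunks)\n",
    "print(full_response)"
   ]
  }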
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}