{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "f6574496-b360-4ffa-9523-7fd34a590164",
   "metadata": {},
   "source": [
    "# How to use the async API for LLMs\n",
    "\n",
    "LangChain provides async support for LLMs by leveraging the [asyncio](https://docs.python.org/3/library/asyncio.html) library.\n",
    "\n",
    "Async support is particularly useful for making multiple LLM calls concurrently, as these calls are network-bound. Currently `OpenAI`, `PromptLayerOpenAI`, `ChatOpenAI`, and `Anthropic` are supported, but async support for other LLMs is on the roadmap.\n",
    "\n",
    "You can use the `agenerate` method to call an OpenAI LLM asynchronously."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "5e49e96c-0f88-466d-b3d3-ea0966bdf19e",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "I'm doing well, thank you. How about you?\n",
      "\n",
      "\n",
      "I'm doing well, thank you. How about you?\n",
      "\n",
      "\n",
      "I'm doing well, how about you?\n",
      "\n",
      "\n",
      "I'm doing well, thank you. How about you?\n",
      "\n",
      "\n",
      "I'm doing well, thank you. How about you?\n",
      "\n",
      "\n",
      "I'm doing well, thank you. How about yourself?\n",
      "\n",
      "\n",
      "I'm doing well, thank you! How about you?\n",
      "\n",
      "\n",
      "I'm doing well, thank you. How about you?\n",
      "\n",
      "\n",
      "I'm doing well, thank you! How about you?\n",
      "\n",
      "\n",
      "I'm doing well, thank you. How about you?\n",
      "\u001b[1mConcurrent executed in 1.39 seconds.\u001b[0m\n",
      "\n",
      "\n",
      "I'm doing well, thank you. How about you?\n",
      "\n",
      "\n",
      "I'm doing well, thank you. How about you?\n",
      "\n",
      "I'm doing well, thank you. How about you?\n",
      "\n",
      "\n",
      "I'm doing well, thank you. How about you?\n",
      "\n",
      "\n",
      "I'm doing well, thank you. How about yourself?\n",
      "\n",
      "\n",
      "I'm doing well, thanks for asking. How about you?\n",
      "\n",
      "\n",
      "I'm doing well, thanks! How about you?\n",
      "\n",
      "\n",
      "I'm doing well, thank you. How about you?\n",
      "\n",
      "\n",
      "I'm doing well, thank you. How about yourself?\n",
      "\n",
      "\n",
      "I'm doing well, thanks for asking. How about you?\n",
      "\u001b[1mSerial executed in 5.77 seconds.\u001b[0m\n"
     ]
    }
   ],
|    "source": [
 | |
|     "import time\n",
 | |
|     "import asyncio\n",
 | |
|     "\n",
 | |
|     "from langchain.llms import OpenAI\n",
 | |
|     "\n",
 | |
|     "def generate_serially():\n",
 | |
|     "    llm = OpenAI(temperature=0.9)\n",
 | |
|     "    for _ in range(10):\n",
 | |
|     "        resp = llm.generate([\"Hello, how are you?\"])\n",
 | |
|     "        print(resp.generations[0][0].text)\n",
 | |
|     "\n",
 | |
|     "\n",
 | |
|     "async def async_generate(llm):\n",
 | |
|     "    resp = await llm.agenerate([\"Hello, how are you?\"])\n",
 | |
|     "    print(resp.generations[0][0].text)\n",
 | |
|     "\n",
 | |
|     "\n",
 | |
|     "async def generate_concurrently():\n",
 | |
|     "    llm = OpenAI(temperature=0.9)\n",
 | |
|     "    tasks = [async_generate(llm) for _ in range(10)]\n",
 | |
|     "    await asyncio.gather(*tasks)\n",
 | |
|     "\n",
 | |
|     "\n",
 | |
|     "s = time.perf_counter()\n",
 | |
|     "# If running this outside of Jupyter, use asyncio.run(generate_concurrently())\n",
 | |
|     "await generate_concurrently() \n",
 | |
|     "elapsed = time.perf_counter() - s\n",
 | |
|     "print('\\033[1m' + f\"Concurrent executed in {elapsed:0.2f} seconds.\" + '\\033[0m')\n",
 | |
|     "\n",
 | |
|     "s = time.perf_counter()\n",
 | |
|     "generate_serially()\n",
 | |
|     "elapsed = time.perf_counter() - s\n",
 | |
|     "print('\\033[1m' + f\"Serial executed in {elapsed:0.2f} seconds.\" + '\\033[0m')"
 | |
|    ]
 | |
|   },
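  {
   "cell_type": "markdown",
   "id": "asyncio-run-script-note",
   "metadata": {},
   "source": [
    "A note on the `asyncio.run` comment above: top-level `await` works in this notebook because Jupyter already runs an event loop. In a standalone script there is no running loop, so you start one yourself with `asyncio.run`. A minimal sketch of the script version, using the same `OpenAI` LLM and `agenerate` call as above:\n",
    "\n",
    "```python\n",
    "import asyncio\n",
    "\n",
    "from langchain.llms import OpenAI\n",
    "\n",
    "async def main():\n",
    "    llm = OpenAI(temperature=0.9)\n",
    "    resp = await llm.agenerate([\"Hello, how are you?\"])\n",
    "    print(resp.generations[0][0].text)\n",
    "\n",
    "# Starts an event loop, runs main() to completion, then closes the loop\n",
    "asyncio.run(main())\n",
    "```"
   ]
  },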
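  {
   "cell_type": "markdown",
   "id": "other-llms-note",
   "metadata": {},
   "source": [
    "The other supported LLMs expose the same `agenerate` method. A minimal sketch with the `Anthropic` wrapper from `langchain.llms` (left unexecuted here; it assumes your Anthropic credentials are configured in the environment):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "anthropic-async-sketch",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.llms import Anthropic\n",
    "\n",
    "async def async_generate_anthropic():\n",
    "    # Same call pattern as the OpenAI example above\n",
    "    llm = Anthropic()\n",
    "    resp = await llm.agenerate([\"Hello, how are you?\"])\n",
    "    print(resp.generations[0][0].text)\n",
    "\n",
    "await async_generate_anthropic()"
   ]
  }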
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}