From 05ad399abed775fe14abee54a74ef49b55eb2ca7 Mon Sep 17 00:00:00 2001 From: Jonathan Pedoeem Date: Thu, 16 Feb 2023 01:48:09 -0500 Subject: [PATCH] Update PromptLayerOpenAI LLM to include support for ASYNC API (#1066) This PR updates `PromptLayerOpenAI` to now support requests using the [Async API](https://langchain.readthedocs.io/en/latest/modules/llms/async_llm.html) It also updates the documentation on Async API to let users know that PromptLayerOpenAI also supports this. `PromptLayerOpenAI` now redefines `_agenerate` in a similar way to how it redefines `_generate` --- docs/modules/llms/async_llm.ipynb | 3 ++- langchain/llms/promptlayer_openai.py | 24 ++++++++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/docs/modules/llms/async_llm.ipynb b/docs/modules/llms/async_llm.ipynb index 47493b8e9e6..730d0102103 100644 --- a/docs/modules/llms/async_llm.ipynb +++ b/docs/modules/llms/async_llm.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "id": "f6574496-b360-4ffa-9523-7fd34a590164", "metadata": {}, @@ -9,7 +10,7 @@ "\n", "LangChain provides async support for LLMs by leveraging the [asyncio](https://docs.python.org/3/library/asyncio.html) library.\n", "\n", - "Async support is particularly useful for calling multiple LLMs concurrently, as these calls are network-bound. Currently, only `OpenAI` is supported, but async support for other LLMs is on the roadmap.\n", + "Async support is particularly useful for calling multiple LLMs concurrently, as these calls are network-bound. Currently, only `OpenAI` and `PromptLayerOpenAI` are supported, but async support for other LLMs is on the roadmap.\n", "\n", "You can use the `agenerate` method to call an OpenAI LLM asynchronously." 
] diff --git a/langchain/llms/promptlayer_openai.py b/langchain/llms/promptlayer_openai.py index bdad7eab769..23cba853df3 100644 --- a/langchain/llms/promptlayer_openai.py +++ b/langchain/llms/promptlayer_openai.py @@ -53,3 +53,27 @@ class PromptLayerOpenAI(OpenAI, BaseModel): get_api_key(), ) return generated_responses + + async def _agenerate( + self, prompts: List[str], stop: Optional[List[str]] = None + ) -> LLMResult: + from promptlayer.utils import get_api_key, promptlayer_api_request + + request_start_time = datetime.datetime.now().timestamp() + generated_responses = await super()._agenerate(prompts, stop) + request_end_time = datetime.datetime.now().timestamp() + for i in range(len(prompts)): + prompt = prompts[i] + resp = generated_responses.generations[i] + promptlayer_api_request( + "langchain.PromptLayerOpenAI.async", + "langchain", + [prompt], + self._identifying_params, + self.pl_tags, + resp[0].text, + request_start_time, + request_end_time, + get_api_key(), + ) + return generated_responses