From 1152f4d48b5ddfdb7abeb159c01d4f6e6c08543a Mon Sep 17 00:00:00 2001 From: Alonso Silva Allende Date: Wed, 19 Jul 2023 03:12:09 +0200 Subject: [PATCH] Allow chat models that do not return token usage (#7907) - Description: It allows using chat models that do not return token usage - Issue: [#7900](https://github.com/hwchase17/langchain/issues/7900) - Dependencies: None - Tag maintainer: @agola11 @hwchase17 - Twitter handle: @alonsosilva --------- Co-authored-by: Bagatur <22008038+baskaryan@users.noreply.github.com> Co-authored-by: William FH <13333726+hinthornw@users.noreply.github.com> --- langchain/chat_models/openai.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/langchain/chat_models/openai.py b/langchain/chat_models/openai.py index ebab719e87a..e09ac9abf72 100644 --- a/langchain/chat_models/openai.py +++ b/langchain/chat_models/openai.py @@ -391,7 +391,8 @@ class ChatOpenAI(BaseChatModel): generation_info=dict(finish_reason=res.get("finish_reason")), ) generations.append(gen) - llm_output = {"token_usage": response["usage"], "model_name": self.model_name} + token_usage = response.get("usage", {}) + llm_output = {"token_usage": token_usage, "model_name": self.model_name} return ChatResult(generations=generations, llm_output=llm_output) async def _agenerate