From 2d91d2b978c586f54a265ae9aed3ab55ecc2d577 Mon Sep 17 00:00:00 2001
From: William FH <13333726+hinthornw@users.noreply.github.com>
Date: Sun, 17 Dec 2023 17:59:27 -0800
Subject: [PATCH] community: Add logprobs in gen output (#14826)

Now that it's supported again for OAI chat models.

Shame this doesn't include it in the `.invoke()` output, though (it's not
included in the message itself); a follow-up would be needed for that to be
the case.
---
 libs/community/langchain_community/chat_models/openai.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/libs/community/langchain_community/chat_models/openai.py b/libs/community/langchain_community/chat_models/openai.py
index 7026624c1b0..acbcc943f0d 100644
--- a/libs/community/langchain_community/chat_models/openai.py
+++ b/libs/community/langchain_community/chat_models/openai.py
@@ -454,9 +454,12 @@ class ChatOpenAI(BaseChatModel):
         response = response.dict()
         for res in response["choices"]:
             message = convert_dict_to_message(res["message"])
+            generation_info = dict(finish_reason=res.get("finish_reason"))
+            if "logprobs" in res:
+                generation_info["logprobs"] = res["logprobs"]
             gen = ChatGeneration(
                 message=message,
-                generation_info=dict(finish_reason=res.get("finish_reason")),
+                generation_info=generation_info,
             )
             generations.append(gen)
         token_usage = response.get("usage", {})
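
Usage sketch (not part of the patch): a minimal example of reading the new
`generation_info` fields after this change. The model name and parameters are
illustrative; `logprobs` has to be requested via `model_kwargs` for the OpenAI
API to return it at all, and, as the commit message notes, only the result of
`.generate()` (not `.invoke()`) surfaces `generation_info`.

    from langchain_community.chat_models import ChatOpenAI
    from langchain_core.messages import HumanMessage

    # model_kwargs are forwarded to the OpenAI chat completions request;
    # without logprobs=True there, the API response has nothing to record.
    chat = ChatOpenAI(model="gpt-3.5-turbo", model_kwargs={"logprobs": True})

    result = chat.generate([[HumanMessage(content="Say hello")]])
    gen = result.generations[0][0]

    # finish_reason was already recorded; logprobs is new with this patch.
    print(gen.generation_info["finish_reason"])
    print(gen.generation_info.get("logprobs"))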