community[patch]: update use of deprecated llm methods (#20393)

.predict and .predict_messages for BaseLanguageModel and BaseChatModel
This commit is contained in:
ccurme
2024-04-12 17:28:23 -04:00
committed by GitHub
parent 3a068b26f3
commit 38faa74c23
13 changed files with 25 additions and 24 deletions

View File

@@ -4,6 +4,7 @@ from typing import Any, Dict, List, Optional
 from langchain_core.callbacks import CallbackManagerForLLMRun
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.language_models.llms import LLM
+from langchain_core.messages import AIMessage
 from langchain_core.pydantic_v1 import Extra, root_validator
 from langchain_core.utils import get_from_dict_or_env
@@ -95,10 +96,11 @@ class OpaquePrompts(LLM):
         # TODO: Add in callbacks once child runs for LLMs are supported by LangSmith.
         # call the LLM with the sanitized prompt and get the response
-        llm_response = self.base_llm.predict(
+        llm_response = self.base_llm.bind(stop=stop).invoke(
             sanitized_prompt_value_str,
-            stop=stop,
         )
+        if isinstance(llm_response, AIMessage):
+            llm_response = llm_response.content
         # desanitize the response by restoring the original sensitive information
         desanitize_response: op.DesanitizeResponse = op.desanitize(