Mirror of https://github.com/hwchase17/langchain.git, synced 2025-09-17 23:41:46 +00:00
community[patch]: update use of deprecated llm methods (#20393)
Updates calls to the deprecated .predict and .predict_messages methods of BaseLanguageModel and BaseChatModel.
@@ -4,6 +4,7 @@ from typing import Any, Dict, List, Optional
 from langchain_core.callbacks import CallbackManagerForLLMRun
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.language_models.llms import LLM
+from langchain_core.messages import AIMessage
 from langchain_core.pydantic_v1 import Extra, root_validator
 from langchain_core.utils import get_from_dict_or_env

@@ -95,10 +96,11 @@ class OpaquePrompts(LLM):

         # TODO: Add in callbacks once child runs for LLMs are supported by LangSmith.
         # call the LLM with the sanitized prompt and get the response
-        llm_response = self.base_llm.predict(
+        llm_response = self.base_llm.bind(stop=stop).invoke(
             sanitized_prompt_value_str,
-            stop=stop,
         )
+        if isinstance(llm_response, AIMessage):
+            llm_response = llm_response.content

         # desanitize the response by restoring the original sensitive information
         desanitize_response: op.DesanitizeResponse = op.desanitize(
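
For readers migrating similar code: in current LangChain the Runnable interface replaces the deprecated .predict/.predict_messages helpers. A stop list is attached with .bind(stop=...), the model is called with .invoke(...), and because a chat model returns an AIMessage rather than a plain string, the message content is unwrapped afterwards. Below is a minimal sketch of that pattern outside the diff; the helper name call_base_llm is illustrative and not part of the commit.

    # Sketch of the migration pattern applied by this commit (illustrative).
    from typing import List, Optional

    from langchain_core.language_models import BaseLanguageModel
    from langchain_core.messages import AIMessage


    def call_base_llm(
        base_llm: BaseLanguageModel,
        prompt: str,
        stop: Optional[List[str]] = None,
    ) -> str:
        # Deprecated style: base_llm.predict(prompt, stop=stop)
        # Current style: bind the stop sequences onto the runnable, then invoke it.
        response = base_llm.bind(stop=stop).invoke(prompt)
        # Chat models return an AIMessage; plain LLMs return a str.
        if isinstance(response, AIMessage):
            response = response.content
        return response

Binding stop on the runnable (rather than passing it per call) keeps the .invoke call uniform whether base_llm is a plain LLM or a chat model, which is presumably why the commit chooses .bind(stop=stop).invoke(...).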