diff --git a/libs/community/langchain_community/callbacks/labelstudio_callback.py b/libs/community/langchain_community/callbacks/labelstudio_callback.py
index 73954820b23..0eb35af1017 100644
--- a/libs/community/langchain_community/callbacks/labelstudio_callback.py
+++ b/libs/community/langchain_community/callbacks/labelstudio_callback.py
@@ -98,7 +98,7 @@ class LabelStudioCallbackHandler(BaseCallbackHandler):
         ...     mode='prompt'
         ... )
         >>> llm = OpenAI(callbacks=[handler])
-        >>> llm.predict('Tell me a story about a dog.')
+        >>> llm.invoke('Tell me a story about a dog.')
     """

     DEFAULT_PROJECT_NAME: str = "LangChain-%Y-%m-%d"
diff --git a/libs/community/langchain_community/callbacks/llmonitor_callback.py b/libs/community/langchain_community/callbacks/llmonitor_callback.py
index 32e8820dbdb..dc7bbbfb9b0 100644
--- a/libs/community/langchain_community/callbacks/llmonitor_callback.py
+++ b/libs/community/langchain_community/callbacks/llmonitor_callback.py
@@ -204,7 +204,7 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):

     llmonitor_callback = LLMonitorCallbackHandler()
     llm = OpenAI(callbacks=[llmonitor_callback],
                  metadata={"userId": "user-123"})
-    llm.predict("Hello, how are you?")
+    llm.invoke("Hello, how are you?")
     ```
     """
diff --git a/libs/community/langchain_community/llms/opaqueprompts.py b/libs/community/langchain_community/llms/opaqueprompts.py
index 9be25d4d236..0fbc801aaeb 100644
--- a/libs/community/langchain_community/llms/opaqueprompts.py
+++ b/libs/community/langchain_community/llms/opaqueprompts.py
@@ -4,6 +4,7 @@ from typing import Any, Dict, List, Optional
 from langchain_core.callbacks import CallbackManagerForLLMRun
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.language_models.llms import LLM
+from langchain_core.messages import AIMessage
 from langchain_core.pydantic_v1 import Extra, root_validator
 from langchain_core.utils import get_from_dict_or_env

@@ -95,10 +96,11 @@ class OpaquePrompts(LLM):
         # TODO: Add in callbacks once child runs for LLMs are supported by LangSmith.
         # call the LLM with the sanitized prompt and get the response
-        llm_response = self.base_llm.predict(
+        llm_response = self.base_llm.bind(stop=stop).invoke(
             sanitized_prompt_value_str,
-            stop=stop,
         )
+        if isinstance(llm_response, AIMessage):
+            llm_response = llm_response.content

         # desanitize the response by restoring the original sensitive information
         desanitize_response: op.DesanitizeResponse = op.desanitize(
diff --git a/libs/community/tests/unit_tests/chat_models/test_openai.py b/libs/community/tests/unit_tests/chat_models/test_openai.py
index ad7033e4ea8..6a59bce98a1 100644
--- a/libs/community/tests/unit_tests/chat_models/test_openai.py
+++ b/libs/community/tests/unit_tests/chat_models/test_openai.py
@@ -96,8 +96,8 @@ def test_openai_predict(mock_completion: dict) -> None:
         "client",
         mock_client,
     ):
-        res = llm.predict("bar")
-        assert res == "Bar Baz"
+        res = llm.invoke("bar")
+        assert res.content == "Bar Baz"
     assert completed

diff --git a/libs/core/tests/unit_tests/language_models/llms/test_base.py b/libs/core/tests/unit_tests/language_models/llms/test_base.py
index 835ed2da9af..b384c426141 100644
--- a/libs/core/tests/unit_tests/language_models/llms/test_base.py
+++ b/libs/core/tests/unit_tests/language_models/llms/test_base.py
@@ -60,7 +60,7 @@ def test_batch_size() -> None:

     llm = FakeListLLM(responses=["foo"] * 1)
     with collect_runs() as cb:
-        llm.predict("foo")
+        llm.invoke("foo")
         assert len(cb.traced_runs) == 1
         assert (cb.traced_runs[0].extra or {}).get("batch_size") == 1

diff --git a/libs/core/tests/unit_tests/tracers/test_run_collector.py b/libs/core/tests/unit_tests/tracers/test_run_collector.py
index 36c7b17c0d6..95c0052cf21 100644
--- a/libs/core/tests/unit_tests/tracers/test_run_collector.py
+++ b/libs/core/tests/unit_tests/tracers/test_run_collector.py
@@ -9,7 +9,7 @@ from langchain_core.tracers.context import collect_runs
 def test_collect_runs() -> None:
     llm = FakeListLLM(responses=["hello"])
     with collect_runs() as cb:
-        llm.predict("hi")
+        llm.invoke("hi")
         assert cb.traced_runs
         assert len(cb.traced_runs) == 1
         assert isinstance(cb.traced_runs[0].id, uuid.UUID)
diff --git a/libs/experimental/langchain_experimental/llms/anthropic_functions.py b/libs/experimental/langchain_experimental/llms/anthropic_functions.py
index 3d290f5f178..cbfb7b48176 100644
--- a/libs/experimental/langchain_experimental/llms/anthropic_functions.py
+++ b/libs/experimental/langchain_experimental/llms/anthropic_functions.py
@@ -183,7 +183,7 @@ class AnthropicFunctions(BaseChatModel):
             raise ValueError(
                 "if `function_call` provided, `functions` must also be"
             )
-        response = self.model.predict_messages(
+        response = self.model.invoke(
             messages, stop=stop, callbacks=run_manager, **kwargs
         )
         completion = cast(str, response.content)
diff --git a/libs/experimental/langchain_experimental/llms/ollama_functions.py b/libs/experimental/langchain_experimental/llms/ollama_functions.py
index 16f858b2290..af5d7a478bf 100644
--- a/libs/experimental/langchain_experimental/llms/ollama_functions.py
+++ b/libs/experimental/langchain_experimental/llms/ollama_functions.py
@@ -89,7 +89,7 @@ function in "functions".'
         )
         if "functions" in kwargs:
             del kwargs["functions"]
-        response_message = self.llm.predict_messages(
+        response_message = self.llm.invoke(
             [system_message] + messages, stop=stop, callbacks=run_manager, **kwargs
         )
         chat_generation_content = response_message.content
diff --git a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_llama2chat.py b/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_llama2chat.py
index dd25a1bf387..55501833373 100644
--- a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_llama2chat.py
+++ b/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_llama2chat.py
@@ -49,7 +49,7 @@ def model_cfg_sys_msg() -> Llama2Chat:
 def test_default_system_message(model: Llama2Chat) -> None:
     messages = [HumanMessage(content="usr-msg-1")]

-    actual = model.predict_messages(messages).content  # type: ignore
+    actual = model.invoke(messages).content  # type: ignore
     expected = (
         f"[INST] <<SYS>>\n{DEFAULT_SYSTEM_PROMPT}\n<</SYS>>\n\nusr-msg-1 [/INST]"
     )
@@ -62,7 +62,7 @@ def test_configured_system_message(
 ) -> None:
     messages = [HumanMessage(content="usr-msg-1")]

-    actual = model_cfg_sys_msg.predict_messages(messages).content  # type: ignore
+    actual = model_cfg_sys_msg.invoke(messages).content  # type: ignore
     expected = "[INST] <<SYS>>\nsys-msg\n<</SYS>>\n\nusr-msg-1 [/INST]"

     assert actual == expected
@@ -73,7 +73,7 @@ async def test_configured_system_message_async(
 ) -> None:
     messages = [HumanMessage(content="usr-msg-1")]

-    actual = await model_cfg_sys_msg.apredict_messages(messages)  # type: ignore
+    actual = await model_cfg_sys_msg.ainvoke(messages)  # type: ignore
     expected = "[INST] <<SYS>>\nsys-msg\n<</SYS>>\n\nusr-msg-1 [/INST]"

     assert actual.content == expected
@@ -87,7 +87,7 @@ def test_provided_system_message(
         HumanMessage(content="usr-msg-1"),
     ]

-    actual = model_cfg_sys_msg.predict_messages(messages).content
+    actual = model_cfg_sys_msg.invoke(messages).content
     expected = "[INST] <<SYS>>\ncustom-sys-msg\n<</SYS>>\n\nusr-msg-1 [/INST]"

     assert actual == expected
@@ -102,7 +102,7 @@ def test_human_ai_dialogue(model_cfg_sys_msg: Llama2Chat) -> None:
         HumanMessage(content="usr-msg-3"),
     ]

-    actual = model_cfg_sys_msg.predict_messages(messages).content
+    actual = model_cfg_sys_msg.invoke(messages).content
     expected = (
         "[INST] <<SYS>>\nsys-msg\n<</SYS>>\n\nusr-msg-1 [/INST] ai-msg-1 "
         "[INST] usr-msg-2 [/INST] ai-msg-2 [INST] usr-msg-3 [/INST]"
@@ -113,14 +113,14 @@ def test_human_ai_dialogue(model_cfg_sys_msg: Llama2Chat) -> None:

 def test_no_message(model: Llama2Chat) -> None:
     with pytest.raises(ValueError) as info:
-        model.predict_messages([])
+        model.invoke([])

     assert info.value.args[0] == "at least one HumanMessage must be provided"


 def test_ai_message_first(model: Llama2Chat) -> None:
     with pytest.raises(ValueError) as info:
-        model.predict_messages([AIMessage(content="ai-msg-1")])
+        model.invoke([AIMessage(content="ai-msg-1")])

     assert (
         info.value.args[0]
@@ -136,7 +136,7 @@ def test_human_ai_messages_not_alternating(model: Llama2Chat) -> None:
     ]

     with pytest.raises(ValueError) as info:
-        model.predict_messages(messages)  # type: ignore
+        model.invoke(messages)  # type: ignore

     assert info.value.args[0] == (
         "messages must be alternating human- and ai-messages, "
@@ -151,6 +151,6 @@ def test_last_message_not_human_message(model: Llama2Chat) -> None:
     ]

     with pytest.raises(ValueError) as info:
-        model.predict_messages(messages)
+        model.invoke(messages)

     assert info.value.args[0] == "last message must be a HumanMessage"
diff --git a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_mixtral.py b/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_mixtral.py
index 881f78f6a77..797c6080231 100644
--- a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_mixtral.py
+++ b/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_mixtral.py
@@ -23,7 +23,7 @@ def test_prompt(model: Mixtral) -> None:
         HumanMessage(content="usr-msg-2"),
     ]

-    actual = model.predict_messages(messages).content  # type: ignore
+    actual = model.invoke(messages).content  # type: ignore
     expected = (
         "[INST] sys-msg\nusr-msg-1 [/INST] ai-msg-1 [INST] usr-msg-2 [/INST]"  # noqa: E501
     )
diff --git a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_orca.py b/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_orca.py
index 902d163c379..c0ecb609877 100644
--- a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_orca.py
+++ b/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_orca.py
@@ -23,7 +23,7 @@ def test_prompt(model: Orca) -> None:
         HumanMessage(content="usr-msg-2"),
     ]

-    actual = model.predict_messages(messages).content  # type: ignore
+    actual = model.invoke(messages).content  # type: ignore
     expected = "### System:\nsys-msg\n\n### User:\nusr-msg-1\n\n### Assistant:\nai-msg-1\n\n### User:\nusr-msg-2\n\n"  # noqa: E501

     assert actual == expected
diff --git a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_vicuna.py b/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_vicuna.py
index 8722b3ec5fc..4948c7a8deb 100644
--- a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_vicuna.py
+++ b/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_vicuna.py
@@ -23,7 +23,7 @@ def test_prompt(model: Vicuna) -> None:
         HumanMessage(content="usr-msg-2"),
     ]

-    actual = model.predict_messages(messages).content  # type: ignore
+    actual = model.invoke(messages).content  # type: ignore
     expected = "sys-msg USER: usr-msg-1 ASSISTANT: ai-msg-1 USER: usr-msg-2 "

     assert actual == expected
diff --git a/libs/langchain/tests/integration_tests/cache/test_upstash_redis_cache.py b/libs/langchain/tests/integration_tests/cache/test_upstash_redis_cache.py
index 6cd81eb0662..8b1b0d4dcfd 100644
--- a/libs/langchain/tests/integration_tests/cache/test_upstash_redis_cache.py
+++ b/libs/langchain/tests/integration_tests/cache/test_upstash_redis_cache.py
@@ -86,6 +86,5 @@ def test_redis_cache_chat() -> None:
     llm = FakeChatModel()
     params = llm.dict()
     params["stop"] = None
-    with pytest.warns():
-        llm.predict("foo")
+    llm.invoke("foo")
     langchain.llm_cache.redis.flushall()
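
Note (not part of the diff): a minimal sketch of the behavioral difference this migration has to account for. invoke() on a string-in/string-out LLM returns str, while invoke() on a chat model returns an AIMessage, which is why the OpaquePrompts change unwraps .content and why the ChatOpenAI test now asserts on res.content. The fake-model import paths below are assumptions and may differ between langchain-core versions.

# Sketch only: illustrates the predict()/predict_messages() -> invoke() pattern above.
# Assumption: FakeListLLM and FakeListChatModel are importable from langchain_core.language_models.
from langchain_core.language_models import FakeListChatModel, FakeListLLM
from langchain_core.messages import AIMessage

llm = FakeListLLM(responses=["hello"])
chat = FakeListChatModel(responses=["hello"])

# LLMs: invoke() returns a plain string, as predict() used to.
text = llm.invoke("hi")
assert isinstance(text, str)

# Chat models: invoke() returns an AIMessage, so callers read .content
# (compare the OpaquePrompts and test_openai hunks above).
message = chat.invoke("hi")
assert isinstance(message, AIMessage) and message.content == "hello"

# Stop sequences move from a predict() keyword argument to bind():
assert llm.bind(stop=["\n"]).invoke("hi") == "hello"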