From 5abfc85fec40641a25241ce81906062cd1e30e6d Mon Sep 17 00:00:00 2001
From: Jerron Lim
Date: Tue, 30 Jul 2024 22:17:38 +0800
Subject: [PATCH] langchain: init_chat_model() to support ChatOllama from
 langchain-ollama (#24818)

Description: Since moving away from `langchain-community` is recommended,
`init_chat_model()` should import `ChatOllama` from `langchain-ollama` instead.
---
 libs/langchain/langchain/chat_models/base.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/libs/langchain/langchain/chat_models/base.py b/libs/langchain/langchain/chat_models/base.py
index 9a3f8b7151a..9f90a3365ca 100644
--- a/libs/langchain/langchain/chat_models/base.py
+++ b/libs/langchain/langchain/chat_models/base.py
@@ -118,7 +118,7 @@ def init_chat_model(
         - mistralai (langchain-mistralai)
         - huggingface (langchain-huggingface)
         - groq (langchain-groq)
-        - ollama (langchain-community)
+        - ollama (langchain-ollama)
 
     Will attempt to infer model_provider from model if not specified. The
     following providers will be inferred based on these model prefixes:
@@ -336,8 +336,8 @@ def _init_chat_model_helper(
 
         return ChatFireworks(model=model, **kwargs)
     elif model_provider == "ollama":
-        _check_pkg("langchain_community")
-        from langchain_community.chat_models import ChatOllama
+        _check_pkg("langchain_ollama")
+        from langchain_ollama import ChatOllama
 
         return ChatOllama(model=model, **kwargs)
     elif model_provider == "together":
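
For context, a minimal usage sketch of the code path this patch touches (not part of the diff). It assumes `langchain` and `langchain-ollama` are installed and a local Ollama server is running; the model name `llama3` is illustrative.

```python
from langchain.chat_models import init_chat_model

# With this patch, the "ollama" provider resolves to ChatOllama from
# langchain-ollama rather than langchain-community.
llm = init_chat_model("llama3", model_provider="ollama", temperature=0)

# Standard chat-model interface; invoke() returns an AIMessage.
response = llm.invoke("Why is the sky blue?")
print(response.content)
```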