From b7bbfc7c67f2c05d1a980ef4b600388cbe037efe Mon Sep 17 00:00:00 2001
From: ccurme
Date: Tue, 30 Jul 2024 10:23:36 -0400
Subject: [PATCH] langchain: revert "init_chat_model() to support ChatOllama
 from langchain-ollama" (#24819)

Reverts langchain-ai/langchain#24818

Overlooked discussion in
https://github.com/langchain-ai/langchain/pull/24801.
---
 libs/langchain/langchain/chat_models/base.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/libs/langchain/langchain/chat_models/base.py b/libs/langchain/langchain/chat_models/base.py
index 9f90a3365ca..9a3f8b7151a 100644
--- a/libs/langchain/langchain/chat_models/base.py
+++ b/libs/langchain/langchain/chat_models/base.py
@@ -118,7 +118,7 @@ def init_chat_model(
             - mistralai (langchain-mistralai)
             - huggingface (langchain-huggingface)
             - groq (langchain-groq)
-            - ollama (langchain-ollama)
+            - ollama (langchain-community)

     Will attempt to infer model_provider from model if not specified. The
     following providers will be inferred based on these model prefixes:
@@ -336,8 +336,8 @@ def _init_chat_model_helper(

         return ChatFireworks(model=model, **kwargs)
     elif model_provider == "ollama":
-        _check_pkg("langchain_ollama")
-        from langchain_ollama import ChatOllama
+        _check_pkg("langchain_community")
+        from langchain_community.chat_models import ChatOllama

         return ChatOllama(model=model, **kwargs)
     elif model_provider == "together":
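
After this revert, the "ollama" provider in init_chat_model() resolves to
langchain_community.chat_models.ChatOllama again, so langchain-community
(not langchain-ollama) must be installed for the branch to succeed. Below is
a minimal usage sketch; it assumes langchain and langchain-community are
installed and that a local Ollama server is running with a "llama3" model
pulled (the model name is chosen for illustration only).

    # Minimal sketch: exercises the reverted "ollama" branch of
    # _init_chat_model_helper(). Assumes langchain-community is installed
    # and an Ollama server is serving the hypothetical "llama3" model.
    from langchain.chat_models import init_chat_model

    # model_provider="ollama" triggers _check_pkg("langchain_community")
    # and the ChatOllama import; extra kwargs are forwarded to ChatOllama.
    chat = init_chat_model("llama3", model_provider="ollama", temperature=0)

    response = chat.invoke("Say hello in one word.")
    print(response.content)

If model_provider were omitted, init_chat_model() would try to infer the
provider from the model-name prefix, per the docstring section touched by the
first hunk.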