Mirror of https://github.com/hwchase17/langchain.git, synced 2025-06-22 23:00:00 +00:00.
feat: add support for IBM WatsonX AI chat models (#29688)
**Description:** Updated init_chat_model to support Granite models deployed on IBM WatsonX **Dependencies:** [langchain-ibm](https://github.com/langchain-ai/langchain-ibm) Tagging @baskaryan @efriis for review when you get a chance.
This commit's parent is c7d74eb7a3; the commit itself is 994c5465e0.
@ -118,6 +118,7 @@ def init_chat_model(
|
|||||||
- 'ollama' -> langchain-ollama
|
- 'ollama' -> langchain-ollama
|
||||||
- 'google_anthropic_vertex' -> langchain-google-vertexai
|
- 'google_anthropic_vertex' -> langchain-google-vertexai
|
||||||
- 'deepseek' -> langchain-deepseek
|
- 'deepseek' -> langchain-deepseek
|
||||||
|
- 'ibm' -> langchain-ibm
|
||||||
- 'nvidia' -> langchain-nvidia-ai-endpoints
|
- 'nvidia' -> langchain-nvidia-ai-endpoints
|
||||||
|
|
||||||
Will attempt to infer model_provider from model if not specified. The
|
Will attempt to infer model_provider from model if not specified. The
|
||||||
@ -428,6 +429,11 @@ def _init_chat_model_helper(
|
|||||||
from langchain_nvidia_ai_endpoints import ChatNVIDIA
|
from langchain_nvidia_ai_endpoints import ChatNVIDIA
|
||||||
|
|
||||||
return ChatNVIDIA(model=model, **kwargs)
|
return ChatNVIDIA(model=model, **kwargs)
|
||||||
|
elif model_provider == "ibm":
|
||||||
|
_check_pkg("langchain_ibm")
|
||||||
|
from langchain_ibm import ChatWatsonx
|
||||||
|
|
||||||
|
return ChatWatsonx(model_id=model, **kwargs)
|
||||||
else:
|
else:
|
||||||
supported = ", ".join(_SUPPORTED_PROVIDERS)
|
supported = ", ".join(_SUPPORTED_PROVIDERS)
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
@ -453,6 +459,7 @@ _SUPPORTED_PROVIDERS = {
|
|||||||
"bedrock_converse",
|
"bedrock_converse",
|
||||||
"google_anthropic_vertex",
|
"google_anthropic_vertex",
|
||||||
"deepseek",
|
"deepseek",
|
||||||
|
"ibm",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user