From eb9eddae0cb3658e273d7a639bb76b23b2682afb Mon Sep 17 00:00:00 2001 From: Erick Friis Date: Fri, 7 Feb 2025 12:39:27 -0800 Subject: [PATCH] docs: use init_chat_model (#29623) --- docs/docs/how_to/agent_executor.ipynb | 2 +- docs/docs/how_to/extraction_examples.ipynb | 2 +- docs/docs/how_to/extraction_long_text.ipynb | 2 +- docs/docs/how_to/function_calling.ipynb | 2 +- docs/docs/how_to/tool_calling.ipynb | 7 +- .../how_to/tool_results_pass_to_model.ipynb | 2 +- docs/docs/how_to/tool_runtime.ipynb | 2 +- docs/docs/how_to/tools_prompting.ipynb | 2 +- docs/docs/integrations/chat/index.mdx | 2 +- docs/docs/integrations/providers/falkordb.mdx | 34 --- docs/docs/tutorials/agents.ipynb | 2 +- docs/docs/tutorials/chatbot.ipynb | 6 +- docs/docs/tutorials/llm_chain.ipynb | 2 +- .../migrating_memory/chat_history.ipynb | 2 +- docs/src/theme/ChatModelTabs.js | 205 ++++++------------ libs/langchain/langchain/chat_models/base.py | 7 + 16 files changed, 95 insertions(+), 186 deletions(-) delete mode 100644 docs/docs/integrations/providers/falkordb.mdx diff --git a/docs/docs/how_to/agent_executor.ipynb b/docs/docs/how_to/agent_executor.ipynb index 65aa36898b2..8119cf26a01 100644 --- a/docs/docs/how_to/agent_executor.ipynb +++ b/docs/docs/how_to/agent_executor.ipynb @@ -270,7 +270,7 @@ "\n", "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", "\n", - "\n" + "\n" ] }, { diff --git a/docs/docs/how_to/extraction_examples.ipynb b/docs/docs/how_to/extraction_examples.ipynb index ff565fb35a2..06f03e6c070 100644 --- a/docs/docs/how_to/extraction_examples.ipynb +++ b/docs/docs/how_to/extraction_examples.ipynb @@ -354,7 +354,7 @@ "\n", "\n" ] }, diff --git a/docs/docs/how_to/extraction_long_text.ipynb b/docs/docs/how_to/extraction_long_text.ipynb index 2f1c38abfec..c7ee5678bca 100644 --- a/docs/docs/how_to/extraction_long_text.ipynb +++ b/docs/docs/how_to/extraction_long_text.ipynb @@ -179,7 +179,7 @@ "\n", "\n" ] }, diff --git a/docs/docs/how_to/function_calling.ipynb b/docs/docs/how_to/function_calling.ipynb index 042b40eae52..b039a214b92 100644 --- a/docs/docs/how_to/function_calling.ipynb +++ b/docs/docs/how_to/function_calling.ipynb @@ -167,7 +167,7 @@ "\n", "\n", "\n", "We can use the `bind_tools()` method to handle converting\n", diff --git a/docs/docs/how_to/tool_calling.ipynb b/docs/docs/how_to/tool_calling.ipynb index c827bf81958..dbff12bf4cd 100644 --- a/docs/docs/how_to/tool_calling.ipynb +++ b/docs/docs/how_to/tool_calling.ipynb @@ -200,7 +200,12 @@ "\n", "\n" ] }, diff --git a/docs/docs/how_to/tool_results_pass_to_model.ipynb b/docs/docs/how_to/tool_results_pass_to_model.ipynb index 4263b08749f..c7e4f168f5c 100644 --- a/docs/docs/how_to/tool_results_pass_to_model.ipynb +++ b/docs/docs/how_to/tool_results_pass_to_model.ipynb @@ -33,7 +33,7 @@ "\n", "\n" ] }, diff --git a/docs/docs/how_to/tool_runtime.ipynb b/docs/docs/how_to/tool_runtime.ipynb index 3356aaa059f..cc4477e4bb1 100644 --- a/docs/docs/how_to/tool_runtime.ipynb +++ b/docs/docs/how_to/tool_runtime.ipynb @@ -46,7 +46,7 @@ "\n", "\n" ] }, diff --git a/docs/docs/how_to/tools_prompting.ipynb b/docs/docs/how_to/tools_prompting.ipynb index e570077db57..6ca054f151d 100644 --- a/docs/docs/how_to/tools_prompting.ipynb +++ b/docs/docs/how_to/tools_prompting.ipynb @@ -91,7 +91,7 @@ "\n", "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", "\n", - "\n", + "\n", "\n", "To illustrate the idea, we'll use `phi3` via Ollama, which does **NOT** have native support for tool calling. 
If you'd like to use `Ollama` as well follow [these instructions](/docs/integrations/chat/ollama/)." ] diff --git a/docs/docs/integrations/chat/index.mdx b/docs/docs/integrations/chat/index.mdx index 537cdc2c11a..fb43dcdc89b 100644 --- a/docs/docs/integrations/chat/index.mdx +++ b/docs/docs/integrations/chat/index.mdx @@ -17,7 +17,7 @@ If you'd like to contribute an integration, see [Contributing integrations](/doc import ChatModelTabs from "@theme/ChatModelTabs"; - + ```python model.invoke("Hello, world!") diff --git a/docs/docs/integrations/providers/falkordb.mdx b/docs/docs/integrations/providers/falkordb.mdx deleted file mode 100644 index 4cd2f9cdecb..00000000000 --- a/docs/docs/integrations/providers/falkordb.mdx +++ /dev/null @@ -1,34 +0,0 @@ -# FalkorDB - ->[FalkorDB](https://www.falkordb.com/) is a creator of the [FalkorDB](https://docs.falkordb.com/), -> a low-latency Graph Database that delivers knowledge to GenAI. - - -## Installation and Setup - -See [installation instructions here](/docs/integrations/graphs/falkordb/). - - -## Graphs - -See a [usage example](/docs/integrations/graphs/falkordb). - -```python -from langchain_community.graphs import FalkorDBGraph -``` - -## Chains - -See a [usage example](/docs/integrations/graphs/falkordb). - -```python -from langchain_community.chains.graph_qa.falkordb import FalkorDBQAChain -``` - -## Memory - -See a [usage example](/docs/integrations/memory/falkordb_chat_message_history). - -```python -from langchain_falkordb import FalkorDBChatMessageHistory -``` diff --git a/docs/docs/tutorials/agents.ipynb b/docs/docs/tutorials/agents.ipynb index 5b9e677459f..4b513b2f427 100644 --- a/docs/docs/tutorials/agents.ipynb +++ b/docs/docs/tutorials/agents.ipynb @@ -215,7 +215,7 @@ "\n", "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", "\n", - "\n" + "\n" ] }, { diff --git a/docs/docs/tutorials/chatbot.ipynb b/docs/docs/tutorials/chatbot.ipynb index bddd9d08b13..86020175f26 100644 --- a/docs/docs/tutorials/chatbot.ipynb +++ b/docs/docs/tutorials/chatbot.ipynb @@ -108,7 +108,7 @@ "\n", "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", "\n", - "\n" + "\n" ] }, { @@ -935,7 +935,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": ".venv", "language": "python", "name": "python3" }, @@ -949,7 +949,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.4" + "version": "3.11.4" } }, "nbformat": 4, diff --git a/docs/docs/tutorials/llm_chain.ipynb b/docs/docs/tutorials/llm_chain.ipynb index c1b47f06071..0b9547568b2 100644 --- a/docs/docs/tutorials/llm_chain.ipynb +++ b/docs/docs/tutorials/llm_chain.ipynb @@ -91,7 +91,7 @@ "\n", "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", "\n", - "\n" + "\n" ] }, { diff --git a/docs/docs/versions/migrating_memory/chat_history.ipynb b/docs/docs/versions/migrating_memory/chat_history.ipynb index 164e897369f..0e052fd0a32 100644 --- a/docs/docs/versions/migrating_memory/chat_history.ipynb +++ b/docs/docs/versions/migrating_memory/chat_history.ipynb @@ -194,7 +194,7 @@ "id": "4c0766af-a3b3-4293-b253-3a10f365ab5d", "metadata": {}, "source": [ - ":::hint\n", + ":::tip\n", "\n", "This also supports streaming LLM content token by token if using langgraph >= 0.2.28.\n", ":::" diff --git a/docs/src/theme/ChatModelTabs.js b/docs/src/theme/ChatModelTabs.js index d2c8fc0ba2c..9c63ece47a9 100644 --- a/docs/src/theme/ChatModelTabs.js +++ b/docs/src/theme/ChatModelTabs.js @@ -91,29 +91,7 @@ export const CustomDropdown = 
({ selectedOption, options, onSelect, modelType }) /** * @typedef {Object} ChatModelTabsProps - Component props. - * @property {string} [openaiParams] - Parameters for OpenAI chat model. Defaults to `model="gpt-3.5-turbo-0125"` - * @property {string} [anthropicParams] - Parameters for Anthropic chat model. Defaults to `model="claude-3-sonnet-20240229"` - * @property {string} [cohereParams] - Parameters for Cohere chat model. Defaults to `model="command-r-plus"` - * @property {string} [fireworksParams] - Parameters for Fireworks chat model. Defaults to `model="accounts/fireworks/models/mixtral-8x7b-instruct"` - * @property {string} [groqParams] - Parameters for Groq chat model. Defaults to `model="llama3-8b-8192"` - * @property {string} [mistralParams] - Parameters for Mistral chat model. Defaults to `model="mistral-large-latest"` - * @property {string} [googleParams] - Parameters for Google chat model. Defaults to `model="gemini-pro"` - * @property {string} [togetherParams] - Parameters for Together chat model. Defaults to `model="mistralai/Mixtral-8x7B-Instruct-v0.1"` - * @property {string} [nvidiaParams] - Parameters for Nvidia NIM model. Defaults to `model="meta/llama3-70b-instruct"` - * @property {string} [databricksParams] - Parameters for Databricks model. Defaults to `endpoint="databricks-meta-llama-3-1-70b-instruct"` - * @property {string} [awsBedrockParams] - Parameters for AWS Bedrock chat model. - * @property {boolean} [hideOpenai] - Whether or not to hide OpenAI chat model. - * @property {boolean} [hideAnthropic] - Whether or not to hide Anthropic chat model. - * @property {boolean} [hideCohere] - Whether or not to hide Cohere chat model. - * @property {boolean} [hideFireworks] - Whether or not to hide Fireworks chat model. - * @property {boolean} [hideGroq] - Whether or not to hide Groq chat model. - * @property {boolean} [hideMistral] - Whether or not to hide Mistral chat model. - * @property {boolean} [hideGoogle] - Whether or not to hide Google VertexAI chat model. - * @property {boolean} [hideTogether] - Whether or not to hide Together chat model. - * @property {boolean} [hideAzure] - Whether or not to hide Microsoft Azure OpenAI chat model. - * @property {boolean} [hideNvidia] - Whether or not to hide NVIDIA NIM model. - * @property {boolean} [hideAWS] - Whether or not to hide AWS models. - * @property {boolean} [hideDatabricks] - Whether or not to hide Databricks models. + * @property {Object} [overrideParams] - An object for overriding the default parameters for each chat model, e.g. `{ openai: { model: "gpt-4o-mini" } }` * @property {string} [customVarName] - Custom variable name for the model. Defaults to `model`. */ @@ -121,198 +99,151 @@ export const CustomDropdown = ({ selectedOption, options, onSelect, modelType }) * @param {ChatModelTabsProps} props - Component props. */ export default function ChatModelTabs(props) { - const [selectedModel, setSelectedModel] = useState("Groq"); + const [selectedModel, setSelectedModel] = useState("groq"); const { - openaiParams, - anthropicParams, - cohereParams, - fireworksParams, - groqParams, - mistralParams, - googleParams, - togetherParams, - azureParams, - nvidiaParams, - awsBedrockParams, - databricksParams, - hideOpenai, - hideAnthropic, - hideCohere, - hideFireworks, - hideGroq, - hideMistral, - hideGoogle, - hideTogether, - hideAzure, - hideNvidia, - hideAWS, - hideDatabricks, + overrideParams, customVarName, } = props; - const openAIParamsOrDefault = openaiParams ?? 
`model="gpt-4o-mini"`; - const anthropicParamsOrDefault = - anthropicParams ?? `model="claude-3-5-sonnet-20240620"`; - const cohereParamsOrDefault = cohereParams ?? `model="command-r-plus"`; - const fireworksParamsOrDefault = - fireworksParams ?? - `model="accounts/fireworks/models/llama-v3p1-70b-instruct"`; - const groqParamsOrDefault = groqParams ?? `model="llama3-8b-8192"`; - const mistralParamsOrDefault = - mistralParams ?? `model="mistral-large-latest"`; - const googleParamsOrDefault = googleParams ?? `model="gemini-1.5-flash"`; - const togetherParamsOrDefault = - togetherParams ?? - `\n base_url="https://api.together.xyz/v1",\n api_key=os.environ["TOGETHER_API_KEY"],\n model="mistralai/Mixtral-8x7B-Instruct-v0.1",\n`; - const azureParamsOrDefault = - azureParams ?? - `\n azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],\n azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"],\n openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],\n`; - const nvidiaParamsOrDefault = nvidiaParams ?? `model="meta/llama3-70b-instruct"` - const awsBedrockParamsOrDefault = awsBedrockParams ?? `model="anthropic.claude-3-5-sonnet-20240620-v1:0",\n beta_use_converse_api=True`; - const databricksParamsOrDefault = databricksParams ?? `endpoint="databricks-meta-llama-3-1-70b-instruct"` - const llmVarName = customVarName ?? "model"; const tabItems = [ { - value: "Groq", + value: "groq", label: "Groq", - text: `from langchain_groq import ChatGroq\n\n${llmVarName} = ChatGroq(${groqParamsOrDefault})`, + model: "llama3-8b-8192", apiKeyName: "GROQ_API_KEY", packageName: "langchain-groq", - shouldHide: hideGroq, }, { - value: "OpenAI", + value: "openai", label: "OpenAI", - text: `from langchain_openai import ChatOpenAI\n\n${llmVarName} = ChatOpenAI(${openAIParamsOrDefault})`, + model: "gpt-4o-mini", apiKeyName: "OPENAI_API_KEY", packageName: "langchain-openai", - shouldHide: hideOpenai, }, { - value: "Anthropic", + value: "anthropic", label: "Anthropic", - text: `from langchain_anthropic import ChatAnthropic\n\n${llmVarName} = ChatAnthropic(${anthropicParamsOrDefault})`, + model: "claude-3-5-sonnet-latest", apiKeyName: "ANTHROPIC_API_KEY", packageName: "langchain-anthropic", - shouldHide: hideAnthropic, }, { - value: "Azure", + value: "azure", label: "Azure", - text: `from langchain_openai import AzureChatOpenAI\n\n${llmVarName} = AzureChatOpenAI(${azureParamsOrDefault})`, + text: `from langchain_openai import AzureChatOpenAI + +${llmVarName} = AzureChatOpenAI( + azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], + azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"], + openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"], +)`, apiKeyName: "AZURE_OPENAI_API_KEY", packageName: "langchain-openai", - shouldHide: hideAzure, }, { - value: "Google", - label: "Google", - text: `from langchain_google_vertexai import ChatVertexAI\n\n${llmVarName} = ChatVertexAI(${googleParamsOrDefault})`, + value: "google_vertexai", + label: "Google Vertex", + model: "gemini-2.0-flash", apiKeyText: "# Ensure your VertexAI credentials are configured", packageName: "langchain-google-vertexai", - shouldHide: hideGoogle, }, { - value: "AWS", + value: "aws", label: "AWS", - text: `from langchain_aws import ChatBedrock\n\n${llmVarName} = ChatBedrock(${awsBedrockParamsOrDefault})`, + model: "anthropic.claude-3-5-sonnet-20240620-v1:0", + kwargs: "beta_use_converse_api=True", apiKeyText: "# Ensure your AWS credentials are configured", packageName: "langchain-aws", - shouldHide: hideAWS, }, { - value: "Cohere", + value: 
"cohere", label: "Cohere", - text: `from langchain_cohere import ChatCohere\n\n${llmVarName} = ChatCohere(${cohereParamsOrDefault})`, + model: "command-r-plus", apiKeyName: "COHERE_API_KEY", packageName: "langchain-cohere", - shouldHide: hideCohere, }, { - value: "NVIDIA", + value: "nvidia", label: "NVIDIA", - text: `from langchain_nvidia_ai_endpoints import ChatNVIDIA\n\n${llmVarName} = ChatNVIDIA(${nvidiaParamsOrDefault})`, + model: "meta/llama3-70b-instruct", apiKeyName: "NVIDIA_API_KEY", packageName: "langchain-nvidia-ai-endpoints", - shouldHide: hideNvidia, }, { - value: "FireworksAI", + value: "fireworks", label: "Fireworks AI", - text: `from langchain_fireworks import ChatFireworks\n\n${llmVarName} = ChatFireworks(${fireworksParamsOrDefault})`, + model: "accounts/fireworks/models/llama-v3p1-70b-instruct", apiKeyName: "FIREWORKS_API_KEY", packageName: "langchain-fireworks", - shouldHide: hideFireworks, }, { - value: "MistralAI", + value: "mistralai", label: "Mistral AI", - text: `from langchain_mistralai import ChatMistralAI\n\n${llmVarName} = ChatMistralAI(${mistralParamsOrDefault})`, + model: "mistral-large-latest", apiKeyName: "MISTRAL_API_KEY", packageName: "langchain-mistralai", - shouldHide: hideMistral, }, { - value: "TogetherAI", + value: "together", label: "Together AI", - text: `from langchain_openai import ChatOpenAI\n\n${llmVarName} = ChatOpenAI(${togetherParamsOrDefault})`, + model: "mistralai/Mixtral-8x7B-Instruct-v0.1", apiKeyName: "TOGETHER_API_KEY", - packageName: "langchain-openai", - shouldHide: hideTogether, + packageName: "langchain-together", }, { - value: "Databricks", + value: "databricks", label: "Databricks", - text: `from databricks_langchain import ChatDatabricks\n\nos.environ["DATABRICKS_HOST"] = "https://example.staging.cloud.databricks.com/serving-endpoints"\n\n${llmVarName} = ChatDatabricks(${databricksParamsOrDefault})`, + text: `from databricks_langchain import ChatDatabricks\n\nos.environ["DATABRICKS_HOST"] = "https://example.staging.cloud.databricks.com/serving-endpoints"\n\n${llmVarName} = ChatDatabricks(endpoint="databricks-meta-llama-3-1-70b-instruct")`, apiKeyName: "DATABRICKS_TOKEN", packageName: "databricks-langchain", - shouldHide: hideDatabricks, }, - ]; + ].map((item) => ({ + ...item, + ...overrideParams?.[item.value], + })); const modelOptions = tabItems - .filter((item) => !item.shouldHide) .map((item) => ({ value: item.value, label: item.label, - text: item.text, - apiKeyName: item.apiKeyName, - apiKeyText: item.apiKeyText, - packageName: item.packageName, })); -const selectedOption = modelOptions.find( - (option) => option.value === selectedModel -); + const selectedTabItem = tabItems.find( + (option) => option.value === selectedModel + ); let apiKeyText = ""; -if (selectedOption.apiKeyName) { +if (selectedTabItem.apiKeyName) { apiKeyText = `import getpass import os -if not os.environ.get("${selectedOption.apiKeyName}"): - os.environ["${selectedOption.apiKeyName}"] = getpass.getpass("Enter API key for ${selectedOption.label}: ")`; - } else if (selectedOption.apiKeyText) { - apiKeyText = selectedOption.apiKeyText; +if not os.environ.get("${selectedTabItem.apiKeyName}"): + os.environ["${selectedTabItem.apiKeyName}"] = getpass.getpass("Enter API key for ${selectedTabItem.label}: ")`; + } else if (selectedTabItem.apiKeyText) { + apiKeyText = selectedTabItem.apiKeyText; } -return ( -
-  <div>
-    <CustomDropdown
-      selectedOption={selectedOption}
-      options={modelOptions}
-      onSelect={setSelectedModel}
-      modelType="chat"
-    />
-
-    <CodeBlock language="bash">
-      {`pip install -qU ${selectedOption.packageName}`}
-    </CodeBlock>
-    <CodeBlock language="python">
-      {apiKeyText ? apiKeyText + "\n\n" + selectedOption.text : selectedOption.text}
-    </CodeBlock>
-  </div>
-);
+  const initModelText = selectedTabItem?.text || `from langchain.chat_models import init_chat_model
+
+${llmVarName} = init_chat_model("${selectedTabItem.model}", model_provider="${selectedTabItem.value}"${selectedTabItem?.kwargs ? `, ${selectedTabItem.kwargs}` : ""})`;
+
+  return (
+    <div>
+      <CustomDropdown
+        selectedOption={selectedTabItem}
+        options={modelOptions}
+        onSelect={setSelectedModel}
+        modelType="chat"
+      />
+
+      <CodeBlock language="bash">
+        {`pip install -qU langchain ${selectedTabItem.packageName}`}
+      </CodeBlock>
+      <CodeBlock language="python">
+        {apiKeyText ? apiKeyText + "\n\n" + initModelText : initModelText}
+      </CodeBlock>
+    </div>
+ ); } \ No newline at end of file diff --git a/libs/langchain/langchain/chat_models/base.py b/libs/langchain/langchain/chat_models/base.py index b6c52312bb5..8799f627e0b 100644 --- a/libs/langchain/langchain/chat_models/base.py +++ b/libs/langchain/langchain/chat_models/base.py @@ -117,6 +117,8 @@ def init_chat_model( - 'groq' -> langchain-groq - 'ollama' -> langchain-ollama - 'google_anthropic_vertex' -> langchain-google-vertexai + - 'deepseek' -> langchain-deepseek + - 'nvidia' -> langchain-nvidia-ai-endpoints Will attempt to infer model_provider from model if not specified. The following providers will be inferred based on these model prefixes: @@ -421,6 +423,11 @@ def _init_chat_model_helper( from langchain_deepseek import ChatDeepSeek return ChatDeepSeek(model=model, **kwargs) + elif model_provider == "nvidia": + _check_pkg("langchain_nvidia_ai_endpoints") + from langchain_nvidia_ai_endpoints import ChatNVIDIA + + return ChatNVIDIA(model=model, **kwargs) else: supported = ", ".join(_SUPPORTED_PROVIDERS) raise ValueError(
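Taken together, the `ChatModelTabs.js` and `base.py` changes above mean every tab now renders an `init_chat_model` snippet instead of a provider-specific constructor import, and `model_provider="nvidia"` becomes a valid dispatch target. Below is a minimal sketch of the pattern the updated component generates, exercising the new NVIDIA branch; it assumes `langchain` and `langchain-nvidia-ai-endpoints` are installed, and uses the tab's default model name, which is not a requirement of the API:

```python
import getpass
import os

from langchain.chat_models import init_chat_model

# Mirror the apiKeyText block that ChatModelTabs renders above each snippet:
# prompt for the key only when it is not already in the environment.
if not os.environ.get("NVIDIA_API_KEY"):
    os.environ["NVIDIA_API_KEY"] = getpass.getpass("Enter API key for NVIDIA: ")

# With this patch, model_provider="nvidia" resolves to ChatNVIDIA from
# langchain_nvidia_ai_endpoints via the new elif branch in base.py.
model = init_chat_model("meta/llama3-70b-instruct", model_provider="nvidia")

print(model.invoke("Hello, world!").content)
```

Because `init_chat_model` dispatches on `model_provider` at call time, the generated snippets no longer need a per-provider import, which is what lets the JS component collapse its per-provider `*Params` and `hide*` props into the single `overrideParams` object.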