docs: formatting cleanup (#32188)

* formatting cleanup
* make `init_chat_model` more prominent in list of guides
Mason Daugherty 2025-07-22 15:46:15 -04:00 committed by GitHub
parent 0c4054a7fc
commit a02ad3d192
8 changed files with 87 additions and 100 deletions

View File

@@ -20,8 +20,7 @@ LangChain is a framework that consists of a number of packages.
This package contains base abstractions for different components and ways to compose them together.
The interfaces for core components like chat models, vector stores, tools and more are defined here.
No third-party integrations are defined here.
The dependencies are very lightweight.
**No third-party integrations are defined here.** The dependencies are kept purposefully very lightweight.
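To make the split concrete, here is a minimal sketch of how the two layers relate (assuming `langchain-core` and `langchain-openai` are installed and `OPENAI_API_KEY` is set):

```python
# Base abstractions live in langchain-core and pull in no provider SDKs.
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import HumanMessage

# Concrete integrations live in separate packages such as langchain-openai.
from langchain_openai import ChatOpenAI

model: BaseChatModel = ChatOpenAI(model="gpt-4o")
print(isinstance(model, BaseChatModel))  # True: integrations implement the core interface
print(model.invoke([HumanMessage("hello")]).content)
```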
## langchain

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain>=0.2.8 langchain-openai langchain-anthropic langchain-google-vertexai"
"%pip install -qU langchain langchain-openai langchain-anthropic langchain-google-genai"
]
},
{
@@ -38,7 +38,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 5,
"id": "79e14913-803c-4382-9009-5c6af3d75d35",
"metadata": {
"execution": {
@@ -49,38 +49,15 @@
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/4j/2rz3865x6qg07tx43146py8h0000gn/T/ipykernel_95293/571506279.py:4: LangChainBetaWarning: The function `init_chat_model` is in beta. It is actively being worked on, so the API may change.\n",
" gpt_4o = init_chat_model(\"gpt-4o\", model_provider=\"openai\", temperature=0)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"GPT-4o: I'm an AI created by OpenAI, and I don't have a personal name. How can I assist you today?\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"GPT-4o: Im called ChatGPT. How can I assist you today?\n",
"\n",
"Claude Opus: My name is Claude. It's nice to meet you!\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Gemini 1.5: I am a large language model, trained by Google. \n",
"\n",
"I don't have a name like a person does. You can call me Bard if you like! 😊 \n",
"\n",
"Gemini 2.5: I do not have a name. I am a large language model, trained by Google.\n",
"\n"
]
}
@@ -88,6 +65,10 @@
"source": [
"from langchain.chat_models import init_chat_model\n",
"\n",
"# Don't forget to set your environment variables for the API keys of the respective providers!\n",
"# For example, you can set them in your terminal or in a .env file:\n",
"# export OPENAI_API_KEY=\"your_openai_api_key\"\n",
"\n",
"# Returns a langchain_openai.ChatOpenAI instance.\n",
"gpt_4o = init_chat_model(\"gpt-4o\", model_provider=\"openai\", temperature=0)\n",
"# Returns a langchain_anthropic.ChatAnthropic instance.\n",
@@ -96,13 +77,13 @@
")\n",
"# Returns a langchain_google_vertexai.ChatVertexAI instance.\n",
"gemini_15 = init_chat_model(\n",
" \"gemini-1.5-pro\", model_provider=\"google_vertexai\", temperature=0\n",
" \"gemini-2.5-pro\", model_provider=\"google_genai\", temperature=0\n",
")\n",
"\n",
"# Since all model integrations implement the ChatModel interface, you can use them in the same way.\n",
"print(\"GPT-4o: \" + gpt_4o.invoke(\"what's your name\").content + \"\\n\")\n",
"print(\"Claude Opus: \" + claude_opus.invoke(\"what's your name\").content + \"\\n\")\n",
"print(\"Gemini 1.5: \" + gemini_15.invoke(\"what's your name\").content + \"\\n\")"
"print(\"Gemini 2.5: \" + gemini_15.invoke(\"what's your name\").content + \"\\n\")"
]
},
{
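Because all three models implement the same `BaseChatModel` interface, the rest of the Runnable surface is interchangeable as well. A hedged sketch reusing the models initialized above:

```python
# Streaming works identically across providers.
for chunk in gpt_4o.stream("Tell me a one-line joke"):
    print(chunk.content, end="", flush=True)
print()

# So does batching over several prompts.
for message in claude_opus.batch(["what's your name", "what can you do"]):
    print(message.content)
```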
@@ -117,7 +98,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"id": "0378ccc6-95bc-4d50-be50-fccc193f0a71",
"metadata": {
"execution": {
@@ -131,7 +112,7 @@
"source": [
"gpt_4o = init_chat_model(\"gpt-4o\", temperature=0)\n",
"claude_opus = init_chat_model(\"claude-3-opus-20240229\", temperature=0)\n",
"gemini_15 = init_chat_model(\"gemini-1.5-pro\", temperature=0)"
"gemini_15 = init_chat_model(\"gemini-2.5-pro\", temperature=0)"
]
},
{
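Per the docstring, model and provider can also be packed into a single string using the `'{model_provider}:{model}'` format; a short sketch (assuming the relevant integration packages and API keys are in place):

```python
from langchain.chat_models import init_chat_model

# Equivalent to init_chat_model("gpt-4o", model_provider="openai").
gpt_4o = init_chat_model("openai:gpt-4o", temperature=0)

# The provider is likewise inferred from well-known prefixes, e.g. "claude..." -> anthropic.
claude = init_chat_model("claude-3-5-sonnet-latest", temperature=0)
```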
@@ -146,7 +127,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 7,
"id": "6c037f27-12d7-4e83-811e-4245c0e3ba58",
"metadata": {
"execution": {
@@ -160,10 +141,10 @@
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm an AI created by OpenAI, and I don't have a personal name. How can I assist you today?\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 23, 'prompt_tokens': 11, 'total_tokens': 34}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_25624ae3a5', 'finish_reason': 'stop', 'logprobs': None}, id='run-b41df187-4627-490d-af3c-1c96282d3eb0-0', usage_metadata={'input_tokens': 11, 'output_tokens': 23, 'total_tokens': 34})"
"AIMessage(content='Im called ChatGPT. How can I assist you today?', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 11, 'total_tokens': 24, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-2024-08-06', 'system_fingerprint': 'fp_07871e2ad8', 'id': 'chatcmpl-BwCyyBpMqn96KED6zPhLm4k9SQMiQ', 'service_tier': 'default', 'finish_reason': 'stop', 'logprobs': None}, id='run--fada10c3-4128-406c-b83d-a850d16b365f-0', usage_metadata={'input_tokens': 11, 'output_tokens': 13, 'total_tokens': 24, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}})"
]
},
"execution_count": 4,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -178,7 +159,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 8,
"id": "321e3036-abd2-4e1f-bcc6-606efd036954",
"metadata": {
"execution": {
@@ -192,10 +173,10 @@
{
"data": {
"text/plain": [
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", additional_kwargs={}, response_metadata={'id': 'msg_01Fx9P74A7syoFkwE73CdMMY', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-a0fd2bbd-3b7e-46bf-8d69-a48c7e60b03c-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", additional_kwargs={}, response_metadata={'id': 'msg_01VDGrG9D6yefanbBG9zPJrc', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 11, 'output_tokens': 15, 'server_tool_use': None, 'service_tier': 'standard'}, 'model_name': 'claude-3-5-sonnet-20240620'}, id='run--f0156087-debf-4b4b-9aaa-f3328a81ef92-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26, 'input_token_details': {'cache_read': 0, 'cache_creation': 0}})"
]
},
"execution_count": 5,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
@@ -394,9 +375,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv-2",
"display_name": "langchain",
"language": "python",
"name": "poetry-venv-2"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -408,7 +389,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.10.16"
}
},
"nbformat": 4,

View File

@@ -34,6 +34,8 @@ These are the core building blocks you can use when building applications.
[Chat Models](/docs/concepts/chat_models) are newer forms of language models that take messages in and output a message.
See [supported integrations](/docs/integrations/chat/) for details on getting started with chat models from a specific provider.
- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/)
- [How to: work with local models](/docs/how_to/local_llms)
- [How to: do function/tool calling](/docs/how_to/tool_calling)
- [How to: get models to return structured output](/docs/how_to/structured_output)
- [How to: cache model responses](/docs/how_to/chat_model_caching)
@@ -48,8 +50,6 @@ See [supported integrations](/docs/integrations/chat/) for details on getting st
- [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot)
- [How to: bind model-specific formatted tools](/docs/how_to/tools_model_specific)
- [How to: force a specific tool call](/docs/how_to/tool_choice)
- [How to: work with local models](/docs/how_to/local_llms)
- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/)
- [How to: pass multimodal data directly to models](/docs/how_to/multimodal_inputs/)
### Messages

View File

@@ -13,15 +13,15 @@
"\n",
"This has at least two important benefits:\n",
"\n",
"1. `Privacy`: Your data is not sent to a third party, and it is not subject to the terms of service of a commercial service\n",
"2. `Cost`: There is no inference fee, which is important for token-intensive applications (e.g., [long-running simulations](https://twitter.com/RLanceMartin/status/1691097659262820352?s=20), summarization)\n",
"1. **Privacy**: Your data is not sent to a third party, and it is not subject to the terms of service of a commercial service\n",
"2. **Cost**: There is no inference fee, which is important for token-intensive applications (e.g., [long-running simulations](https://twitter.com/RLanceMartin/status/1691097659262820352?s=20), summarization)\n",
"\n",
"## Overview\n",
"\n",
"Running an LLM locally requires a few things:\n",
"\n",
"1. `Open-source LLM`: An open-source LLM that can be freely modified and shared \n",
"2. `Inference`: Ability to run this LLM on your device w/ acceptable latency\n",
"1. **Open-source LLM**: An open-source LLM that can be freely modified and shared \n",
"2. **Inference**: Ability to run this LLM on your device w/ acceptable latency\n",
"\n",
"### Open-source LLMs\n",
"\n",
@@ -29,8 +29,8 @@
"\n",
"These LLMs can be assessed across at least two dimensions (see figure):\n",
" \n",
"1. `Base model`: What is the base-model and how was it trained?\n",
"2. `Fine-tuning approach`: Was the base-model fine-tuned and, if so, what [set of instructions](https://cameronrwolfe.substack.com/p/beyond-llama-the-power-of-open-llms#%C2%A7alpaca-an-instruction-following-llama-model) was used?\n",
"1. **Base model**: What is the base-model and how was it trained?\n",
"2. **Fine-tuning approach**: Was the base-model fine-tuned and, if so, what [set of instructions](https://cameronrwolfe.substack.com/p/beyond-llama-the-power-of-open-llms#%C2%A7alpaca-an-instruction-following-llama-model) was used?\n",
"\n",
"![Image description](../../static/img/OSS_LLM_overview.png)\n",
"\n",
@@ -51,8 +51,8 @@
"\n",
"In general, these frameworks will do a few things:\n",
"\n",
"1. `Quantization`: Reduce the memory footprint of the raw model weights\n",
"2. `Efficient implementation for inference`: Support inference on consumer hardware (e.g., CPU or laptop GPU)\n",
"1. **Quantization**: Reduce the memory footprint of the raw model weights\n",
"2. **Efficient implementation for inference**: Support inference on consumer hardware (e.g., CPU or laptop GPU)\n",
"\n",
"In particular, see [this excellent post](https://finbarr.ca/how-is-llama-cpp-possible/) on the importance of quantization.\n",
"\n",
@@ -679,11 +679,17 @@
"\n",
"In general, use cases for local LLMs can be driven by at least two factors:\n",
"\n",
"* `Privacy`: private data (e.g., journals, etc) that a user does not want to share \n",
"* `Cost`: text preprocessing (extraction/tagging), summarization, and agent simulations are token-use-intensive tasks\n",
"* **Privacy**: private data (e.g., journals, etc) that a user does not want to share \n",
"* **Cost**: text preprocessing (extraction/tagging), summarization, and agent simulations are token-use-intensive tasks\n",
"\n",
"In addition, [here](https://blog.langchain.dev/using-langsmith-to-support-fine-tuning-of-open-source-llms/) is an overview on fine-tuning, which can utilize open-source LLMs."
]
},
{
"cell_type": "markdown",
"id": "14c2c170",
"metadata": {},
"source": []
}
],
"metadata": {

View File

@@ -4,7 +4,7 @@ The interfaces for core components like chat models, LLMs, vector stores, retrie
and more are defined here. The universal invocation protocol (Runnables) along with
a syntax for combining components (LangChain Expression Language) are also defined here.
No third-party integrations are defined here. The dependencies are kept purposefully
**No third-party integrations are defined here.** The dependencies are kept purposefully
very lightweight.
"""

View File

@@ -86,59 +86,60 @@ def init_chat_model(
config_prefix: Optional[str] = None,
**kwargs: Any,
) -> Union[BaseChatModel, _ConfigurableModel]:
"""Initialize a ChatModel from the model name and provider.
"""Initialize a ChatModel in a single line using the model's name and provider.
**Note:** Must have the integration package corresponding to the model provider
installed.
.. note::
Must have the integration package corresponding to the model provider installed.
You should look at the `provider integration's API reference <https://python.langchain.com/api_reference/reference.html#integrations>`__
to see what parameters are supported by the model.
Args:
model: The name of the model, e.g. "o3-mini", "claude-3-5-sonnet-latest". You can
model: The name of the model, e.g. ``'o3-mini'``, ``'claude-3-5-sonnet-latest'``. You can
also specify model and model provider in a single argument using
'{model_provider}:{model}' format, e.g. "openai:o1".
``'{model_provider}:{model}'`` format, e.g. ``'openai:o1'``.
model_provider: The model provider if not specified as part of model arg (see
above). Supported model_provider values and the corresponding integration
package are:
- 'openai' -> langchain-openai
- 'anthropic' -> langchain-anthropic
- 'azure_openai' -> langchain-openai
- 'azure_ai' -> langchain-azure-ai
- 'google_vertexai' -> langchain-google-vertexai
- 'google_genai' -> langchain-google-genai
- 'bedrock' -> langchain-aws
- 'bedrock_converse' -> langchain-aws
- 'cohere' -> langchain-cohere
- 'fireworks' -> langchain-fireworks
- 'together' -> langchain-together
- 'mistralai' -> langchain-mistralai
- 'huggingface' -> langchain-huggingface
- 'groq' -> langchain-groq
- 'ollama' -> langchain-ollama
- 'google_anthropic_vertex' -> langchain-google-vertexai
- 'deepseek' -> langchain-deepseek
- 'ibm' -> langchain-ibm
- 'nvidia' -> langchain-nvidia-ai-endpoints
- 'xai' -> langchain-xai
- 'perplexity' -> langchain-perplexity
- ``openai`` -> ``langchain-openai``
- ``anthropic`` -> ``langchain-anthropic``
- ``azure_openai`` -> ``langchain-openai``
- ``azure_ai`` -> ``langchain-azure-ai``
- ``google_vertexai`` -> ``langchain-google-vertexai``
- ``google_genai`` -> ``langchain-google-genai``
- ``bedrock`` -> ``langchain-aws``
- ``bedrock_converse`` -> ``langchain-aws``
- ``cohere`` -> ``langchain-cohere``
- ``fireworks`` -> ``langchain-fireworks``
- ``together`` -> ``langchain-together``
- ``mistralai`` -> ``langchain-mistralai``
- ``huggingface`` -> ``langchain-huggingface``
- ``groq`` -> ``langchain-groq``
- ``ollama`` -> ``langchain-ollama``
- ``google_anthropic_vertex`` -> ``langchain-google-vertexai``
- ``deepseek`` -> ``langchain-deepseek``
- ``ibm`` -> ``langchain-ibm``
- ``nvidia`` -> ``langchain-nvidia-ai-endpoints``
- ``xai`` -> ``langchain-xai``
- ``perplexity`` -> ``langchain-perplexity``
Will attempt to infer model_provider from model if not specified. The
following providers will be inferred based on these model prefixes:
- 'gpt-3...' | 'gpt-4...' | 'o1...' -> 'openai'
- 'claude...' -> 'anthropic'
- 'amazon....' -> 'bedrock'
- 'gemini...' -> 'google_vertexai'
- 'command...' -> 'cohere'
- 'accounts/fireworks...' -> 'fireworks'
- 'mistral...' -> 'mistralai'
- 'deepseek...' -> 'deepseek'
- 'grok...' -> 'xai'
- 'sonar...' -> 'perplexity'
configurable_fields: Which model parameters are
configurable:
- ``gpt-3...`` | ``gpt-4...`` | ``o1...`` -> ``openai``
- ``claude...`` -> ``anthropic``
- ``amazon...`` -> ``bedrock``
- ``gemini...`` -> ``google_vertexai``
- ``command...`` -> ``cohere``
- ``accounts/fireworks...`` -> ``fireworks``
- ``mistral...`` -> ``mistralai``
- ``deepseek...`` -> ``deepseek``
- ``grok...`` -> ``xai``
- ``sonar...`` -> ``perplexity``
configurable_fields: Which model parameters are configurable:
- None: No configurable fields.
- "any": All fields are configurable. *See Security Note below.*
- ``'any'``: All fields are configurable. **See Security Note below.**
- Union[List[str], Tuple[str, ...]]: Specified fields are configurable.
Fields are assumed to have config_prefix stripped if there is a
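Putting the two lists above together: if the inferred provider's integration package is missing, `init_chat_model` raises an `ImportError` that names it. A hedged sketch (assumes `langchain-mistralai` is not installed):

```python
from langchain.chat_models import init_chat_model

try:
    # "mistral..." prefix -> provider "mistralai" -> package langchain-mistralai.
    llm = init_chat_model("mistral-large-latest", temperature=0)
except ImportError:
    # Raised when the inferred provider's integration package is absent.
    print("Run: pip install -U langchain-mistralai")
```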
@@ -146,15 +147,15 @@ def init_chat_model(
not specified, then defaults to ``("model", "model_provider")``.
***Security Note***: Setting ``configurable_fields="any"`` means fields like
api_key, base_url, etc. can be altered at runtime, potentially redirecting
``api_key``, ``base_url``, etc. can be altered at runtime, potentially redirecting
model requests to a different service/user. Make sure that if you're
accepting untrusted configurations that you enumerate the
``configurable_fields=(...)`` explicitly.
config_prefix: If config_prefix is a non-empty string then model will be
config_prefix: If ``'config_prefix'`` is a non-empty string then model will be
configurable at runtime via the
``config["configurable"]["{config_prefix}_{param}"]`` keys. If
config_prefix is an empty string then model will be configurable via
``'config_prefix'`` is an empty string then model will be configurable via
``config["configurable"]["{param}"]``.
temperature: Model temperature.
max_tokens: Max output tokens.
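The pieces above compose as follows; a minimal sketch of the documented pattern (assumes `langchain-openai` is installed and `OPENAI_API_KEY` is set):

```python
from langchain.chat_models import init_chat_model

# With no model given up front, a runtime-configurable model is returned.
configurable = init_chat_model(temperature=0, config_prefix="foo")

# config_prefix namespaces the runtime keys: "foo_model", "foo_model_provider", ...
response = configurable.invoke(
    "what's your name",
    config={"configurable": {"foo_model": "gpt-4o"}},
)
print(response.content)
```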

View File

@@ -1,7 +1,7 @@
"""**Embedding models** are wrappers around embedding models
from different APIs and services.
**Embedding models** can be LLMs or not.
Embedding models can be LLMs or not.
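Concretely, the interface defined in this module comes down to two methods. A hedged sketch of a toy implementation (the vectors are illustrative, not real embeddings):

```python
from langchain_core.embeddings import Embeddings

class FakeEmbeddings(Embeddings):
    # Toy implementation of the core interface.
    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        # Real implementations call a model or an API; this hashes to 2-d vectors.
        return [[float(len(t)), float(hash(t) % 97)] for t in texts]

    def embed_query(self, text: str) -> list[float]:
        return self.embed_documents([text])[0]

print(FakeEmbeddings().embed_query("hello"))
```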
**Class hierarchy:**

View File

@@ -1,4 +1,4 @@
"""Interface with the LangChain Hub."""
"""Interface with the `LangChain Hub <https://smith.langchain.com/hub>`__."""
from __future__ import annotations
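Typical usage of this module looks like the following sketch (requires network access; the handle is a public Hub prompt, shown for illustration):

```python
from langchain import hub

# Pull a published prompt from the LangChain Hub by its handle.
prompt = hub.pull("rlm/rag-prompt")  # handle is illustrative
print(prompt.invoke({"context": "...", "question": "..."}).to_string())
```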