docs: formatting cleanup (#32188)

* formatting cleanup
* make `init_chat_model` more prominent in list of guides

commit a02ad3d192 (parent 0c4054a7fc)
@@ -20,8 +20,7 @@ LangChain is a framework that consists of a number of packages.
 
 This package contains base abstractions for different components and ways to compose them together.
 The interfaces for core components like chat models, vector stores, tools and more are defined here.
-No third-party integrations are defined here.
-The dependencies are very lightweight.
+**No third-party integrations are defined here.** The dependencies are kept purposefully very lightweight.
 
 ## langchain
 
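To illustrate what "compose them together" means in practice, here is a minimal sketch using only `langchain-core` primitives (no third-party integrations needed); the two-step pipeline itself is an illustrative toy:

from langchain_core.runnables import RunnableLambda

# Two trivial components, chained with the `|` composition operator
# that all Runnables share.
pipeline = RunnableLambda(str.strip) | RunnableLambda(str.upper)

print(pipeline.invoke("  hello langchain  "))  # -> "HELLO LANGCHAIN"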
@@ -25,7 +25,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"%pip install -qU langchain>=0.2.8 langchain-openai langchain-anthropic langchain-google-vertexai"
+"%pip install -qU langchain langchain-openai langchain-anthropic langchain-google-genai"
 ]
 },
 {
@@ -38,7 +38,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 2,
+"execution_count": 5,
 "id": "79e14913-803c-4382-9009-5c6af3d75d35",
 "metadata": {
 "execution": {
@@ -49,38 +49,15 @@
 }
 },
 "outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"/var/folders/4j/2rz3865x6qg07tx43146py8h0000gn/T/ipykernel_95293/571506279.py:4: LangChainBetaWarning: The function `init_chat_model` is in beta. It is actively being worked on, so the API may change.\n",
-" gpt_4o = init_chat_model(\"gpt-4o\", model_provider=\"openai\", temperature=0)\n"
-]
-},
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"GPT-4o: I'm an AI created by OpenAI, and I don't have a personal name. How can I assist you today?\n",
-"\n"
-]
-},
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
+"GPT-4o: I’m called ChatGPT. How can I assist you today?\n",
+"\n",
 "Claude Opus: My name is Claude. It's nice to meet you!\n",
-"\n"
-]
-},
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Gemini 1.5: I am a large language model, trained by Google. \n",
-"\n",
-"I don't have a name like a person does. You can call me Bard if you like! 😊 \n",
 "\n",
+"Gemini 2.5: I do not have a name. I am a large language model, trained by Google.\n",
 "\n"
 ]
 }
@@ -88,6 +65,10 @@
 "source": [
 "from langchain.chat_models import init_chat_model\n",
 "\n",
+"# Don't forget to set your environment variables for the API keys of the respective providers!\n",
+"# For example, you can set them in your terminal or in a .env file:\n",
+"# export OPENAI_API_KEY=\"your_openai_api_key\"\n",
+"\n",
 "# Returns a langchain_openai.ChatOpenAI instance.\n",
 "gpt_4o = init_chat_model(\"gpt-4o\", model_provider=\"openai\", temperature=0)\n",
 "# Returns a langchain_anthropic.ChatAnthropic instance.\n",
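The new comment about API keys deserves a concrete illustration. A common pattern (a sketch, not part of the diff) is to prompt for any missing key at runtime instead of hard-coding it; the variable names are the ones these integrations are known to read:

import getpass
import os

# Prompt for any provider key that isn't already set in the environment.
for var in ("OPENAI_API_KEY", "ANTHROPIC_API_KEY", "GOOGLE_API_KEY"):
    if not os.environ.get(var):
        os.environ[var] = getpass.getpass(f"{var}: ")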
@@ -96,13 +77,13 @@
 ")\n",
 "# Returns a langchain_google_vertexai.ChatVertexAI instance.\n",
 "gemini_15 = init_chat_model(\n",
-" \"gemini-1.5-pro\", model_provider=\"google_vertexai\", temperature=0\n",
+" \"gemini-2.5-pro\", model_provider=\"google_genai\", temperature=0\n",
 ")\n",
 "\n",
 "# Since all model integrations implement the ChatModel interface, you can use them in the same way.\n",
 "print(\"GPT-4o: \" + gpt_4o.invoke(\"what's your name\").content + \"\\n\")\n",
 "print(\"Claude Opus: \" + claude_opus.invoke(\"what's your name\").content + \"\\n\")\n",
-"print(\"Gemini 1.5: \" + gemini_15.invoke(\"what's your name\").content + \"\\n\")"
+"print(\"Gemini 2.5: \" + gemini_15.invoke(\"what's your name\").content + \"\\n\")"
 ]
 },
 {
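Assembled outside the notebook JSON, the pattern this cell settles on amounts to the following sketch (assuming the three integration packages are installed and the API keys above are set; the single-line Claude init is a simplification of the multi-line call in the diff):

from langchain.chat_models import init_chat_model

# Each call returns the provider-specific chat model class,
# but all of them implement the same ChatModel interface.
gpt_4o = init_chat_model("gpt-4o", model_provider="openai", temperature=0)
claude_opus = init_chat_model("claude-3-opus-20240229", model_provider="anthropic", temperature=0)
gemini_25 = init_chat_model("gemini-2.5-pro", model_provider="google_genai", temperature=0)

question = "what's your name"
for name, model in [("GPT-4o", gpt_4o), ("Claude Opus", claude_opus), ("Gemini 2.5", gemini_25)]:
    print(name + ": " + model.invoke(question).content + "\n")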
@@ -117,7 +98,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 3,
+"execution_count": null,
 "id": "0378ccc6-95bc-4d50-be50-fccc193f0a71",
 "metadata": {
 "execution": {
@@ -131,7 +112,7 @@
 "source": [
 "gpt_4o = init_chat_model(\"gpt-4o\", temperature=0)\n",
 "claude_opus = init_chat_model(\"claude-3-opus-20240229\", temperature=0)\n",
-"gemini_15 = init_chat_model(\"gemini-1.5-pro\", temperature=0)"
+"gemini_15 = init_chat_model(\"gemini-2.5-pro\", temperature=0)"
 ]
 },
 {
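The cell above leans on provider inference from the model-name prefix. As a hedged aside (not from the diff), `init_chat_model` also accepts a single `'{model_provider}:{model}'` string, so the provider can be stated explicitly when inference is ambiguous:

from langchain.chat_models import init_chat_model

# Provider inferred from the model-name prefix ("gpt-4..." -> openai).
inferred = init_chat_model("gpt-4o", temperature=0)

# Provider stated explicitly in '{model_provider}:{model}' form.
explicit = init_chat_model("anthropic:claude-3-5-sonnet-latest", temperature=0)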
@@ -146,7 +127,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 4,
+"execution_count": 7,
 "id": "6c037f27-12d7-4e83-811e-4245c0e3ba58",
 "metadata": {
 "execution": {
@@ -160,10 +141,10 @@
 {
 "data": {
 "text/plain": [
-"AIMessage(content=\"I'm an AI created by OpenAI, and I don't have a personal name. How can I assist you today?\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 23, 'prompt_tokens': 11, 'total_tokens': 34}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_25624ae3a5', 'finish_reason': 'stop', 'logprobs': None}, id='run-b41df187-4627-490d-af3c-1c96282d3eb0-0', usage_metadata={'input_tokens': 11, 'output_tokens': 23, 'total_tokens': 34})"
+"AIMessage(content='I’m called ChatGPT. How can I assist you today?', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 11, 'total_tokens': 24, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-2024-08-06', 'system_fingerprint': 'fp_07871e2ad8', 'id': 'chatcmpl-BwCyyBpMqn96KED6zPhLm4k9SQMiQ', 'service_tier': 'default', 'finish_reason': 'stop', 'logprobs': None}, id='run--fada10c3-4128-406c-b83d-a850d16b365f-0', usage_metadata={'input_tokens': 11, 'output_tokens': 13, 'total_tokens': 24, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}})"
 ]
 },
-"execution_count": 4,
+"execution_count": 7,
 "metadata": {},
 "output_type": "execute_result"
 }
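The refreshed `AIMessage` repr above carries a `usage_metadata` dict; a short sketch of reading token counts from a response (the field names are taken from the output above):

from langchain.chat_models import init_chat_model

gpt_4o = init_chat_model("gpt-4o", model_provider="openai", temperature=0)
response = gpt_4o.invoke("what's your name")

# usage_metadata holds input/output/total token counts,
# as seen in the AIMessage repr in the output above.
usage = response.usage_metadata
print(usage["input_tokens"], usage["output_tokens"], usage["total_tokens"])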
@@ -178,7 +159,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 5,
+"execution_count": 8,
 "id": "321e3036-abd2-4e1f-bcc6-606efd036954",
 "metadata": {
 "execution": {
@@ -192,10 +173,10 @@
 {
 "data": {
 "text/plain": [
-"AIMessage(content=\"My name is Claude. It's nice to meet you!\", additional_kwargs={}, response_metadata={'id': 'msg_01Fx9P74A7syoFkwE73CdMMY', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-a0fd2bbd-3b7e-46bf-8d69-a48c7e60b03c-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
+"AIMessage(content=\"My name is Claude. It's nice to meet you!\", additional_kwargs={}, response_metadata={'id': 'msg_01VDGrG9D6yefanbBG9zPJrc', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 11, 'output_tokens': 15, 'server_tool_use': None, 'service_tier': 'standard'}, 'model_name': 'claude-3-5-sonnet-20240620'}, id='run--f0156087-debf-4b4b-9aaa-f3328a81ef92-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26, 'input_token_details': {'cache_read': 0, 'cache_creation': 0}})"
 ]
 },
-"execution_count": 5,
+"execution_count": 8,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -394,9 +375,9 @@
 ],
 "metadata": {
 "kernelspec": {
-"display_name": "poetry-venv-2",
+"display_name": "langchain",
 "language": "python",
-"name": "poetry-venv-2"
+"name": "python3"
 },
 "language_info": {
 "codemirror_mode": {
@@ -408,7 +389,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.9"
+"version": "3.10.16"
 }
 },
 "nbformat": 4,
@@ -34,6 +34,8 @@ These are the core building blocks you can use when building applications.
 [Chat Models](/docs/concepts/chat_models) are newer forms of language models that take messages in and output a message.
 See [supported integrations](/docs/integrations/chat/) for details on getting started with chat models from a specific provider.
 
+- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/)
+- [How to: work with local models](/docs/how_to/local_llms)
 - [How to: do function/tool calling](/docs/how_to/tool_calling)
 - [How to: get models to return structured output](/docs/how_to/structured_output)
 - [How to: cache model responses](/docs/how_to/chat_model_caching)
@@ -48,8 +50,6 @@ See [supported integrations](/docs/integrations/chat/) for details on getting st
 - [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot)
 - [How to: bind model-specific formatted tools](/docs/how_to/tools_model_specific)
 - [How to: force a specific tool call](/docs/how_to/tool_choice)
-- [How to: work with local models](/docs/how_to/local_llms)
-- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/)
 - [How to: pass multimodal data directly to models](/docs/how_to/multimodal_inputs/)
 
 ### Messages
@@ -13,15 +13,15 @@
 "\n",
 "This has at least two important benefits:\n",
 "\n",
-"1. `Privacy`: Your data is not sent to a third party, and it is not subject to the terms of service of a commercial service\n",
-"2. `Cost`: There is no inference fee, which is important for token-intensive applications (e.g., [long-running simulations](https://twitter.com/RLanceMartin/status/1691097659262820352?s=20), summarization)\n",
+"1. **Privacy**: Your data is not sent to a third party, and it is not subject to the terms of service of a commercial service\n",
+"2. **Cost**: There is no inference fee, which is important for token-intensive applications (e.g., [long-running simulations](https://twitter.com/RLanceMartin/status/1691097659262820352?s=20), summarization)\n",
 "\n",
 "## Overview\n",
 "\n",
 "Running an LLM locally requires a few things:\n",
 "\n",
-"1. `Open-source LLM`: An open-source LLM that can be freely modified and shared \n",
-"2. `Inference`: Ability to run this LLM on your device w/ acceptable latency\n",
+"1. **Open-source LLM**: An open-source LLM that can be freely modified and shared \n",
+"2. **Inference**: Ability to run this LLM on your device w/ acceptable latency\n",
 "\n",
 "### Open-source LLMs\n",
 "\n",
@@ -29,8 +29,8 @@
 "\n",
 "These LLMs can be assessed across at least two dimensions (see figure):\n",
 " \n",
-"1. `Base model`: What is the base-model and how was it trained?\n",
-"2. `Fine-tuning approach`: Was the base-model fine-tuned and, if so, what [set of instructions](https://cameronrwolfe.substack.com/p/beyond-llama-the-power-of-open-llms#%C2%A7alpaca-an-instruction-following-llama-model) was used?\n",
+"1. **Base model**: What is the base-model and how was it trained?\n",
+"2. **Fine-tuning approach**: Was the base-model fine-tuned and, if so, what [set of instructions](https://cameronrwolfe.substack.com/p/beyond-llama-the-power-of-open-llms#%C2%A7alpaca-an-instruction-following-llama-model) was used?\n",
 "\n",
 "\n",
 "\n",
@@ -51,8 +51,8 @@
 "\n",
 "In general, these frameworks will do a few things:\n",
 "\n",
-"1. `Quantization`: Reduce the memory footprint of the raw model weights\n",
-"2. `Efficient implementation for inference`: Support inference on consumer hardware (e.g., CPU or laptop GPU)\n",
+"1. **Quantization**: Reduce the memory footprint of the raw model weights\n",
+"2. **Efficient implementation for inference**: Support inference on consumer hardware (e.g., CPU or laptop GPU)\n",
 "\n",
 "In particular, see [this excellent post](https://finbarr.ca/how-is-llama-cpp-possible/) on the importance of quantization.\n",
 "\n",
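To make the "quantization + efficient inference" point concrete, here is a hedged sketch of calling a locally served quantized model through the `langchain-ollama` integration. It assumes an Ollama server is running locally and a model such as `llama3.1` has already been pulled; the model name is illustrative:

from langchain_ollama import ChatOllama

# Ollama serves quantized weights on consumer hardware;
# the LangChain wrapper talks to its local HTTP endpoint.
local_llm = ChatOllama(model="llama3.1", temperature=0)
print(local_llm.invoke("Why is quantization useful for local inference?").content)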
@@ -679,11 +679,17 @@
 "\n",
 "In general, use cases for local LLMs can be driven by at least two factors:\n",
 "\n",
-"* `Privacy`: private data (e.g., journals, etc) that a user does not want to share \n",
-"* `Cost`: text preprocessing (extraction/tagging), summarization, and agent simulations are token-use-intensive tasks\n",
+"* **Privacy**: private data (e.g., journals, etc) that a user does not want to share \n",
+"* **Cost**: text preprocessing (extraction/tagging), summarization, and agent simulations are token-use-intensive tasks\n",
 "\n",
 "In addition, [here](https://blog.langchain.dev/using-langsmith-to-support-fine-tuning-of-open-source-llms/) is an overview on fine-tuning, which can utilize open-source LLMs."
 ]
+},
+{
+"cell_type": "markdown",
+"id": "14c2c170",
+"metadata": {},
+"source": []
 }
 ],
 "metadata": {
@@ -4,7 +4,7 @@ The interfaces for core components like chat models, LLMs, vector stores, retrie
 and more are defined here. The universal invocation protocol (Runnables) along with
 a syntax for combining components (LangChain Expression Language) are also defined here.
 
-No third-party integrations are defined here. The dependencies are kept purposefully
+**No third-party integrations are defined here.** The dependencies are kept purposefully
 very lightweight.
 """
 
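Because the base abstractions live in `langchain-core` with deliberately light dependencies, a custom component only needs that one package. As an illustrative sketch (not part of this commit), a toy chat model can implement the `BaseChatModel` interface directly:

from typing import Any, List, Optional

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult


class EchoChatModel(BaseChatModel):
    """Toy chat model that echoes the last message back."""

    @property
    def _llm_type(self) -> str:
        return "echo"

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        reply = AIMessage(content=str(messages[-1].content))
        return ChatResult(generations=[ChatGeneration(message=reply)])


print(EchoChatModel().invoke("hello").content)  # -> hello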
@@ -86,59 +86,60 @@ def init_chat_model(
 config_prefix: Optional[str] = None,
 **kwargs: Any,
 ) -> Union[BaseChatModel, _ConfigurableModel]:
-"""Initialize a ChatModel from the model name and provider.
+"""Initialize a ChatModel in a single line using the model's name and provider.
 
-**Note:** Must have the integration package corresponding to the model provider
-installed.
+.. note::
+Must have the integration package corresponding to the model provider installed.
+You should look at the `provider integration's API reference <https://python.langchain.com/api_reference/reference.html#integrations>`__
+to see what parameters are supported by the model.
 
 Args:
-model: The name of the model, e.g. "o3-mini", "claude-3-5-sonnet-latest". You can
+model: The name of the model, e.g. ``'o3-mini'``, ``'claude-3-5-sonnet-latest'``. You can
 also specify model and model provider in a single argument using
-'{model_provider}:{model}' format, e.g. "openai:o1".
+``'{model_provider}:{model}'`` format, e.g. ``'openai:o1'``.
 model_provider: The model provider if not specified as part of model arg (see
 above). Supported model_provider values and the corresponding integration
 package are:
 
-- 'openai' -> langchain-openai
-- 'anthropic' -> langchain-anthropic
-- 'azure_openai' -> langchain-openai
-- 'azure_ai' -> langchain-azure-ai
-- 'google_vertexai' -> langchain-google-vertexai
-- 'google_genai' -> langchain-google-genai
-- 'bedrock' -> langchain-aws
-- 'bedrock_converse' -> langchain-aws
-- 'cohere' -> langchain-cohere
-- 'fireworks' -> langchain-fireworks
-- 'together' -> langchain-together
-- 'mistralai' -> langchain-mistralai
-- 'huggingface' -> langchain-huggingface
-- 'groq' -> langchain-groq
-- 'ollama' -> langchain-ollama
-- 'google_anthropic_vertex' -> langchain-google-vertexai
-- 'deepseek' -> langchain-deepseek
-- 'ibm' -> langchain-ibm
-- 'nvidia' -> langchain-nvidia-ai-endpoints
-- 'xai' -> langchain-xai
-- 'perplexity' -> langchain-perplexity
+- ``openai`` -> ``langchain-openai``
+- ``anthropic`` -> ``langchain-anthropic``
+- ``azure_openai`` -> ``langchain-openai``
+- ``azure_ai`` -> ``langchain-azure-ai``
+- ``google_vertexai`` -> ``langchain-google-vertexai``
+- ``google_genai`` -> ``langchain-google-genai``
+- ``bedrock`` -> ``langchain-aws``
+- ``bedrock_converse`` -> ``langchain-aws``
+- ``cohere`` -> ``langchain-cohere``
+- ``fireworks`` -> ``langchain-fireworks``
+- ``together`` -> ``langchain-together``
+- ``mistralai`` -> ``langchain-mistralai``
+- ``huggingface`` -> ``langchain-huggingface``
+- ``groq`` -> ``langchain-groq``
+- ``ollama`` -> ``langchain-ollama``
+- ``google_anthropic_vertex`` -> ``langchain-google-vertexai``
+- ``deepseek`` -> ``langchain-deepseek``
+- ``ibm`` -> ``langchain-ibm``
+- ``nvidia`` -> ``langchain-nvidia-ai-endpoints``
+- ``xai`` -> ``langchain-xai``
+- ``perplexity`` -> ``langchain-perplexity``
 
 Will attempt to infer model_provider from model if not specified. The
 following providers will be inferred based on these model prefixes:
 
-- 'gpt-3...' | 'gpt-4...' | 'o1...' -> 'openai'
-- 'claude...' -> 'anthropic'
-- 'amazon....' -> 'bedrock'
-- 'gemini...' -> 'google_vertexai'
-- 'command...' -> 'cohere'
-- 'accounts/fireworks...' -> 'fireworks'
-- 'mistral...' -> 'mistralai'
-- 'deepseek...' -> 'deepseek'
-- 'grok...' -> 'xai'
-- 'sonar...' -> 'perplexity'
+- ``gpt-3...`` | ``gpt-4...`` | ``o1...`` -> ``openai``
+- ``claude...`` -> ``anthropic``
+- ``amazon...`` -> ``bedrock``
+- ``gemini...`` -> ``google_vertexai``
+- ``command...`` -> ``cohere``
+- ``accounts/fireworks...`` -> ``fireworks``
+- ``mistral...`` -> ``mistralai``
+- ``deepseek...`` -> ``deepseek``
+- ``grok...`` -> ``xai``
+- ``sonar...`` -> ``perplexity``
-configurable_fields: Which model parameters are
-configurable:
+configurable_fields: Which model parameters are configurable:
 
 - None: No configurable fields.
-- "any": All fields are configurable. *See Security Note below.*
+- ``'any'``: All fields are configurable. **See Security Note below.**
 - Union[List[str], Tuple[str, ...]]: Specified fields are configurable.
 
 Fields are assumed to have config_prefix stripped if there is a
@@ -146,15 +147,15 @@ def init_chat_model(
 not specified, then defaults to ``("model", "model_provider")``.
 
 ***Security Note***: Setting ``configurable_fields="any"`` means fields like
-api_key, base_url, etc. can be altered at runtime, potentially redirecting
+``api_key``, ``base_url``, etc. can be altered at runtime, potentially redirecting
 model requests to a different service/user. Make sure that if you're
 accepting untrusted configurations that you enumerate the
 ``configurable_fields=(...)`` explicitly.
 
-config_prefix: If config_prefix is a non-empty string then model will be
+config_prefix: If ``'config_prefix'`` is a non-empty string then model will be
 configurable at runtime via the
 ``config["configurable"]["{config_prefix}_{param}"]`` keys. If
-config_prefix is an empty string then model will be configurable via
+``'config_prefix'`` is an empty string then model will be configurable via
 ``config["configurable"]["{param}"]``.
 temperature: Model temperature.
 max_tokens: Max output tokens.
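A sketch of the behavior the rewritten docstring describes, combining `configurable_fields` with a `config_prefix` (the model names and prefix are illustrative, and switching providers at runtime assumes the relevant integration packages are installed):

from langchain.chat_models import init_chat_model

# Declare which fields may be overridden at runtime; with a prefix,
# overrides are read from config["configurable"]["foo_<param>"].
configurable = init_chat_model(
    "gpt-4o",
    configurable_fields=("model", "temperature"),
    config_prefix="foo",
    temperature=0,
)

# Same runnable, different underlying model chosen per call.
configurable.invoke(
    "what's your name",
    config={"configurable": {"foo_model": "claude-3-5-sonnet-latest"}},
)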
@@ -1,7 +1,7 @@
 """**Embedding models** are wrappers around embedding models
 from different APIs and services.
 
-**Embedding models** can be LLMs or not.
+Embedding models can be LLMs or not.
 
 **Class hierarchy:**
 
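The `Embeddings` interface this docstring introduces is small; a toy implementation (a sketch whose hash-like vectors carry no semantic meaning) shows its shape:

from typing import List

from langchain_core.embeddings import Embeddings


class ToyEmbeddings(Embeddings):
    """Deterministic toy embeddings; real implementations call a model."""

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self.embed_query(t) for t in texts]

    def embed_query(self, text: str) -> List[float]:
        # Not meaningful vectors; just a fixed-size numeric fingerprint.
        return [float(ord(c)) for c in text[:8].ljust(8)]


print(ToyEmbeddings().embed_query("hello"))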
@@ -1,4 +1,4 @@
-"""Interface with the LangChain Hub."""
+"""Interface with the `LangChain Hub <https://smith.langchain.com/hub>`__."""
 
 from __future__ import annotations
 
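The now-linked LangChain Hub is used through `hub.pull`; a short sketch (the prompt handle is a public example and illustrative here):

from langchain import hub

# Pull a versioned prompt from the Hub by its owner/name handle.
prompt = hub.pull("rlm/rag-prompt")
print(prompt.invoke({"context": "LangChain docs", "question": "What is the Hub?"}))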