docs: Add NVIDIA NIMs to Model Tab and Feature Table (#24146)

**Description:** Add NVIDIA NIMs to Model Tab and LLM Feature Table

---------

Co-authored-by: Hayden Wolff <hwolff@nvidia.com>
Co-authored-by: Erick Friis <erickfriis@gmail.com>
Co-authored-by: Erick Friis <erick@langchain.dev>
Hayden Wolff, 2024-07-26 11:20:52 -07:00, committed by GitHub
parent cda3025ee1
commit 0345990a42
2 changed files with 22 additions and 0 deletions
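
For context, the NVIDIA tab added by this commit renders a short Python snippet built from `nvidiaParamsOrDefault`, the `NVIDIA_API_KEY` key name, and the `langchain-nvidia-ai-endpoints` package. A rough sketch of what that rendered snippet amounts to end to end is shown below; the `getpass` prompt and the final `invoke` call are illustrative assumptions, not part of this commit.

```python
# Sketch of the user-facing snippet produced by the new NVIDIA tab.
# Assumes `pip install -qU langchain-nvidia-ai-endpoints` has been run.
import getpass
import os

# The tab's apiKeyName is NVIDIA_API_KEY; prompt for it if it is not set.
if "NVIDIA_API_KEY" not in os.environ:
    os.environ["NVIDIA_API_KEY"] = getpass.getpass("Enter your NVIDIA API key: ")

from langchain_nvidia_ai_endpoints import ChatNVIDIA

# nvidiaParams defaults to model="meta/llama3-70b-instruct".
model = ChatNVIDIA(model="meta/llama3-70b-instruct")

# Illustrative call (not in the diff): LangChain chat models expose .invoke().
print(model.invoke("Tell me about NVIDIA NIM microservices.").content)
```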


@@ -87,6 +87,14 @@ CHAT_MODEL_FEAT_TABLE = {
"package": "langchain-huggingface",
"link": "/docs/integrations/chat/huggingface/",
},
"ChatNVIDIA": {
"tool_calling": True,
"json_mode": False,
"local": True,
"multimodal": False,
"package": "langchain-nvidia-ai-endpoints",
"link": "/docs/integrations/chat/nvidia_ai_endpoints/",
},
"ChatOllama": {
"tool_calling": True,
"local": True,


@@ -14,6 +14,7 @@ import CodeBlock from "@theme-original/CodeBlock";
* @property {string} [mistralParams] - Parameters for Mistral chat model. Defaults to `model="mistral-large-latest"`
* @property {string} [googleParams] - Parameters for Google chat model. Defaults to `model="gemini-pro"`
* @property {string} [togetherParams] - Parameters for Together chat model. Defaults to `model="mistralai/Mixtral-8x7B-Instruct-v0.1"`
* @property {string} [nvidiaParams] - Parameters for NVIDIA NIM chat model. Defaults to `model="meta/llama3-70b-instruct"`
* @property {boolean} [hideOpenai] - Whether or not to hide OpenAI chat model.
* @property {boolean} [hideAnthropic] - Whether or not to hide Anthropic chat model.
* @property {boolean} [hideCohere] - Whether or not to hide Cohere chat model.
@@ -23,6 +24,7 @@ import CodeBlock from "@theme-original/CodeBlock";
* @property {boolean} [hideGoogle] - Whether or not to hide Google VertexAI chat model.
* @property {boolean} [hideTogether] - Whether or not to hide Together chat model.
* @property {boolean} [hideAzure] - Whether or not to hide Microsoft Azure OpenAI chat model.
* @property {boolean} [hideNvidia] - Whether or not to hide NVIDIA NIM chat model.
* @property {string} [customVarName] - Custom variable name for the model. Defaults to `model`.
*/
@@ -40,6 +42,7 @@ export default function ChatModelTabs(props) {
googleParams,
togetherParams,
azureParams,
nvidiaParams,
hideOpenai,
hideAnthropic,
hideCohere,
@@ -49,6 +52,7 @@ export default function ChatModelTabs(props) {
hideGoogle,
hideTogether,
hideAzure,
hideNvidia,
customVarName,
} = props;
@@ -69,6 +73,7 @@ export default function ChatModelTabs(props) {
const azureParamsOrDefault =
azureParams ??
`\n azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],\n azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"],\n openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],\n`;
const nvidiaParamsOrDefault = nvidiaParams ?? `model="meta/llama3-70b-instruct"`;
const llmVarName = customVarName ?? "model";
@@ -118,6 +123,15 @@ export default function ChatModelTabs(props) {
default: false,
shouldHide: hideCohere,
},
{
value: "NVIDIA",
label: "NVIDIA",
text: `from langchain_nvidia_ai_endpoints import ChatNVIDIA\n\n${llmVarName} = ChatNVIDIA(${nvidiaParamsOrDefault})`,
apiKeyName: "NVIDIA_API_KEY",
packageName: "langchain-nvidia-ai-endpoints",
default: false,
shouldHide: hideNvidia,
},
{
value: "FireworksAI",
label: "FireworksAI",