docs: nim model name update (#22943)

NIM model name change in a notebook and an MDX file.

Thanks!
Daniel Glogowski 2024-06-15 16:38:28 -04:00 committed by GitHub
parent ada03dd273
commit 892bd4c29b
2 changed files with 3 additions and 3 deletions


@@ -134,7 +134,7 @@
 "from langchain_nvidia_ai_endpoints import ChatNVIDIA\n",
 "\n",
 "# connect to an embedding NIM running at localhost:8000, specifying a specific model\n",
-"llm = ChatNVIDIA(base_url=\"http://localhost:8000/v1\", model=\"meta-llama3-8b-instruct\")"
+"llm = ChatNVIDIA(base_url=\"http://localhost:8000/v1\", model=\"meta/llama3-8b-instruct\")"
 ]
 },
 {
@@ -658,7 +658,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.13"
+"version": "3.10.2"
 }
 },
 "nbformat": 4,


@@ -62,7 +62,7 @@ When ready to deploy, you can self-host models with NVIDIA NIM—which is includ
 from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings
 # connect to an chat NIM running at localhost:8000, specifyig a specific model
-llm = ChatNVIDIA(base_url="http://localhost:8000/v1", model="meta-llama3-8b-instruct")
+llm = ChatNVIDIA(base_url="http://localhost:8000/v1", model="meta/llama3-8b-instruct")
 # connect to an embedding NIM running at localhost:8080
 embedder = NVIDIAEmbeddings(base_url="http://localhost:8080/v1")
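For context, here is a minimal self-contained sketch of the corrected usage after this change. It assumes a chat NIM is already serving at localhost:8000 and an embedding NIM at localhost:8080 (the ports and local availability are assumptions taken from the snippets above, not guaranteed by this commit):

```python
# Sketch of the updated docs snippet with the corrected model identifier.
# Assumes NIM containers are running locally on the ports shown.
from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings

# Chat NIM at localhost:8000, using the new "meta/llama3-8b-instruct" name.
llm = ChatNVIDIA(base_url="http://localhost:8000/v1", model="meta/llama3-8b-instruct")
print(llm.invoke("Say hello in one sentence.").content)

# Embedding NIM at localhost:8080.
embedder = NVIDIAEmbeddings(base_url="http://localhost:8080/v1")
print(len(embedder.embed_query("NVIDIA NIM")))
```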