From 1d06eee3b52e999d54188118899be7aa75681daa Mon Sep 17 00:00:00 2001 From: Lance Martin <122662504+rlancemartin@users.noreply.github.com> Date: Sat, 15 Jul 2023 09:11:18 -0700 Subject: [PATCH] Fix ntbk link in docs (#7755) Minor fix to running to [docs](https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa). --- .../question_answering/local_retrieval_qa.ipynb | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/docs/extras/use_cases/question_answering/local_retrieval_qa.ipynb b/docs/extras/use_cases/question_answering/local_retrieval_qa.ipynb index 84f21a3a520..1ef85c005f7 100644 --- a/docs/extras/use_cases/question_answering/local_retrieval_qa.ipynb +++ b/docs/extras/use_cases/question_answering/local_retrieval_qa.ipynb @@ -7,11 +7,11 @@ "source": [ "# Running LLMs locally\n", "\n", - "The popularity of [PrivateGPT](https://github.com/imartinez/privateGPT) and [GPT4All](https://github.com/nomic-ai/gpt4all) underscore the importance of running LLMs locally.\n", + "The popularity of projects like [PrivateGPT](https://github.com/imartinez/privateGPT), [llama.cpp](https://github.com/ggerganov/llama.cpp), and [GPT4All](https://github.com/nomic-ai/gpt4all) underscore the importance of running LLMs locally.\n", "\n", - "LangChain has integrations with many open source LLMs that can be run locally.\n", + "LangChain has [integrations](https://integrations.langchain.com/) with many open source LLMs that can be run locally.\n", "\n", - "For example, here we show how to run GPT4All locally using both gpt4all embeddings and model." + "For example, here we show how to run `GPT4All` locally (e.g., on your laptop) using local embeddings and a local LLM." 
] }, { @@ -143,9 +143,15 @@ "id": "0d9579a7", "metadata": {}, "source": [ - "[Download the GPT4All model binary]((https://python.langchain.com/docs/modules/model_io/models/llms/integrations/gpt4all)).\n", + "[Download the GPT4All model binary](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/gpt4all).\n", "\n", - "Then, specify the path." + "The Model Explorer on the [GPT4All](https://gpt4all.io/index.html) site is a great way to choose and download a model.\n", + "\n", + "Then, specify the path that you downloaded it to.\n", + "\n", + "E.g., for me, the model lives here:\n", + "\n", + "`/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin`" ] }, {