diff --git a/docs/modules/models/llms/examples/llm_caching.ipynb b/docs/modules/models/llms/examples/llm_caching.ipynb
index 149d7969776..1f51e3bf6f2 100644
--- a/docs/modules/models/llms/examples/llm_caching.ipynb
+++ b/docs/modules/models/llms/examples/llm_caching.ipynb
@@ -16,10 +16,15 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.llms import OpenAI"
+    "import langchain\n",
+    "from langchain.llms import OpenAI\n",
+    "\n",
+    "# To make the caching really obvious, lets use a slower model.\n",
+    "llm = OpenAI(model_name=\"text-davinci-002\", n=2, best_of=2)"
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "b50f0598",
    "metadata": {},
@@ -34,22 +39,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import langchain\n",
     "from langchain.cache import InMemoryCache\n",
     "langchain.llm_cache = InMemoryCache()"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "id": "f69f6283",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# To make the caching really obvious, lets use a slower model.\n",
-    "llm = OpenAI(model_name=\"text-davinci-002\", n=2, best_of=2)"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": 4,
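
For context, the cells this diff rearranges read end to end like the sketch below. This is a minimal reconstruction from the diff content itself, assuming an OPENAI_API_KEY is set in the environment and a LangChain version of this era where langchain.llm_cache and the callable OpenAI LLM exist; the "Tell me a joke" prompt is illustrative, not from the diff.

    import langchain
    from langchain.llms import OpenAI
    from langchain.cache import InMemoryCache

    # Cache completions in process memory; a repeated identical prompt
    # is served from the cache instead of hitting the API again.
    langchain.llm_cache = InMemoryCache()

    # To make the caching really obvious, let's use a slower model.
    llm = OpenAI(model_name="text-davinci-002", n=2, best_of=2)

    llm("Tell me a joke")  # first call: slow, goes to the API
    llm("Tell me a joke")  # second call: fast, answered from the cache

Moving the llm = OpenAI(...) construction into the import cell (and dropping the now-duplicate standalone cell) means the model is defined once before any of the per-backend cache sections, so each section only has to swap out the langchain.llm_cache assignment.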