From 09c1991e9606ecc4a39adc312e4e97835e9f38ed Mon Sep 17 00:00:00 2001 From: Abderrahmane Gourragui Date: Thu, 24 Apr 2025 23:07:55 +0100 Subject: [PATCH] docs: update document examples (#31006) ## Description: As I was following the docs I found a couple of small issues in the docs. This fixes some unused imports on the [extraction page](https://python.langchain.com/docs/tutorials/extraction/#the-extractor) and updates the examples on the [classification page](https://python.langchain.com/docs/tutorials/classification/#quickstart) to be independent of the chat model. --- docs/docs/tutorials/classification.ipynb | 16 +++++++--------- docs/docs/tutorials/extraction.ipynb | 5 +---- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/docs/docs/tutorials/classification.ipynb b/docs/docs/tutorials/classification.ipynb index 4efafa165c7..85ac988edd9 100644 --- a/docs/docs/tutorials/classification.ipynb +++ b/docs/docs/tutorials/classification.ipynb @@ -89,7 +89,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "39f3ce3e", "metadata": {}, "outputs": [], @@ -118,15 +118,13 @@ " language: str = Field(description=\"The language the text is written in\")\n", "\n", "\n", - "# LLM\n", - "llm = ChatOpenAI(temperature=0, model=\"gpt-4o-mini\").with_structured_output(\n", - " Classification\n", - ")" + "# Structured LLM\n", + "structured_llm = llm.with_structured_output(Classification)" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "5509b6a6", "metadata": {}, "outputs": [ @@ -144,7 +142,7 @@ "source": [ "inp = \"Estoy increiblemente contento de haberte conocido! 
Creo que seremos muy buenos amigos!\"\n", "prompt = tagging_prompt.invoke({\"input\": inp})\n", - "response = llm.invoke(prompt)\n", + "response = structured_llm.invoke(prompt)\n", "\n", "response" ] @@ -159,7 +157,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "id": "9154474c", "metadata": {}, "outputs": [ @@ -177,7 +175,7 @@ "source": [ "inp = \"Estoy muy enojado con vos! Te voy a dar tu merecido!\"\n", "prompt = tagging_prompt.invoke({\"input\": inp})\n", - "response = llm.invoke(prompt)\n", + "response = structured_llm.invoke(prompt)\n", "\n", "response.model_dump()" ] diff --git a/docs/docs/tutorials/extraction.ipynb b/docs/docs/tutorials/extraction.ipynb index 18d444723b2..2c18e52ae95 100644 --- a/docs/docs/tutorials/extraction.ipynb +++ b/docs/docs/tutorials/extraction.ipynb @@ -145,15 +145,12 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "a5e490f6-35ad-455e-8ae4-2bae021583ff", "metadata": {}, "outputs": [], "source": [ - "from typing import Optional\n", - "\n", "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", - "from pydantic import BaseModel, Field\n", "\n", "# Define a custom prompt to provide instructions and any additional context.\n", "# 1) You can add examples into the prompt template to improve extraction quality\n",