docs: update document examples (#31006)

## Description:

While following the tutorials, I found a couple of small issues in the docs.

This fixes some unused imports on the [extraction
page](https://python.langchain.com/docs/tutorials/extraction/#the-extractor)
and updates the examples on the [classification
page](https://python.langchain.com/docs/tutorials/classification/#quickstart)
so that they are independent of the chosen chat model.
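
For reference, this is the pattern the classification quickstart moves to: the structured model is built from whatever `llm` the reader already initialized (e.g. via `init_chat_model`) instead of a hard-coded `ChatOpenAI`. A minimal sketch; the provider, the model name, and the fields other than `language` are reconstructed from the tutorial, not from this diff:

```python
from langchain.chat_models import init_chat_model
from pydantic import BaseModel, Field


class Classification(BaseModel):
    sentiment: str = Field(description="The sentiment of the text")
    aggressiveness: int = Field(
        description="How aggressive the text is on a scale from 1 to 10"
    )
    language: str = Field(description="The language the text is written in")


# Any chat model with structured-output support works here; the provider and
# model name are placeholders, not something this change prescribes.
llm = init_chat_model("gpt-4o-mini", model_provider="openai")

# Model-agnostic: reuse the already-initialized `llm` rather than
# instantiating ChatOpenAI directly inside the example.
structured_llm = llm.with_structured_output(Classification)
```
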
Author: Abderrahmane Gourragui, 2025-04-24 23:07:55 +01:00 (committed by GitHub)
Parent commit: a7903280dd
Commit: 09c1991e96
2 changed files with 8 additions and 13 deletions

### Changed file 1: classification tutorial notebook

@@ -89,7 +89,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"id": "39f3ce3e",
"metadata": {},
"outputs": [],
@@ -118,15 +118,13 @@
" language: str = Field(description=\"The language the text is written in\")\n",
"\n",
"\n",
"# LLM\n",
"llm = ChatOpenAI(temperature=0, model=\"gpt-4o-mini\").with_structured_output(\n",
" Classification\n",
")"
"# Structured LLM\n",
"structured_llm = llm.with_structured_output(Classification)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": null,
"id": "5509b6a6",
"metadata": {},
"outputs": [
@@ -144,7 +142,7 @@
"source": [
"inp = \"Estoy increiblemente contento de haberte conocido! Creo que seremos muy buenos amigos!\"\n",
"prompt = tagging_prompt.invoke({\"input\": inp})\n",
"response = llm.invoke(prompt)\n",
"response = structured_llm.invoke(prompt)\n",
"\n",
"response"
]
@@ -159,7 +157,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": null,
"id": "9154474c",
"metadata": {},
"outputs": [
@@ -177,7 +175,7 @@
"source": [
"inp = \"Estoy muy enojado con vos! Te voy a dar tu merecido!\"\n",
"prompt = tagging_prompt.invoke({\"input\": inp})\n",
"response = llm.invoke(prompt)\n",
"response = structured_llm.invoke(prompt)\n",
"\n",
"response.model_dump()"
]
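
Taken together, the classification hunks leave the invocation cells looking roughly like the sketch below. It continues from the sketch in the description (`structured_llm` is built there); `tagging_prompt` lives in an earlier, unchanged cell, so its wording here is paraphrased:

```python
from langchain_core.prompts import ChatPromptTemplate

# Paraphrased prompt; the exact text sits in an unchanged cell of the notebook.
tagging_prompt = ChatPromptTemplate.from_template(
    """
Extract the desired information from the following passage.

Only extract the properties mentioned in the 'Classification' function.

Passage:
{input}
"""
)

inp = "Estoy increiblemente contento de haberte conocido! Creo que seremos muy buenos amigos!"
prompt = tagging_prompt.invoke({"input": inp})

# `structured_llm` returns a Classification instance, so the result can be
# dumped straight to a dict.
response = structured_llm.invoke(prompt)
print(response.model_dump())
```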

### Changed file 2: extraction tutorial notebook

@@ -145,15 +145,12 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"id": "a5e490f6-35ad-455e-8ae4-2bae021583ff",
"metadata": {},
"outputs": [],
"source": [
"from typing import Optional\n",
"\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from pydantic import BaseModel, Field\n",
"\n",
"# Define a custom prompt to provide instructions and any additional context.\n",
"# 1) You can add examples into the prompt template to improve extraction quality\n",