mirror of
https://github.com/hwchase17/langchain.git
synced 2025-06-23 07:09:31 +00:00
docs: update document examples (#31006)
## Description: While following the docs I found a couple of small issues in them. This change removes some unused imports on the [extraction page](https://python.langchain.com/docs/tutorials/extraction/#the-extractor) and updates the examples on the [classification page](https://python.langchain.com/docs/tutorials/classification/#quickstart) to be independent of the chat model.
This commit is contained in:
parent
a7903280dd
commit
09c1991e96
@ -89,7 +89,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": null,
|
||||
"id": "39f3ce3e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@ -118,15 +118,13 @@
|
||||
" language: str = Field(description=\"The language the text is written in\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# LLM\n",
|
||||
"llm = ChatOpenAI(temperature=0, model=\"gpt-4o-mini\").with_structured_output(\n",
|
||||
" Classification\n",
|
||||
")"
|
||||
"# Structured LLM\n",
|
||||
"structured_llm = llm.with_structured_output(Classification)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": null,
|
||||
"id": "5509b6a6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@ -144,7 +142,7 @@
|
||||
"source": [
|
||||
"inp = \"Estoy increiblemente contento de haberte conocido! Creo que seremos muy buenos amigos!\"\n",
|
||||
"prompt = tagging_prompt.invoke({\"input\": inp})\n",
|
||||
"response = llm.invoke(prompt)\n",
|
||||
"response = structured_llm.invoke(prompt)\n",
|
||||
"\n",
|
||||
"response"
|
||||
]
|
||||
@ -159,7 +157,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": null,
|
||||
"id": "9154474c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@ -177,7 +175,7 @@
|
||||
"source": [
|
||||
"inp = \"Estoy muy enojado con vos! Te voy a dar tu merecido!\"\n",
|
||||
"prompt = tagging_prompt.invoke({\"input\": inp})\n",
|
||||
"response = llm.invoke(prompt)\n",
|
||||
"response = structured_llm.invoke(prompt)\n",
|
||||
"\n",
|
||||
"response.model_dump()"
|
||||
]
|
||||
|
@ -145,15 +145,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": null,
|
||||
"id": "a5e490f6-35ad-455e-8ae4-2bae021583ff",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Optional\n",
|
||||
"\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from pydantic import BaseModel, Field\n",
|
||||
"\n",
|
||||
"# Define a custom prompt to provide instructions and any additional context.\n",
|
||||
"# 1) You can add examples into the prompt template to improve extraction quality\n",
|
||||
|
Loading…
Reference in New Issue
Block a user