Mirror of https://github.com/hwchase17/langchain.git (synced 2025-06-23 15:19:33 +00:00)
docs: update document examples (#31006)
## Description: While following the docs I found a couple of small issues. This fixes some unused imports on the [extraction page](https://python.langchain.com/docs/tutorials/extraction/#the-extractor) and updates the examples on the [classification page](https://python.langchain.com/docs/tutorials/classification/#quickstart) so they are independent of the specific chat model.
This commit is contained in: parent a7903280dd, commit 09c1991e96
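For context, here is a minimal sketch of the pattern the updated classification example follows. The Classification.language field, the structured_llm wrapper, the tagging_prompt/inp variable names, and the sample inputs come from the hunks below; the init_chat_model initialization, the prompt wording, and the extra sentiment field are assumptions added only to make the sketch self-contained.

# Rough sketch, not the notebook verbatim: model initialization and prompt
# text are assumptions; the structured-output pattern mirrors the diff below.
from langchain.chat_models import init_chat_model
from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel, Field


class Classification(BaseModel):
    sentiment: str = Field(description="The sentiment of the text")  # assumed field
    language: str = Field(description="The language the text is written in")


tagging_prompt = ChatPromptTemplate.from_template(
    "Extract the desired information from the following passage.\n\n"
    "Only extract the properties mentioned in the 'Classification' function.\n\n"
    "Passage:\n{input}\n"
)

# Any chat model that supports structured output can be plugged in here.
llm = init_chat_model("gpt-4o-mini", model_provider="openai")  # assumed initialization

# Structured LLM
structured_llm = llm.with_structured_output(Classification)

inp = "Estoy increiblemente contento de haberte conocido! Creo que seremos muy buenos amigos!"
prompt = tagging_prompt.invoke({"input": inp})
response = structured_llm.invoke(prompt)
print(response.model_dump())

The point of the change is that structured_llm wraps whatever chat model llm the reader configured earlier, instead of hard-coding ChatOpenAI in the example cell.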
Changes to the classification tutorial notebook:

@@ -89,7 +89,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 3,
+"execution_count": null,
 "id": "39f3ce3e",
 "metadata": {},
 "outputs": [],
@@ -118,15 +118,13 @@
 " language: str = Field(description=\"The language the text is written in\")\n",
 "\n",
 "\n",
-"# LLM\n",
-"llm = ChatOpenAI(temperature=0, model=\"gpt-4o-mini\").with_structured_output(\n",
-" Classification\n",
-")"
+"# Structured LLM\n",
+"structured_llm = llm.with_structured_output(Classification)"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": null,
 "id": "5509b6a6",
 "metadata": {},
 "outputs": [
@@ -144,7 +142,7 @@
 "source": [
 "inp = \"Estoy increiblemente contento de haberte conocido! Creo que seremos muy buenos amigos!\"\n",
 "prompt = tagging_prompt.invoke({\"input\": inp})\n",
-"response = llm.invoke(prompt)\n",
+"response = structured_llm.invoke(prompt)\n",
 "\n",
 "response"
 ]
@@ -159,7 +157,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 10,
+"execution_count": null,
 "id": "9154474c",
 "metadata": {},
 "outputs": [
@@ -177,7 +175,7 @@
 "source": [
 "inp = \"Estoy muy enojado con vos! Te voy a dar tu merecido!\"\n",
 "prompt = tagging_prompt.invoke({\"input\": inp})\n",
-"response = llm.invoke(prompt)\n",
+"response = structured_llm.invoke(prompt)\n",
 "\n",
 "response.model_dump()"
 ]
Changes to the extraction tutorial notebook:

@@ -145,15 +145,12 @@
 },
 {
 "cell_type": "code",
-"execution_count": 3,
+"execution_count": null,
 "id": "a5e490f6-35ad-455e-8ae4-2bae021583ff",
 "metadata": {},
 "outputs": [],
 "source": [
-"from typing import Optional\n",
-"\n",
 "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
-"from pydantic import BaseModel, Field\n",
 "\n",
 "# Define a custom prompt to provide instructions and any additional context.\n",
 "# 1) You can add examples into the prompt template to improve extraction quality\n",
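For reference, a minimal sketch of the extraction prompt cell after the import cleanup above. Only the remaining import and the two comments come from the diff; the prompt_template name, the system-message wording, and the {text} variable are illustrative assumptions, and the MessagesPlaceholder import kept by the diff is omitted here because this snippet does not use it.

# Sketch of the cleaned-up cell, assuming the prompt wording; only the import
# and the two comments are taken from the diff.
from langchain_core.prompts import ChatPromptTemplate

# Define a custom prompt to provide instructions and any additional context.
# 1) You can add examples into the prompt template to improve extraction quality
prompt_template = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are an expert extraction algorithm. "
            "Only extract relevant information from the text. "
            "If you do not know the value of an attribute asked to extract, "
            "return null for the attribute's value.",
        ),
        ("human", "{text}"),
    ]
)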