langchain[patch]: Add deprecation warning to extraction chains (#20224)
Add deprecation warnings to extraction chains
Parent: b65a1d4cfd
Commit: 6470b30173
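For context, a hedged sketch (not part of the commit) of what the change looks like from the caller's side: after this patch, building one of the affected chains should emit a pending-deprecation warning that points users at `with_structured_output`. The schema, model class, and environment below are illustrative assumptions (they presume `langchain-openai` is installed and OPENAI_API_KEY is set).

# Not part of the commit: a minimal sketch of the warning this patch adds.
import warnings

from langchain.chains import create_extraction_chain
from langchain_openai import ChatOpenAI  # placeholder; any tool-calling chat model works

schema = {
    "properties": {"name": {"type": "string"}, "height": {"type": "integer"}},
    "required": ["name"],
}

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # Constructing the chain goes through the @deprecated wrapper added in this commit.
    chain = create_extraction_chain(schema, ChatOpenAI(temperature=0))

# Because the decorator sets pending=True, this is an advisory warning only;
# the chain keeps working, with removal scheduled for 0.3.0.
for w in caught:
    print(f"{w.category.__name__}: {w.message}")

The hunks below add the same `@deprecated(...)` block in front of `create_extraction_chain`, both `create_extraction_chain_pydantic` variants, and `create_openai_fn_runnable`.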
@@ -1,5 +1,6 @@
 from typing import Any, List, Optional
 
+from langchain_core._api import deprecated
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers.openai_functions import (
     JsonKeyOutputFunctionsParser,
@@ -43,6 +44,42 @@ Passage:
 """  # noqa: E501
 
 
+@deprecated(
+    since="0.1.14",
+    message=(
+        "LangChain has introduced a method called `with_structured_output` that "
+        "is available on ChatModels capable of tool calling. "
+        "You can read more about the method here: "
+        "https://python.langchain.com/docs/modules/model_io/chat/structured_output/ "
+        "Please follow our extraction use case documentation for more guidelines "
+        "on how to do information extraction with LLMs: "
+        "https://python.langchain.com/docs/use_cases/extraction/. "
+        "If you notice other issues, please provide "
+        "feedback here: "
+        "https://github.com/langchain-ai/langchain/discussions/18154"
+    ),
+    removal="0.3.0",
+    pending=True,
+    alternative=(
+        """
+        from langchain_core.pydantic_v1 import BaseModel, Field
+        from langchain_anthropic import ChatAnthropic
+
+        class Joke(BaseModel):
+            setup: str = Field(description="The setup of the joke")
+            punchline: str = Field(description="The punchline to the joke")
+
+        # Or any other chat model that supports tools.
+        # Please refer to the documentation of structured_output
+        # to see an up-to-date list of which models support
+        # with_structured_output.
+        model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
+        structured_llm = model.with_structured_output(Joke)
+        structured_llm.invoke("Tell me a joke about cats. "
+                              "Make sure to call the Joke function.")
+        """
+    ),
+)
 def create_extraction_chain(
     schema: dict,
     llm: BaseLanguageModel,
@@ -78,6 +115,42 @@ def create_extraction_chain(
     return chain
 
 
+@deprecated(
+    since="0.1.14",
+    message=(
+        "LangChain has introduced a method called `with_structured_output` that "
+        "is available on ChatModels capable of tool calling. "
+        "You can read more about the method here: "
+        "https://python.langchain.com/docs/modules/model_io/chat/structured_output/ "
+        "Please follow our extraction use case documentation for more guidelines "
+        "on how to do information extraction with LLMs: "
+        "https://python.langchain.com/docs/use_cases/extraction/. "
+        "If you notice other issues, please provide "
+        "feedback here: "
+        "https://github.com/langchain-ai/langchain/discussions/18154"
+    ),
+    removal="0.3.0",
+    pending=True,
+    alternative=(
+        """
+        from langchain_core.pydantic_v1 import BaseModel, Field
+        from langchain_anthropic import ChatAnthropic
+
+        class Joke(BaseModel):
+            setup: str = Field(description="The setup of the joke")
+            punchline: str = Field(description="The punchline to the joke")
+
+        # Or any other chat model that supports tools.
+        # Please refer to the documentation of structured_output
+        # to see an up-to-date list of which models support
+        # with_structured_output.
+        model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
+        structured_llm = model.with_structured_output(Joke)
+        structured_llm.invoke("Tell me a joke about cats. "
+                              "Make sure to call the Joke function.")
+        """
+    ),
+)
 def create_extraction_chain_pydantic(
     pydantic_schema: Any,
     llm: BaseLanguageModel,
@@ -1,5 +1,6 @@
 from typing import List, Type, Union
 
+from langchain_core._api import deprecated
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import ChatPromptTemplate
 from langchain_core.pydantic_v1 import BaseModel
@@ -14,6 +15,43 @@ in the following passage together with their properties.
 If a property is not present and is not required in the function parameters, do not include it in the output."""  # noqa: E501
 
 
+@deprecated(
+    since="0.1.14",
+    message=(
+        "LangChain has introduced a method called `with_structured_output` that "
+        "is available on ChatModels capable of tool calling. "
+        "You can read more about the method here: "
+        "https://python.langchain.com/docs/modules/model_io/chat/structured_output/ "
+        "Please follow our extraction use case documentation for more guidelines "
+        "on how to do information extraction with LLMs: "
+        "https://python.langchain.com/docs/use_cases/extraction/. "
+        "with_structured_output does not currently support a list of pydantic schemas. "
+        "If this is a blocker or if you notice other issues, please provide "
+        "feedback here: "
+        "https://github.com/langchain-ai/langchain/discussions/18154"
+    ),
+    removal="0.3.0",
+    pending=True,
+    alternative=(
+        """
+        from langchain_core.pydantic_v1 import BaseModel, Field
+        from langchain_anthropic import ChatAnthropic
+
+        class Joke(BaseModel):
+            setup: str = Field(description="The setup of the joke")
+            punchline: str = Field(description="The punchline to the joke")
+
+        # Or any other chat model that supports tools.
+        # Please refer to the documentation of structured_output
+        # to see an up-to-date list of which models support
+        # with_structured_output.
+        model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
+        structured_llm = model.with_structured_output(Joke)
+        structured_llm.invoke("Tell me a joke about cats. "
+                              "Make sure to call the Joke function.")
+        """
+    ),
+)
 def create_extraction_chain_pydantic(
     pydantic_schemas: Union[List[Type[BaseModel]], Type[BaseModel]],
     llm: BaseLanguageModel,
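One detail worth illustrating from the hunk above: this `create_extraction_chain_pydantic` accepts either a single Pydantic class or a list of them, and the list case is exactly what the deprecation message flags as not yet covered by `with_structured_output`. A hedged usage sketch, not taken from the commit; the schemas, the model class, and the "input" key are assumptions:

# Not part of the commit: a sketch of the multi-schema case called out above.
from langchain.chains.openai_tools import create_extraction_chain_pydantic
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI  # placeholder; any tool-calling chat model


class Person(BaseModel):
    name: str = Field(description="The person's name")


class Place(BaseModel):
    name: str = Field(description="The place's name")


llm = ChatOpenAI(temperature=0)

# Passing a list of schemas lets one call extract several entity types at once.
chain = create_extraction_chain_pydantic([Person, Place], llm)

# The chain's default prompt takes the passage under the "input" key and should
# return a list of Person / Place instances parsed from the model's tool calls.
result = chain.invoke({"input": "Alice flew to Paris to meet Bob."})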
@@ -1,6 +1,7 @@
 import json
 from typing import Any, Callable, Dict, Literal, Optional, Sequence, Type, Union
 
+from langchain_core._api import deprecated
 from langchain_core.output_parsers import (
     BaseGenerationOutputParser,
     BaseOutputParser,
@@ -26,6 +27,42 @@ from langchain.output_parsers import (
 )
 
 
+@deprecated(
+    since="0.1.14",
+    message=(
+        "LangChain has introduced a method called `with_structured_output` that "
+        "is available on ChatModels capable of tool calling. "
+        "You can read more about the method here: "
+        "https://python.langchain.com/docs/modules/model_io/chat/structured_output/ "
+        "Please follow our extraction use case documentation for more guidelines "
+        "on how to do information extraction with LLMs: "
+        "https://python.langchain.com/docs/use_cases/extraction/. "
+        "If you notice other issues, please provide "
+        "feedback here: "
+        "https://github.com/langchain-ai/langchain/discussions/18154"
+    ),
+    removal="0.3.0",
+    pending=True,
+    alternative=(
+        """
+        from langchain_core.pydantic_v1 import BaseModel, Field
+        from langchain_anthropic import ChatAnthropic
+
+        class Joke(BaseModel):
+            setup: str = Field(description="The setup of the joke")
+            punchline: str = Field(description="The punchline to the joke")
+
+        # Or any other chat model that supports tools.
+        # Please refer to the documentation of structured_output
+        # to see an up-to-date list of which models support
+        # with_structured_output.
+        model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
+        structured_llm = model.with_structured_output(Joke)
+        structured_llm.invoke("Tell me a joke about cats. "
+                              "Make sure to call the Joke function.")
+        """
+    ),
+)
 def create_openai_fn_runnable(
     functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]],
     llm: Runnable,
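Taken together, the migration these decorators recommend is the same in all three files: describe the target schema as a Pydantic model and bind it to a tool-calling chat model with `with_structured_output`. A hedged sketch of that pattern applied to the extraction use case itself; the `Person`/`People` schema and the OpenAI model are placeholder assumptions, not from the commit:

# Not part of the commit: a sketch of the recommended replacement pattern.
from typing import List, Optional

from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI  # placeholder; any tool-calling chat model


class Person(BaseModel):
    """Information about one person mentioned in the passage."""

    name: str = Field(description="The person's name")
    height_m: Optional[float] = Field(None, description="Height in meters, if stated")


class People(BaseModel):
    """Wrapper type so several people can be extracted from one passage."""

    people: List[Person]


llm = ChatOpenAI(temperature=0)

# Replaces create_extraction_chain({"properties": ...}, llm): the schema is
# attached directly to the chat model instead of being wrapped in a chain.
structured_llm = llm.with_structured_output(People)
result = structured_llm.invoke(
    "Anna is 1.83 meters tall and her brother Tomas is a bit shorter."
)
# result should be a People instance whose .people list holds the extractions.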