langchain[patch]: Add deprecation warning to extraction chains (#20224)

Add deprecation warnings to extraction chains
Eugene Yurtsev 2024-04-12 10:24:32 -04:00 committed by GitHub
parent b65a1d4cfd
commit 6470b30173
3 changed files with 148 additions and 0 deletions
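
Because the new decorators are applied with pending=True, calling one of these factories emits a pending-deprecation notice, which Python's default warning filters suppress. Below is a minimal sketch of how a caller could surface the new message; the FakeListLLM stand-in (assumed importable from langchain_community) and the toy schema are illustrative assumptions, not part of this change.

import warnings

from langchain.chains import create_extraction_chain
from langchain_community.llms import FakeListLLM

# Any BaseLanguageModel will do: the warning is emitted when the factory
# function is called, before the resulting chain is ever invoked.
llm = FakeListLLM(responses=["{}"])
schema = {"properties": {"name": {"type": "string"}}}

with warnings.catch_warnings(record=True) as caught:
    # Pending deprecations are hidden by the default filters; opt in to see them.
    warnings.simplefilter("always")
    chain = create_extraction_chain(schema=schema, llm=llm)

for w in caught:
    print(w.category.__name__, "-", w.message)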

View File

@@ -1,5 +1,6 @@
from typing import Any, List, Optional
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers.openai_functions import (
    JsonKeyOutputFunctionsParser,
@@ -43,6 +44,42 @@ Passage:
""" # noqa: E501
@deprecated(
    since="0.1.14",
    message=(
        "LangChain has introduced a method called `with_structured_output` that "
        "is available on ChatModels capable of tool calling. "
        "You can read more about the method here: "
        "https://python.langchain.com/docs/modules/model_io/chat/structured_output/ "
        "Please follow our extraction use case documentation for more guidelines "
        "on how to do information extraction with LLMs. "
        "https://python.langchain.com/docs/use_cases/extraction/. "
        "If you notice other issues, please provide "
        "feedback here: "
        "https://github.com/langchain-ai/langchain/discussions/18154"
    ),
    removal="0.3.0",
    pending=True,
    alternative=(
        """
        from langchain_core.pydantic_v1 import BaseModel, Field
        from langchain_anthropic import ChatAnthropic

        class Joke(BaseModel):
            setup: str = Field(description="The setup of the joke")
            punchline: str = Field(description="The punchline to the joke")

        # Or any other chat model that supports tools.
        # Please refer to the documentation of structured_output
        # to see an up-to-date list of which models support
        # with_structured_output.
        model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
        structured_llm = model.with_structured_output(Joke)
        structured_llm.invoke(
            "Tell me a joke about cats. Make sure to call the Joke function."
        )
        """
    ),
)
def create_extraction_chain(
    schema: dict,
    llm: BaseLanguageModel,
@@ -78,6 +115,42 @@ def create_extraction_chain(
    return chain
@deprecated(
    since="0.1.14",
    message=(
        "LangChain has introduced a method called `with_structured_output` that "
        "is available on ChatModels capable of tool calling. "
        "You can read more about the method here: "
        "https://python.langchain.com/docs/modules/model_io/chat/structured_output/ "
        "Please follow our extraction use case documentation for more guidelines "
        "on how to do information extraction with LLMs. "
        "https://python.langchain.com/docs/use_cases/extraction/. "
        "If you notice other issues, please provide "
        "feedback here: "
        "https://github.com/langchain-ai/langchain/discussions/18154"
    ),
    removal="0.3.0",
    pending=True,
    alternative=(
        """
        from langchain_core.pydantic_v1 import BaseModel, Field
        from langchain_anthropic import ChatAnthropic

        class Joke(BaseModel):
            setup: str = Field(description="The setup of the joke")
            punchline: str = Field(description="The punchline to the joke")

        # Or any other chat model that supports tools.
        # Please refer to the documentation of structured_output
        # to see an up-to-date list of which models support
        # with_structured_output.
        model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
        structured_llm = model.with_structured_output(Joke)
        structured_llm.invoke(
            "Tell me a joke about cats. Make sure to call the Joke function."
        )
        """
    ),
)
def create_extraction_chain_pydantic(
    pydantic_schema: Any,
    llm: BaseLanguageModel,

View File

@@ -1,5 +1,6 @@
from typing import List, Type, Union
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
@@ -14,6 +15,43 @@ in the following passage together with their properties.
If a property is not present and is not required in the function parameters, do not include it in the output.""" # noqa: E501
@deprecated(
    since="0.1.14",
    message=(
        "LangChain has introduced a method called `with_structured_output` that "
        "is available on ChatModels capable of tool calling. "
        "You can read more about the method here: "
        "https://python.langchain.com/docs/modules/model_io/chat/structured_output/ "
        "Please follow our extraction use case documentation for more guidelines "
        "on how to do information extraction with LLMs. "
        "https://python.langchain.com/docs/use_cases/extraction/. "
        "with_structured_output does not currently support a list of pydantic schemas. "
        "If this is a blocker or if you notice other issues, please provide "
        "feedback here: "
        "https://github.com/langchain-ai/langchain/discussions/18154"
    ),
    removal="0.3.0",
    pending=True,
    alternative=(
        """
        from langchain_core.pydantic_v1 import BaseModel, Field
        from langchain_anthropic import ChatAnthropic

        class Joke(BaseModel):
            setup: str = Field(description="The setup of the joke")
            punchline: str = Field(description="The punchline to the joke")

        # Or any other chat model that supports tools.
        # Please refer to the documentation of structured_output
        # to see an up-to-date list of which models support
        # with_structured_output.
        model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
        structured_llm = model.with_structured_output(Joke)
        structured_llm.invoke(
            "Tell me a joke about cats. Make sure to call the Joke function."
        )
        """
    ),
)
def create_extraction_chain_pydantic(
    pydantic_schemas: Union[List[Type[BaseModel]], Type[BaseModel]],
    llm: BaseLanguageModel,

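The message above notes that with_structured_output does not yet accept a list of pydantic schemas, which is what this create_extraction_chain_pydantic variant supports. A common workaround, sketched here under the same ChatAnthropic assumption used in the alternative snippet (the Person/People models are illustrative), is to nest the repeated schema inside a wrapper model with a list field:

from typing import List

from langchain_anthropic import ChatAnthropic
from langchain_core.pydantic_v1 import BaseModel, Field

class Person(BaseModel):
    """A single person mentioned in the passage."""
    name: str = Field(description="The person's name")
    age: int = Field(description="The person's age")

class People(BaseModel):
    """All people mentioned in the passage."""
    people: List[Person]

model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
structured_llm = model.with_structured_output(People)
result = structured_llm.invoke("Alice is 30 and her brother Bob is 25.")
# result.people is a list of Person objects extracted from the passage.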
View File

@@ -1,6 +1,7 @@
import json
from typing import Any, Callable, Dict, Literal, Optional, Sequence, Type, Union
from langchain_core._api import deprecated
from langchain_core.output_parsers import (
    BaseGenerationOutputParser,
    BaseOutputParser,
@@ -26,6 +27,42 @@ from langchain.output_parsers import (
)
@deprecated(
    since="0.1.14",
    message=(
        "LangChain has introduced a method called `with_structured_output` that "
        "is available on ChatModels capable of tool calling. "
        "You can read more about the method here: "
        "https://python.langchain.com/docs/modules/model_io/chat/structured_output/ "
        "Please follow our extraction use case documentation for more guidelines "
        "on how to do information extraction with LLMs. "
        "https://python.langchain.com/docs/use_cases/extraction/. "
        "If you notice other issues, please provide "
        "feedback here: "
        "https://github.com/langchain-ai/langchain/discussions/18154"
    ),
    removal="0.3.0",
    pending=True,
    alternative=(
        """
        from langchain_core.pydantic_v1 import BaseModel, Field
        from langchain_anthropic import ChatAnthropic

        class Joke(BaseModel):
            setup: str = Field(description="The setup of the joke")
            punchline: str = Field(description="The punchline to the joke")

        # Or any other chat model that supports tools.
        # Please refer to the documentation of structured_output
        # to see an up-to-date list of which models support
        # with_structured_output.
        model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
        structured_llm = model.with_structured_output(Joke)
        structured_llm.invoke(
            "Tell me a joke about cats. Make sure to call the Joke function."
        )
        """
    ),
)
def create_openai_fn_runnable(
    functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]],
    llm: Runnable,
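
create_openai_fn_runnable binds several functions at once and lets the model pick one, while with_structured_output targets a single schema. For the multi-schema case, a rough sketch of the tool-calling route is shown below; the RecordPerson/RecordDog schemas and the ChatOpenAI model name are illustrative assumptions, and the exact shape of the returned tool-call payload depends on the installed langchain-core version.

from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI

class RecordPerson(BaseModel):
    """Record details about a person."""
    name: str = Field(description="The person's name")

class RecordDog(BaseModel):
    """Record details about a dog."""
    name: str = Field(description="The dog's name")

# Assumes OPENAI_API_KEY is set; any tool-calling chat model could be swapped in.
llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
llm_with_tools = llm.bind_tools([RecordPerson, RecordDog])

msg = llm_with_tools.invoke("Harry was a chubby brown beagle who loved chicken.")
# The model's choice of schema and its arguments come back as a tool call
# attached to the returned AIMessage.
print(msg)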