langchain: Add ruff rule W (#31876)

All auto-fixes
See https://docs.astral.sh/ruff/rules/#warning-w

Co-authored-by: Chester Curme <chester.curme@gmail.com>
Christophe Bornet authored on 2025-07-05 23:57:30 +02:00, committed by GitHub
parent 3f4b355eef
commit bf05229029
27 changed files with 125 additions and 125 deletions
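
For context, ruff's W rules are the pycodestyle whitespace warnings, chiefly W291 (trailing whitespace) and W293 (whitespace on blank line). Their auto-fix simply strips trailing whitespace, which is why the old and new sides of every hunk below read identically once rendered. A minimal sketch of the transformation (illustrative only, not ruff's actual implementation):

```python
# Rough equivalent of what the W291/W293 auto-fixes do to each file:
# strip trailing whitespace from every line, leaving the code unchanged.
source = "x = 1 \n   \ny = 2\n"  # "x = 1 " trips W291; the "   " line trips W293

fixed = "".join(line.rstrip() + "\n" for line in source.splitlines())
assert fixed == "x = 1\n\ny = 2\n"
```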


@@ -23,22 +23,22 @@ class AgentType(str, Enum):
    REACT_DOCSTORE = "react-docstore"
    """A zero shot agent that does a reasoning step before acting.

    This agent has access to a document store that allows it to look up
    relevant information to answering the question.
    """

    SELF_ASK_WITH_SEARCH = "self-ask-with-search"
    """An agent that breaks down a complex question into a series of simpler questions.

    This agent uses a search tool to look up answers to the simpler questions
    in order to answer the original complex question.
    """

    CONVERSATIONAL_REACT_DESCRIPTION = "conversational-react-description"

    CHAT_ZERO_SHOT_REACT_DESCRIPTION = "chat-zero-shot-react-description"
    """A zero shot agent that does a reasoning step before acting.

    This agent is designed to be used in conjunction
    """

    CHAT_CONVERSATIONAL_REACT_DESCRIPTION = "chat-conversational-react-description"

@@ -47,7 +47,7 @@ class AgentType(str, Enum):
        "structured-chat-zero-shot-react-description"
    )
    """An zero-shot react agent optimized for chat models.

    This agent is capable of invoking tools that have multiple inputs.
    """


@@ -27,10 +27,10 @@ def create_json_chat_agent(
        tools: Tools this agent has access to.
        prompt: The prompt to use. See Prompt section below for more.
        stop_sequence: bool or list of str.
            If True, adds a stop token of "Observation:" to avoid hallucinates.
            If False, does not add a stop token.
            If a list of str, uses the provided list as the stop tokens.

            Default is True. You may to set this to False if the LLM you are using
            does not support stop sequences.
        tools_renderer: This controls how the tools are converted into a string and

@@ -43,7 +43,7 @@ def create_json_chat_agent(
        A Runnable sequence representing an agent. It takes as input all the same input
        variables as the prompt passed in does. It returns as output either an
        AgentAction or AgentFinish.

    Raises:
        ValueError: If the prompt is missing required variables.
        ValueError: If the template_tool_response is missing

@@ -79,18 +79,18 @@ def create_json_chat_agent(
            )

    Prompt:

        The prompt must have input keys:
            * `tools`: contains descriptions and arguments for each tool.
            * `tool_names`: contains all tool names.
            * `agent_scratchpad`: must be a MessagesPlaceholder. Contains previous agent actions and tool outputs as messages.

        Here's an example:

        .. code-block:: python

            from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

            system = '''Assistant is a large language model trained by OpenAI.

            Assistant is designed to be able to assist with a wide range of tasks, from answering \

@@ -110,7 +110,7 @@ def create_json_chat_agent(
            and provide valuable insights and information on a wide range of topics. Whether \
            you need help with a specific question or just want to have a conversation about \
            a particular topic, Assistant is here to assist.'''

            human = '''TOOLS
            ------
            Assistant can ask the user to use tools to look up information that may be helpful in \

@@ -151,7 +151,7 @@ def create_json_chat_agent(
            blob with a single action, and NOTHING else):

            {input}'''

            prompt = ChatPromptTemplate.from_messages(
                [
                    ("system", system),


@@ -201,10 +201,10 @@ try:
        output_key: str = "output"  #: :meta private:

        limit_to_domains: Optional[Sequence[str]] = Field(default_factory=list)  # type: ignore[arg-type]
        """Use to limit the domains that can be accessed by the API chain.

        * For example, to limit to just the domain `https://www.example.com`, set
          `limit_to_domains=["https://www.example.com"]`.
        * The default value is an empty tuple, which means that no domains are
          allowed by default. By design this will raise an error on instantiation.
        * Use a None if you want to allow all domains by default -- this is not


@@ -93,7 +93,7 @@ class BaseConversationalRetrievalChain(Chain):
    """An optional function to get a string of the chat history.
    If None is provided, will use a default."""
    response_if_no_docs_found: Optional[str] = None
    """If specified, the chain will return a fixed response if no docs
    are found for the question. """

    model_config = ConfigDict(


@@ -88,7 +88,7 @@ class LLMChain(Chain):
    output_key: str = "text"  #: :meta private:
    output_parser: BaseLLMOutputParser = Field(default_factory=StrOutputParser)
    """Output parser to use.
    Defaults to one that takes the most likely string but does not change it
    otherwise."""
    return_final_only: bool = True
    """Whether to return only the final parsed result. Defaults to True.


@@ -63,18 +63,18 @@ Passage:
    """
    from pydantic import BaseModel, Field

    from langchain_anthropic import ChatAnthropic

    class Joke(BaseModel):
        setup: str = Field(description="The setup of the joke")
        punchline: str = Field(description="The punchline to the joke")

    # Or any other chat model that supports tools.
    # Please reference to to the documentation of structured_output
    # to see an up to date list of which models support
    # with_structured_output.
    model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
    structured_llm = model.with_structured_output(Joke)
    structured_llm.invoke("Tell me a joke about cats.
    Make sure to call the Joke function.")
    """
),

@@ -133,18 +133,18 @@ def create_extraction_chain(
    """
    from pydantic import BaseModel, Field

    from langchain_anthropic import ChatAnthropic

    class Joke(BaseModel):
        setup: str = Field(description="The setup of the joke")
        punchline: str = Field(description="The punchline to the joke")

    # Or any other chat model that supports tools.
    # Please reference to to the documentation of structured_output
    # to see an up to date list of which models support
    # with_structured_output.
    model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
    structured_llm = model.with_structured_output(Joke)
    structured_llm.invoke("Tell me a joke about cats.
    Make sure to call the Joke function.")
    """
),


@@ -34,18 +34,18 @@ If a property is not present and is not required in the function parameters, do
    """
    from pydantic import BaseModel, Field

    from langchain_anthropic import ChatAnthropic

    class Joke(BaseModel):
        setup: str = Field(description="The setup of the joke")
        punchline: str = Field(description="The punchline to the joke")

    # Or any other chat model that supports tools.
    # Please reference to to the documentation of structured_output
    # to see an up to date list of which models support
    # with_structured_output.
    model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
    structured_llm = model.with_structured_output(Joke)
    structured_llm.invoke("Tell me a joke about cats.
    Make sure to call the Joke function.")
    """
),


@@ -59,7 +59,7 @@ class MultiRouteChain(Chain):
    default_chain: Chain
    """Default chain to use when none of the destination chains are suitable."""
    silent_errors: bool = False
    """If True, use default_chain when an invalid destination name is provided.
    Defaults to False."""

    model_config = ConfigDict(


@@ -47,18 +47,18 @@ from pydantic import BaseModel
    """
    from pydantic import BaseModel, Field

    from langchain_anthropic import ChatAnthropic

    class Joke(BaseModel):
        setup: str = Field(description="The setup of the joke")
        punchline: str = Field(description="The punchline to the joke")

    # Or any other chat model that supports tools.
    # Please reference to to the documentation of structured_output
    # to see an up to date list of which models support
    # with_structured_output.
    model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
    structured_llm = model.with_structured_output(Joke)
    structured_llm.invoke("Tell me a joke about cats.
    Make sure to call the Joke function.")
    """
),

@@ -168,15 +168,15 @@ def create_openai_fn_runnable(
    class Joke(BaseModel):
        setup: str = Field(description="The setup of the joke")
        punchline: str = Field(description="The punchline to the joke")

    # Or any other chat model that supports tools.
    # Please reference to to the documentation of structured_output
    # to see an up to date list of which models support
    # with_structured_output.
    model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
    structured_llm = model.with_structured_output(Joke)
    structured_llm.invoke("Tell me a joke about cats.
    Make sure to call the Joke function.")
    """
),
@@ -201,39 +201,39 @@ def create_structured_output_runnable(
            is passed in, it's assumed to already be a valid JsonSchema.
            For best results, pydantic.BaseModels should have docstrings describing what
            the schema represents and descriptions for the parameters.
        llm: Language model to use. Assumed to support the OpenAI function-calling API
            if mode is 'openai-function'. Assumed to support OpenAI response_format
            parameter if mode is 'openai-json'.
        prompt: BasePromptTemplate to pass to the model. If mode is 'openai-json' and
            prompt has input variable 'output_schema' then the given output_schema
            will be converted to a JsonSchema and inserted in the prompt.
        output_parser: Output parser to use for parsing model outputs. By default
            will be inferred from the function types. If pydantic.BaseModel is passed
            in, then the OutputParser will try to parse outputs using the pydantic
            class. Otherwise model outputs will be parsed as JSON.
        mode: How structured outputs are extracted from the model. If 'openai-functions'
            then OpenAI function calling is used with the deprecated 'functions',
            'function_call' schema. If 'openai-tools' then OpenAI function
            calling with the latest 'tools', 'tool_choice' schema is used. This is
            recommended over 'openai-functions'. If 'openai-json' then OpenAI model
            with response_format set to JSON is used.
        enforce_function_usage: Only applies when mode is 'openai-tools' or
            'openai-functions'. If True, then the model will be forced to use the given
            output schema. If False, then the model can elect whether to use the output
            schema.
        return_single: Only applies when mode is 'openai-tools'. Whether to a list of
            structured outputs or a single one. If True and model does not return any
            structured outputs then chain output is None. If False and model does not
            return any structured outputs then chain output is an empty list.
        kwargs: Additional named arguments.

    Returns:
        A runnable sequence that will return a structured output(s) matching the given
        output_schema.

    OpenAI tools example with Pydantic schema (mode='openai-tools'):
        .. code-block:: python

            from typing import Optional

            from langchain.chains import create_structured_output_runnable
@@ -251,23 +251,23 @@ def create_structured_output_runnable(
            llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
            prompt = ChatPromptTemplate.from_messages(
                [
                    ("system", "You are an extraction algorithm. Please extract every possible instance"),
                    ('human', '{input}')
                ]
            )
            structured_llm = create_structured_output_runnable(
                RecordDog,
                llm,
                mode="openai-tools",
                enforce_function_usage=True,
                return_single=True
            )
            structured_llm.invoke({"input": "Harry was a chubby brown beagle who loved chicken"})
            # -> RecordDog(name="Harry", color="brown", fav_food="chicken")

    OpenAI tools example with dict schema (mode="openai-tools"):
        .. code-block:: python

            from typing import Optional

            from langchain.chains import create_structured_output_runnable

@@ -303,15 +303,15 @@ def create_structured_output_runnable(
            llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
            structured_llm = create_structured_output_runnable(
                dog_schema,
                llm,
                mode="openai-tools",
                enforce_function_usage=True,
                return_single=True
            )
            structured_llm.invoke("Harry was a chubby brown beagle who loved chicken")
            # -> {'name': 'Harry', 'color': 'brown', 'fav_food': 'chicken'}

    OpenAI functions example (mode="openai-functions"):
        .. code-block:: python

@@ -332,7 +332,7 @@ def create_structured_output_runnable(
            structured_llm = create_structured_output_runnable(Dog, llm, mode="openai-functions")
            structured_llm.invoke("Harry was a chubby brown beagle who loved chicken")
            # -> Dog(name="Harry", color="brown", fav_food="chicken")

    OpenAI functions with prompt example:
        .. code-block:: python

@@ -361,7 +361,7 @@ def create_structured_output_runnable(
            # -> Dog(name="Harry", color="brown", fav_food="chicken")

    OpenAI json response format example (mode="openai-json"):
        .. code-block:: python

            from typing import Optional

            from langchain.chains import create_structured_output_runnable

@@ -379,9 +379,9 @@ def create_structured_output_runnable(
            llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
            structured_llm = create_structured_output_runnable(Dog, llm, mode="openai-json")
            system = '''You are a world class assistant for extracting information in structured JSON formats. \
            Extract a valid JSON blob from the user input that matches the following JSON Schema:

            {output_schema}'''
            prompt = ChatPromptTemplate.from_messages(
                [("system", system), ("human", "{input}"),]


@@ -34,7 +34,7 @@ class EvaluatorType(str, Enum):
    """The pairwise string evaluator, which predicts the preferred prediction from
    between two models."""
    SCORE_STRING = "score_string"
    """The scored string evaluator, which gives a score between 1 and 10
    to a prediction."""
    LABELED_PAIRWISE_STRING = "labeled_pairwise_string"
    """The labeled pairwise string evaluator, which predicts the preferred prediction


@@ -23,7 +23,7 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
DEFAULT_HISTORY_TEMPLATE = """
Current date and time: {current_time}.

Potentially relevant timestamped excerpts of previous conversations (you
do not need to use these if irrelevant):

{previous_history}


@@ -10,7 +10,7 @@ class DatetimeOutputParser(BaseOutputParser[datetime]):
    format: str = "%Y-%m-%dT%H:%M:%S.%fZ"
    """The string value that is used as the datetime format.

    Update this to match the desired datetime format for your application.
    """


@@ -20,7 +20,7 @@ class YamlOutputParser(BaseOutputParser[T]):
    pattern: re.Pattern = re.compile(
        r"^```(?:ya?ml)?(?P<yaml>[^`]*)", re.MULTILINE | re.DOTALL
    )
    """Regex pattern to match yaml code blocks
    within triple backticks with optional yaml or yml prefix."""

    def parse(self, text: str) -> T:


@@ -36,7 +36,7 @@ class LLMChainFilter(BaseDocumentCompressor):
    """Filter that drops documents that aren't relevant to the query."""

    llm_chain: Runnable
    """LLM wrapper to use for filtering documents.
    The chain prompt is expected to have a BooleanOutputParser."""

    get_input: Callable[[str, Document], dict] = default_get_input


@@ -24,7 +24,7 @@ class CohereRerank(BaseDocumentCompressor):
    model: str = "rerank-english-v2.0"
    """Model to use for reranking."""
    cohere_api_key: Optional[str] = None
    """Cohere API key. Must be specified directly or via environment variable
    COHERE_API_KEY."""
    user_agent: str = "langchain"
    """Identifier for the application making the request."""


@@ -73,8 +73,8 @@ class LLMListwiseRerank(BaseDocumentCompressor):
    """

    reranker: Runnable[dict, list[Document]]
    """LLM-based reranker to use for filtering documents. Expected to take in a dict
    with 'documents: Sequence[Document]' and 'query: str' keys and output a
    List[Document]."""

    top_n: int = 3


@@ -31,12 +31,12 @@ class LineListOutputParser(BaseOutputParser[list[str]]):

# Default prompt
DEFAULT_QUERY_PROMPT = PromptTemplate(
    input_variables=["question"],
    template="""You are an AI language model assistant. Your task is
    to generate 3 different versions of the given user
    question to retrieve relevant documents from a vector database.
    By generating multiple perspectives on the user question,
    your goal is to help the user overcome some of the limitations
    of distance-based similarity search. Provide these alternative
    questions separated by newlines. Original question: {question}""",
)


@@ -66,7 +66,7 @@ class ParentDocumentRetriever(MultiVectorRetriever):
    If none, then the parent documents will be the raw documents passed in."""

    child_metadata_fields: Optional[Sequence[str]] = None
    """Metadata fields to leave in child documents. If None, leave all parent document
    metadata.
    """


@@ -238,7 +238,7 @@ class SelfQueryRetriever(BaseRetriever):
    """The underlying vector store from which documents will be retrieved."""

    query_constructor: Runnable[dict, StructuredQuery] = Field(alias="llm_chain")
    """The query constructor chain for generating the vector store queries.
    llm_chain is legacy name kept for backwards compatibility."""
    search_type: str = "similarity"
    """The search type to perform on the vector store."""


@@ -133,7 +133,7 @@ class RunEvalConfig(BaseModel):
    :class:`EvaluatorType <langchain.evaluation.schema.EvaluatorType>`, such
    as `EvaluatorType.QA`, the evaluator type string ("qa"), or a configuration for a
    given evaluator
    (e.g.,
    :class:`RunEvalConfig.QA <langchain.smith.evaluation.config.RunEvalConfig.QA>`)."""
    custom_evaluators: Optional[list[CUSTOM_EVALUATOR_TYPE]] = None
    """Custom evaluators to apply to the dataset run."""


@@ -143,7 +143,7 @@ ignore-regex = ".*(Stati Uniti|Tense=Pres).*"
ignore-words-list = "momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogyny,unsecure,damon,crate,aadd,symbl,precesses,accademia,nin"

[tool.ruff.lint]
-select = ["E", "F", "I", "PGH003", "T201", "D", "UP", "S"]
+select = ["E", "F", "I", "PGH003", "T201", "D", "UP", "S", "W"]
pydocstyle.convention = "google"
pyupgrade.keep-runtime-typing = true
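
Usage note: with `"W"` added to the `select` list above, these fixes come from ruff's standard fix flow, e.g. `ruff check --fix` (or `ruff check --select W --fix` to apply only the whitespace rules).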


@@ -71,7 +71,7 @@ def test_final_answer_after_parsable_action() -> None:
    Observation: I can use the `foo` tool to achieve the goal.
    Action: foo
    Action Input: bar
    Final Answer: The best pizza to eat is margaritta
    """
    with pytest.raises(OutputParserException) as exception_info:
        mrkl_output_parser.parse(llm_output)


@@ -63,7 +63,7 @@ def test_parse_with_language_and_spaces() -> None:
    llm_output = """I can use the `foo` tool to achieve the goal.

    Action:
    ```json
    {
        "action": "foo",

@@ -139,24 +139,24 @@ class TestCreatePrompt:
        expected = dedent(
            """
            Respond to the human as helpfully and accurately as possible. You have access to the following tools:

            foo: Test tool FOO, args: {'tool_input': {'type': 'string'}}

            Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).

            Valid "action" values: "Final Answer" or foo

            Provide only ONE action per $JSON_BLOB, as shown:

            ```
            {
              "action": $TOOL_NAME,
              "action_input": $INPUT
            }
            ```

            Follow this format:

            Question: input question to answer
            Thought: consider previous and subsequent steps
            Action:

@@ -173,7 +173,7 @@ class TestCreatePrompt:
              "action_input": "Final response to human"
            }
            ```

            Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.
            Thought:
            """  # noqa: E501

@@ -205,25 +205,25 @@ class TestCreatePrompt:
        expected = dedent(
            """
            Respond to the human as helpfully and accurately as possible. You have access to the following tools:

            foo: Test tool FOO, args: {'tool_input': {'type': 'string'}}
            bar: Test tool BAR, args: {'tool_input': {'type': 'string'}}

            Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).

            Valid "action" values: "Final Answer" or foo, bar

            Provide only ONE action per $JSON_BLOB, as shown:

            ```
            {
              "action": $TOOL_NAME,
              "action_input": $INPUT
            }
            ```

            Follow this format:

            Question: input question to answer
            Thought: consider previous and subsequent steps
            Action:

@@ -240,7 +240,7 @@ class TestCreatePrompt:
              "action_input": "Final response to human"
            }
            ```

            Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.
            Thought:
            """  # noqa: E501


@@ -105,11 +105,11 @@ GRADE:""",  # noqa: E501
    1. The question asks who founded the Roanoke settlement.

    2. The context states that the grade incorrect answer is Walter Raleigh.

    3. The student's answer is "Sir Walter Raleigh".

    4. The student's answer matches the context, which states the answer is Walter Raleigh.

    5. The addition of "Sir" in the student's answer does not contradict the context. It provides extra detail about Walter Raleigh's title, but the core answer of Walter Raleigh is still correct.

@@ -125,7 +125,7 @@ GRADE: CORRECT""",  # noqa: E501
    """ CORRECT

    QUESTION: who was the first president of the united states?
    STUDENT ANSWER: George Washington
    TRUE ANSWER: George Washington was the first president of the United States.
    GRADE:""",
    {


@@ -14,7 +14,7 @@ from tests.unit_tests.llms.fake_llm import FakeLLM

def test_PairwiseStringResultOutputParser_parse() -> None:
    output_parser = ScoreStringResultOutputParser()
    text = """This answer is really good.
Rating: [[10]]"""
    got = output_parser.parse(text)
    want = {

@@ -24,12 +24,12 @@ Rating: [[10]]"""
    assert got.get("reasoning") == want["reasoning"]
    assert got.get("score") == want["score"]

    text = """This answer is really good.
Rating: 10"""
    with pytest.raises(ValueError):
        output_parser.parse(text)

    text = """This answer is really good.
Rating: [[0]]"""
    # Not in range [1, 10]
    with pytest.raises(ValueError):


@@ -63,12 +63,12 @@ class GenericFakeChatModel(BaseChatModel):
    messages: Iterator[AIMessage]
    """Get an iterator over messages.

    This can be expanded to accept other types like Callables / dicts / strings
    to make the interface more generic if needed.

    Note: if you want to pass a list, you can use `iter` to convert it to an iterator.

    Please note that streaming is not implemented yet. We should try to implement it
    in the future by delegating to invoke and then breaking the resulting output
    into message chunks.


@@ -38,7 +38,7 @@ action_input: The yamlOutputParser class is powerful
additional_fields: null
for_new_lines: |
    not_escape_newline:
     escape_newline:
```"""

DEF_RESULT_NO_BACKTICKS = """

@@ -47,7 +47,7 @@ action_input: The yamlOutputParser class is powerful
additional_fields: null
for_new_lines: |
    not_escape_newline:
     escape_newline:
"""

@@ -62,7 +62,7 @@ DEF_EXPECTED_RESULT = TestModel(
    action=Actions.UPDATE,
    action_input="The yamlOutputParser class is powerful",
    additional_fields=None,
-    for_new_lines="not_escape_newline:\n escape_newline: \n",
+    for_new_lines="not_escape_newline:\n escape_newline:\n",
)