Mirror of https://github.com/hwchase17/langchain.git (synced 2025-06-20 22:03:52 +00:00)
langchain[lint]: fix mypy type ignores (#30894)
* Remove unused ignores
* Add type ignore codes
* Add mypy rule `warn_unused_ignores`
* Add ruff rule PGH003

NB: some `type: ignore[unused-ignore]` are added because the ignores are needed when `extended_testing_deps.txt` deps are installed.
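For readers skimming the diff, a minimal sketch of what the new rules enforce; the `Agent` stand-in below is hypothetical, but the `get_allowed_tools` call mirrors the first hunks. A bare `# type: ignore` suppresses every error on its line (ruff's PGH003 flags it), while a scoped ignore names the error code, and mypy's `warn_unused_ignores` reports it if it ever stops matching a real error:

    from typing import Optional

    class Agent:  # hypothetical stand-in for the real agent class
        def get_allowed_tools(self) -> Optional[list[str]]:
            return None

    def allowed(agent: Optional[Agent]) -> Optional[list[str]]:
        # Bare ignore: hides *every* error on this line; ruff PGH003 flags it.
        _ = agent.get_allowed_tools()  # type: ignore
        # Scoped ignore: hides only `union-attr` (attribute access on a value
        # that may be None); mypy's warn_unused_ignores flags it once unneeded.
        return agent.get_allowed_tools()  # type: ignore[union-attr]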
Parent: f14bcee525
Commit: 0c723af4b0
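The `unused-ignore` code mentioned in the NB handles ignores that are only valid in one of two environments: with the `extended_testing_deps.txt` extras installed the ignore is needed, without them it would be flagged as unused. A hedged sketch of the pattern (it mirrors the `init_chat_model` hunks below; `build_chat_model` is an illustrative wrapper, not part of the commit):

    from typing import Any

    def build_chat_model(model: str, **kwargs: Any) -> Any:
        # Deferred import: the provider package is an optional extra.
        from langchain_anthropic import ChatAnthropic

        # `call-arg` is needed when the extended testing deps are installed and
        # mypy sees the provider's stricter signature; `unused-ignore` keeps
        # `warn_unused_ignores` quiet in environments where the call is clean.
        return ChatAnthropic(model=model, **kwargs)  # type: ignore[call-arg,unused-ignore]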
@@ -1013,7 +1013,7 @@ class Agent(BaseSingleActionAgent):
         }


-class ExceptionTool(BaseTool):  # type: ignore[override]
+class ExceptionTool(BaseTool):
     """Tool that just returns the query."""

     name: str = "_Exception"
@@ -1129,7 +1129,7 @@ class AgentExecutor(Chain):
         """
         agent = self.agent
         tools = self.tools
-        allowed_tools = agent.get_allowed_tools()  # type: ignore
+        allowed_tools = agent.get_allowed_tools()  # type: ignore[union-attr]
         if allowed_tools is not None:
             if set(allowed_tools) != set([tool.name for tool in tools]):
                 raise ValueError(
@@ -120,7 +120,7 @@ class ChatAgent(Agent):
         ]
         if input_variables is None:
             input_variables = ["input", "agent_scratchpad"]
-        return ChatPromptTemplate(input_variables=input_variables, messages=messages)  # type: ignore[arg-type]
+        return ChatPromptTemplate(input_variables=input_variables, messages=messages)

     @classmethod
     def from_llm_and_tools(
@@ -154,7 +154,7 @@ class ConversationalAgent(Agent):
             format_instructions=format_instructions,
             input_variables=input_variables,
         )
-        llm_chain = LLMChain(  # type: ignore[misc]
+        llm_chain = LLMChain(
             llm=llm,
             prompt=prompt,
             callback_manager=callback_manager,
@@ -114,7 +114,7 @@ class ConversationalChatAgent(Agent):
             HumanMessagePromptTemplate.from_template(final_prompt),
             MessagesPlaceholder(variable_name="agent_scratchpad"),
         ]
-        return ChatPromptTemplate(input_variables=input_variables, messages=messages)  # type: ignore[arg-type]
+        return ChatPromptTemplate(input_variables=input_variables, messages=messages)

     def _construct_scratchpad(
         self, intermediate_steps: list[tuple[AgentAction, str]]
@@ -165,7 +165,7 @@ class ConversationalChatAgent(Agent):
             input_variables=input_variables,
             output_parser=_output_parser,
         )
-        llm_chain = LLMChain(  # type: ignore[misc]
+        llm_chain = LLMChain(
             llm=llm,
             prompt=prompt,
             callback_manager=callback_manager,
@@ -86,7 +86,7 @@ def load_agent_from_config(
         del config["output_parser"]

     combined_config = {**config, **kwargs}
-    return agent_cls(**combined_config)  # type: ignore
+    return agent_cls(**combined_config)


 @deprecated("0.1.0", removal="1.0")
@@ -144,7 +144,7 @@ class ZeroShotAgent(Agent):
             format_instructions=format_instructions,
             input_variables=input_variables,
         )
-        llm_chain = LLMChain(  # type: ignore[misc]
+        llm_chain = LLMChain(
             llm=llm,
             prompt=prompt,
             callback_manager=callback_manager,
@@ -127,7 +127,7 @@ def _get_assistants_tool(
        such as "code_interpreter" and "file_search".
    """
    if _is_assistants_builtin_tool(tool):
-        return tool  # type: ignore
+        return tool  # type: ignore[return-value]
    else:
        return convert_to_openai_tool(tool)

@@ -267,7 +267,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
         assistant = client.beta.assistants.create(
             name=name,
             instructions=instructions,
-            tools=[_get_assistants_tool(tool) for tool in tools],  # type: ignore
+            tools=[_get_assistants_tool(tool) for tool in tools],  # type: ignore[misc]
             model=model,
         )
         return cls(assistant_id=assistant.id, client=client, **kwargs)
@@ -394,7 +394,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
         assistant = await async_client.beta.assistants.create(
             name=name,
             instructions=instructions,
-            tools=openai_tools,  # type: ignore
+            tools=openai_tools,  # type: ignore[arg-type]
             model=model,
         )
         return cls(assistant_id=assistant.id, async_client=async_client, **kwargs)
@@ -12,7 +12,7 @@ from langchain.agents.format_scratchpad import (
 from langchain.memory.chat_memory import BaseChatMemory


-class AgentTokenBufferMemory(BaseChatMemory):  # type: ignore[override]
+class AgentTokenBufferMemory(BaseChatMemory):
     """Memory used to save agent output AND intermediate steps.

     Parameters:
@@ -241,7 +241,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
                 MessagesPlaceholder(variable_name="agent_scratchpad"),
             ]
         )
-        return ChatPromptTemplate(messages=messages)  # type: ignore[arg-type, call-arg]
+        return ChatPromptTemplate(messages=messages)

     @classmethod
     def from_llm_and_tools(
@@ -286,7 +286,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
                 MessagesPlaceholder(variable_name="agent_scratchpad"),
             ]
         )
-        return ChatPromptTemplate(messages=messages)  # type: ignore[arg-type, call-arg]
+        return ChatPromptTemplate(messages=messages)

     @classmethod
     def from_llm_and_tools(
@@ -14,7 +14,7 @@ from langchain_core.outputs import ChatGeneration, Generation
 from langchain.agents.agent import MultiActionAgentOutputParser


-class ToolAgentAction(AgentActionMessageLog):  # type: ignore[override]
+class ToolAgentAction(AgentActionMessageLog):
     tool_call_id: str
     """Tool call that this message is responding to."""

@@ -9,7 +9,7 @@ from langchain_core.callbacks import (
 from langchain_core.tools import BaseTool, tool


-class InvalidTool(BaseTool):  # type: ignore[override]
+class InvalidTool(BaseTool):
     """Tool that is run when invalid tool name is encountered by agent."""

     name: str = "invalid_tool"
@@ -199,9 +199,7 @@ try:
         api_docs: str
         question_key: str = "question"  #: :meta private:
         output_key: str = "output"  #: :meta private:
-        limit_to_domains: Optional[Sequence[str]] = Field(
-            default_factory=list  # type: ignore
-        )
+        limit_to_domains: Optional[Sequence[str]] = Field(default_factory=list)  # type: ignore[arg-type]
         """Use to limit the domains that can be accessed by the API chain.

         * For example, to limit to just the domain `https://www.example.com`, set
@@ -110,17 +110,13 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
         self, config: Optional[RunnableConfig] = None
     ) -> type[BaseModel]:
         # This is correct, but pydantic typings/mypy don't think so.
-        return create_model(  # type: ignore[call-overload]
-            "ChainInput", **{k: (Any, None) for k in self.input_keys}
-        )
+        return create_model("ChainInput", **{k: (Any, None) for k in self.input_keys})

     def get_output_schema(
         self, config: Optional[RunnableConfig] = None
     ) -> type[BaseModel]:
         # This is correct, but pydantic typings/mypy don't think so.
-        return create_model(  # type: ignore[call-overload]
-            "ChainOutput", **{k: (Any, None) for k in self.output_keys}
-        )
+        return create_model("ChainOutput", **{k: (Any, None) for k in self.output_keys})

     def invoke(
         self,
@@ -50,7 +50,7 @@ class BaseCombineDocumentsChain(Chain, ABC):
     ) -> type[BaseModel]:
         return create_model(
             "CombineDocumentsInput",
-            **{self.input_key: (list[Document], None)},  # type: ignore[call-overload]
+            **{self.input_key: (list[Document], None)},
         )

     def get_output_schema(
@@ -58,7 +58,7 @@ class BaseCombineDocumentsChain(Chain, ABC):
     ) -> type[BaseModel]:
         return create_model(
             "CombineDocumentsOutput",
-            **{self.output_key: (str, None)},  # type: ignore[call-overload]
+            **{self.output_key: (str, None)},
         )

     @property
@@ -249,7 +249,7 @@ class AnalyzeDocumentChain(Chain):
     ) -> type[BaseModel]:
         return create_model(
             "AnalyzeDocumentChain",
-            **{self.input_key: (str, None)},  # type: ignore[call-overload]
+            **{self.input_key: (str, None)},
         )

     def get_output_schema(
@@ -120,7 +120,7 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain):
                 **{
                     self.output_key: (str, None),
                     "intermediate_steps": (list[str], None),
-                },  # type: ignore[call-overload]
+                },
             )

         return super().get_output_schema(config)
@@ -19,7 +19,7 @@ from langchain.memory.buffer import ConversationBufferMemory
     ),
     removal="1.0",
 )
-class ConversationChain(LLMChain):  # type: ignore[override, override]
+class ConversationChain(LLMChain):
     """Chain to have a conversation and load context from memory.

     This class is deprecated in favor of ``RunnableWithMessageHistory``. Please refer
@@ -170,7 +170,7 @@ class ElasticsearchDatabaseChain(Chain):
         except Exception as exc:
             # Append intermediate steps to exception, to aid in logging and later
             # improvement of few shot prompt seeds
-            exc.intermediate_steps = intermediate_steps  # type: ignore
+            exc.intermediate_steps = intermediate_steps  # type: ignore[attr-defined]
             raise exc

     @property
@@ -39,16 +39,14 @@ try:
     from langchain_community.llms.loading import load_llm, load_llm_from_config
 except ImportError:

-    def load_llm(*args: Any, **kwargs: Any) -> None:  # type: ignore
+    def load_llm(*args: Any, **kwargs: Any) -> None:
         raise ImportError(
             "To use this load_llm functionality you must install the "
             "langchain_community package. "
             "You can install it with `pip install langchain_community`"
         )

-    def load_llm_from_config(  # type: ignore
-        *args: Any, **kwargs: Any
-    ) -> None:
+    def load_llm_from_config(*args: Any, **kwargs: Any) -> None:
         raise ImportError(
             "To use this load_llm_from_config functionality you must install the "
             "langchain_community package. "
@@ -95,9 +93,9 @@ def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedde
     else:
         raise ValueError("`embeddings` must be present.")
     return HypotheticalDocumentEmbedder(
-        llm_chain=llm_chain,  # type: ignore[arg-type]
+        llm_chain=llm_chain,
         base_embeddings=embeddings,
-        **config,  # type: ignore[arg-type]
+        **config,
     )


@@ -160,7 +158,7 @@ def _load_map_reduce_documents_chain(
     )


-def _load_reduce_documents_chain(config: dict, **kwargs: Any) -> ReduceDocumentsChain:  # type: ignore[valid-type]
+def _load_reduce_documents_chain(config: dict, **kwargs: Any) -> ReduceDocumentsChain:
     combine_documents_chain = None
     collapse_documents_chain = None

@@ -213,7 +211,7 @@ def _load_reduce_documents_chain(config: dict, **kwargs: Any) -> ReduceDocuments
             config.pop("collapse_document_chain_path"), **kwargs
         )

-    return ReduceDocumentsChain(  # type: ignore[misc]
+    return ReduceDocumentsChain(
         combine_documents_chain=combine_documents_chain,
         collapse_documents_chain=collapse_documents_chain,
         **config,
@@ -245,7 +243,7 @@ def _load_llm_bash_chain(config: dict, **kwargs: Any) -> Any:
     elif "prompt_path" in config:
         prompt = load_prompt(config.pop("prompt_path"))
     if llm_chain:
-        return LLMBashChain(llm_chain=llm_chain, prompt=prompt, **config)  # type: ignore[arg-type]
+        return LLMBashChain(llm_chain=llm_chain, prompt=prompt, **config)
     else:
         return LLMBashChain(llm=llm, prompt=prompt, **config)

@@ -347,7 +345,7 @@ def _load_pal_chain(config: dict, **kwargs: Any) -> Any:
         llm_chain = load_chain(config.pop("llm_chain_path"), **kwargs)
     else:
         raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
-    return PALChain(llm_chain=llm_chain, **config)  # type: ignore[arg-type]
+    return PALChain(llm_chain=llm_chain, **config)


 def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
@@ -410,7 +408,7 @@ def _load_sql_database_chain(config: dict, **kwargs: Any) -> Any:
     if "llm_chain" in config:
         llm_chain_config = config.pop("llm_chain")
         chain = load_chain_from_config(llm_chain_config, **kwargs)
-        return SQLDatabaseChain(llm_chain=chain, database=database, **config)  # type: ignore[arg-type]
+        return SQLDatabaseChain(llm_chain=chain, database=database, **config)
     if "llm" in config:
         llm_config = config.pop("llm")
         llm = load_llm_from_config(llm_config, **kwargs)
@@ -563,8 +561,8 @@ def _load_graph_cypher_chain(config: dict, **kwargs: Any) -> GraphCypherQAChain:
         )
     return GraphCypherQAChain(
         graph=graph,
-        cypher_generation_chain=cypher_generation_chain,  # type: ignore[arg-type]
-        qa_chain=qa_chain,  # type: ignore[arg-type]
+        cypher_generation_chain=cypher_generation_chain,
+        qa_chain=qa_chain,
         **config,
     )

@@ -51,7 +51,7 @@ def create_openai_fn_chain(
     output_key: str = "function",
     output_parser: Optional[BaseLLMOutputParser] = None,
     **kwargs: Any,
-) -> LLMChain:  # type: ignore[valid-type]
+) -> LLMChain:
     """[Legacy] Create an LLM chain that uses OpenAI functions.

     Args:
@@ -131,7 +131,7 @@ def create_openai_fn_chain(
     }
     if len(openai_functions) == 1 and enforce_single_function_usage:
         llm_kwargs["function_call"] = {"name": openai_functions[0]["name"]}
-    llm_chain = LLMChain(  # type: ignore[misc]
+    llm_chain = LLMChain(
         llm=llm,
         prompt=prompt,
         output_parser=output_parser,
@@ -153,7 +153,7 @@ def create_structured_output_chain(
     output_key: str = "function",
     output_parser: Optional[BaseLLMOutputParser] = None,
     **kwargs: Any,
-) -> LLMChain:  # type: ignore[valid-type]
+) -> LLMChain:
     """[Legacy] Create an LLMChain that uses an OpenAI function to get a structured output.

     Args:
@@ -216,7 +216,7 @@ def create_structured_output_chain(
     class _OutputFormatter(BaseModel):
         """Output formatter. Should always be used to format your response to the user."""  # noqa: E501

-        output: output_schema  # type: ignore
+        output: output_schema  # type: ignore[valid-type]

     function = _OutputFormatter
     output_parser = output_parser or PydanticAttrOutputFunctionsParser(
@@ -148,7 +148,7 @@ def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:
             )
         ),
     ]
-    prompt = ChatPromptTemplate(messages=messages)  # type: ignore[arg-type, call-arg]
+    prompt = ChatPromptTemplate(messages=messages)  # type: ignore[arg-type]

     chain = LLMChain(
         llm=llm,
@@ -170,7 +170,7 @@ def create_extraction_chain_pydantic(
     """

     class PydanticSchema(BaseModel):
-        info: list[pydantic_schema]  # type: ignore
+        info: list[pydantic_schema]

     if hasattr(pydantic_schema, "model_json_schema"):
         openai_schema = pydantic_schema.model_json_schema()
@@ -77,7 +77,7 @@ def _openapi_params_to_json_schema(params: list[Parameter], spec: OpenAPISpec) -
         if p.param_schema:
             schema = spec.get_schema(p.param_schema)
         else:
-            media_type_schema = list(p.content.values())[0].media_type_schema  # type: ignore
+            media_type_schema = list(p.content.values())[0].media_type_schema
             schema = spec.get_schema(media_type_schema)
         if p.description and not schema.description:
             schema.description = p.description
@@ -363,7 +363,7 @@ def get_openapi_chain(
         OpenAPISpec.from_text,
     ):
         try:
-            spec = conversion(spec)  # type: ignore[arg-type]
+            spec = conversion(spec)
             break
         except ImportError as e:
             raise e
@@ -96,7 +96,7 @@ def create_qa_with_structure_chain(
         HumanMessagePromptTemplate.from_template("Question: {question}"),
         HumanMessage(content="Tips: Make sure to answer in the correct format"),
     ]
-    prompt = prompt or ChatPromptTemplate(messages=messages)  # type: ignore[arg-type, call-arg]
+    prompt = prompt or ChatPromptTemplate(messages=messages)  # type: ignore[arg-type]

     chain = LLMChain(
         llm=llm,
@@ -69,7 +69,7 @@ class BaseQAWithSourcesChain(Chain, ABC):
             document_prompt=document_prompt,
             document_variable_name="summaries",
         )
-        reduce_documents_chain = ReduceDocumentsChain(  # type: ignore[misc]
+        reduce_documents_chain = ReduceDocumentsChain(
             combine_documents_chain=combine_results_chain
         )
         combine_documents_chain = MapReduceDocumentsChain(
@@ -11,12 +11,12 @@ try:
     from lark import Lark, Transformer, v_args
 except ImportError:

-    def v_args(*args: Any, **kwargs: Any) -> Any:  # type: ignore
+    def v_args(*args: Any, **kwargs: Any) -> Any:  # type: ignore[misc]
         """Dummy decorator for when lark is not installed."""
         return lambda _: None

-    Transformer = object  # type: ignore
-    Lark = object  # type: ignore
+    Transformer = object  # type: ignore[assignment,misc]
+    Lark = object  # type: ignore[assignment,misc]

 from langchain_core.structured_query import (
     Comparator,
@@ -156,7 +156,7 @@ def _load_map_reduce_chain(
         verbose=verbose,  # type: ignore[arg-type]
         callback_manager=callback_manager,
     )
-    reduce_documents_chain = ReduceDocumentsChain(  # type: ignore[misc]
+    reduce_documents_chain = ReduceDocumentsChain(
         combine_documents_chain=combine_documents_chain,
         collapse_documents_chain=collapse_chain,
         token_max=token_max,
@@ -20,7 +20,7 @@ from langchain.chains.router.multi_retrieval_prompt import (
 )


-class MultiRetrievalQAChain(MultiRouteChain):  # type: ignore[override]
+class MultiRetrievalQAChain(MultiRouteChain):
     """A multi-route chain that uses an LLM router chain to choose amongst retrieval
     qa chains."""

@@ -134,7 +134,7 @@ def create_sql_query_chain(
         ),
     }
     return (
-        RunnablePassthrough.assign(**inputs)  # type: ignore
+        RunnablePassthrough.assign(**inputs)  # type: ignore[return-value]
         | (
             lambda x: {
                 k: v
@@ -520,7 +520,7 @@ def _create_openai_json_runnable(
     """"""
     if isinstance(output_schema, type) and is_basemodel_subclass(output_schema):
         output_parser = output_parser or PydanticOutputParser(
-            pydantic_object=output_schema,  # type: ignore
+            pydantic_object=output_schema,
         )
         schema_as_dict = convert_to_openai_function(output_schema)["parameters"]
     else:
@@ -559,7 +559,7 @@ def _create_openai_functions_structured_output_runnable(
     class _OutputFormatter(BaseModel):
         """Output formatter. Should always be used to format your response to the user."""  # noqa: E501

-        output: output_schema  # type: ignore
+        output: output_schema  # type: ignore[valid-type]

     function = _OutputFormatter
     output_parser = output_parser or PydanticAttrOutputFunctionsParser(
@@ -62,14 +62,14 @@ def _load_map_reduce_chain(
         llm=llm,
         prompt=map_prompt,
         verbose=verbose,  # type: ignore[arg-type]
-        callbacks=callbacks,  # type: ignore[arg-type]
+        callbacks=callbacks,
     )
     _reduce_llm = reduce_llm or llm
     reduce_chain = LLMChain(
         llm=_reduce_llm,
         prompt=combine_prompt,
         verbose=verbose,  # type: ignore[arg-type]
-        callbacks=callbacks,  # type: ignore[arg-type]
+        callbacks=callbacks,
     )
     # TODO: document prompt
     combine_documents_chain = StuffDocumentsChain(
@@ -41,7 +41,7 @@ __all__ = [


 @overload
-def init_chat_model(  # type: ignore[overload-overlap]
+def init_chat_model(
     model: str,
     *,
     model_provider: Optional[str] = None,
@@ -347,7 +347,7 @@ def _init_chat_model_helper(
         _check_pkg("langchain_anthropic")
         from langchain_anthropic import ChatAnthropic

-        return ChatAnthropic(model=model, **kwargs)  # type: ignore[call-arg]
+        return ChatAnthropic(model=model, **kwargs)  # type: ignore[call-arg,unused-ignore]
     elif model_provider == "azure_openai":
         _check_pkg("langchain_openai")
         from langchain_openai import AzureChatOpenAI
@@ -402,7 +402,7 @@ def _init_chat_model_helper(
         _check_pkg("langchain_mistralai")
         from langchain_mistralai import ChatMistralAI

-        return ChatMistralAI(model=model, **kwargs)  # type: ignore[call-arg]
+        return ChatMistralAI(model=model, **kwargs)  # type: ignore[call-arg,unused-ignore]
     elif model_provider == "huggingface":
         _check_pkg("langchain_huggingface")
         from langchain_huggingface import ChatHuggingFace
@@ -87,7 +87,7 @@ class HypotheticalDocumentEmbedder:
         )
         from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H

-        return H(*args, **kwargs)  # type: ignore
+        return H(*args, **kwargs)  # type: ignore[return-value]

     @classmethod
     def from_llm(cls, *args: Any, **kwargs: Any) -> Any:
@@ -95,7 +95,7 @@ def resolve_pairwise_criteria(
     return criteria_


-class PairwiseStringResultOutputParser(BaseOutputParser[dict]):  # type: ignore[override]
+class PairwiseStringResultOutputParser(BaseOutputParser[dict]):
     """A parser for the output of the PairwiseStringEvalChain.

     Attributes:
@@ -151,7 +151,7 @@ class PairwiseStringResultOutputParser(BaseOutputParser[dict]): # type: ignore[
         }


-class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain):  # type: ignore[override]
+class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain):
     """A chain for comparing two outputs, such as the outputs
     of two models, prompts, or outputs of a single model on similar inputs.

@@ -391,7 +391,7 @@ Performance may be significantly worse with other models."
         return self._prepare_output(result)


-class LabeledPairwiseStringEvalChain(PairwiseStringEvalChain):  # type: ignore[override]
+class LabeledPairwiseStringEvalChain(PairwiseStringEvalChain):
     """A chain for comparing two outputs, such as the outputs
     of two models, prompts, or outputs of a single model on similar inputs,
     with labeled preferences.
@@ -165,7 +165,7 @@ def resolve_criteria(
     return criteria_


-class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):  # type: ignore[override]
+class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
     """LLM Chain for evaluating runs against criteria.

     Parameters
@@ -509,7 +509,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain): # type: ignor
         return self._prepare_output(result)


-class LabeledCriteriaEvalChain(CriteriaEvalChain):  # type: ignore[override]
+class LabeledCriteriaEvalChain(CriteriaEvalChain):
     """Criteria evaluation chain that requires references."""

     @classmethod
@@ -68,7 +68,7 @@ class ExactMatchStringEvaluator(StringEvaluator):
         """
         return "exact_match"

-    def _evaluate_strings(  # type: ignore[arg-type,override]
+    def _evaluate_strings(  # type: ignore[override]
         self,
         *,
         prediction: str,
@@ -148,9 +148,7 @@ def load_evaluator(
                 "specify a language model explicitly."
             )

-        llm = llm or ChatOpenAI(  # type: ignore[call-arg]
-            model="gpt-4", seed=42, temperature=0
-        )
+        llm = llm or ChatOpenAI(model="gpt-4", seed=42, temperature=0)
     except Exception as e:
         raise ValueError(
             f"Evaluation with the {evaluator_cls} requires a "
@@ -65,7 +65,7 @@ class RegexMatchStringEvaluator(StringEvaluator):
         """
         return "regex_match"

-    def _evaluate_strings(  # type: ignore[arg-type,override]
+    def _evaluate_strings(  # type: ignore[override]
         self,
         *,
         prediction: str,
@@ -144,7 +144,7 @@ class ScoreStringResultOutputParser(BaseOutputParser[dict]):
         }


-class ScoreStringEvalChain(StringEvaluator, LLMEvalChain, LLMChain):  # type: ignore[override]
+class ScoreStringEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
     """A chain for scoring on a scale of 1-10 the output of a model.

     Attributes:
@@ -396,7 +396,7 @@ Performance may be significantly worse with other models."
         return self._prepare_output(result)


-class LabeledScoreStringEvalChain(ScoreStringEvalChain):  # type: ignore[override]
+class LabeledScoreStringEvalChain(ScoreStringEvalChain):
     """A chain for scoring the output of a model on a scale of 1-10.

     Attributes:
@@ -45,7 +45,7 @@ try:
     from sqlalchemy.ext.asyncio import async_sessionmaker
 except ImportError:
     # dummy for sqlalchemy < 2
-    async_sessionmaker = type("async_sessionmaker", (type,), {})  # type: ignore
+    async_sessionmaker = type("async_sessionmaker", (type,), {})  # type: ignore[assignment,misc]

 Base = declarative_base()

@@ -109,7 +109,7 @@ class ConversationVectorStoreTokenBufferMemory(ConversationTokenBufferMemory):
     previous_history_template: str = DEFAULT_HISTORY_TEMPLATE
     split_chunk_size: int = 1000

-    _memory_retriever: VectorStoreRetrieverMemory = PrivateAttr(default=None)  # type: ignore
+    _memory_retriever: VectorStoreRetrieverMemory = PrivateAttr(default=None)  # type: ignore[assignment]
     _timestamps: list[datetime] = PrivateAttr(default_factory=list)

     @property
@@ -12,12 +12,7 @@ from langchain_core._api import warn_deprecated
 # * Creating namespaces for pydantic v1 and v2 should allow us to write code that
 #   unambiguously uses either v1 or v2 API.
 # * This change is easier to roll out and roll back.
-try:
-    from pydantic.v1 import *  # noqa: F403
-except ImportError:
-    from pydantic import *  # type: ignore # noqa: F403
-
-
+from pydantic.v1 import *  # noqa: F403

 try:
     _PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0])
@@ -1,9 +1,5 @@
 from langchain_core._api import warn_deprecated
-try:
-    from pydantic.v1.dataclasses import *  # noqa: F403
-except ImportError:
-    from pydantic.dataclasses import *  # type: ignore # noqa: F403
-
+from pydantic.v1.dataclasses import *  # noqa: F403

 warn_deprecated(
     "0.3.0",
@@ -1,9 +1,5 @@
 from langchain_core._api import warn_deprecated
-try:
-    from pydantic.v1.main import *  # noqa: F403
-except ImportError:
-    from pydantic.main import *  # type: ignore # noqa: F403
-
+from pydantic.v1.main import *  # noqa: F403

 warn_deprecated(
     "0.3.0",
@@ -99,7 +99,7 @@ class LLMChainExtractor(BaseDocumentCompressor):
             if len(outputs[i]) == 0:
                 continue
             compressed_docs.append(
-                Document(page_content=outputs[i], metadata=doc.metadata)  # type: ignore[arg-type]
+                Document(page_content=outputs[i], metadata=doc.metadata)
             )
         return compressed_docs

@@ -272,7 +272,7 @@ class EnsembleRetriever(BaseRetriever):
         # Enforce that retrieved docs are Documents for each list in retriever_docs
         for i in range(len(retriever_docs)):
             retriever_docs[i] = [
-                Document(page_content=doc) if not isinstance(doc, Document) else doc  # type: ignore[arg-type]
+                Document(page_content=doc) if not isinstance(doc, Document) else doc
                 for doc in retriever_docs[i]
             ]

@@ -180,7 +180,7 @@ def _get_builtin_translator(vectorstore: VectorStore) -> Visitor:
         return ChromaTranslator()

     try:
-        from langchain_postgres import PGVector  # type: ignore[no-redef]
+        from langchain_postgres import PGVector
         from langchain_postgres import PGVectorTranslator as NewPGVectorTranslator
     except ImportError:
         pass
@@ -239,7 +239,7 @@ class StringExampleMapper(Serializable):
         return self.map(example)


-class StringRunEvaluatorChain(Chain, RunEvaluator):  # type: ignore[override, override]
+class StringRunEvaluatorChain(Chain, RunEvaluator):
     """Evaluate Run and optional examples."""

     run_mapper: StringRunMapper
@@ -91,7 +91,7 @@ lint = [
     "cffi; python_version >= \"3.10\"",
 ]
 typing = [
-    "mypy<2.0,>=1.10",
+    "mypy<2.0,>=1.15",
     "types-pyyaml<7.0.0.0,>=6.0.12.2",
     "types-requests<3.0.0.0,>=2.28.11.5",
     "types-toml<1.0.0.0,>=0.10.8.1",
@@ -126,7 +126,7 @@ exclude = ["tests/integration_tests/examples/non-utf8-encoding.py"]
 [tool.mypy]
 ignore_missing_imports = "True"
 disallow_untyped_defs = "True"
-exclude = ["notebooks", "examples", "example_data"]
+warn_unused_ignores = "True"

 [tool.codespell]
 skip = ".git,*.pdf,*.svg,*.pdf,*.yaml,*.ipynb,poetry.lock,*.min.js,*.css,package-lock.json,example_data,_dist,examples,*.trig"
@@ -134,7 +134,7 @@ ignore-regex = ".*(Stati Uniti|Tense=Pres).*"
 ignore-words-list = "momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogyny,unsecure,damon,crate,aadd,symbl,precesses,accademia,nin"

 [tool.ruff.lint]
-select = ["E", "F", "I", "T201", "D", "UP"]
+select = ["E", "F", "I", "PGH003", "T201", "D", "UP"]
 ignore = ["UP007", ]
 pydocstyle = { convention = "google" }

@@ -200,6 +200,6 @@ def custom_openapi() -> dict[str, Any]:

 # This lets us prevent the "servers" configuration from being overwritten in
 # the auto-generated OpenAPI schema
-app.openapi = custom_openapi  # type: ignore
+app.openapi = custom_openapi
 if __name__ == "__main__":
     uvicorn.run(app, host="0.0.0.0", port=PORT)
@@ -392,8 +392,8 @@ def test_agent_with_new_prefix_suffix() -> None:
     )

     # avoids "BasePromptTemplate" has no attribute "template" error
-    assert hasattr(agent.agent.llm_chain.prompt, "template")  # type: ignore
-    prompt_str = agent.agent.llm_chain.prompt.template  # type: ignore
+    assert hasattr(agent.agent.llm_chain.prompt, "template")  # type: ignore[union-attr]
+    prompt_str = agent.agent.llm_chain.prompt.template  # type: ignore[union-attr]
     assert prompt_str.startswith(prefix), "Prompt does not start with prefix"
     assert prompt_str.endswith(suffix), "Prompt does not end with suffix"

@@ -463,7 +463,7 @@ async def test_runnable_agent() -> None:
         return AgentFinish(return_values={"foo": "meow"}, log="hard-coded-message")

     agent = template | model | fake_parse
-    executor = AgentExecutor(agent=agent, tools=[])  # type: ignore[arg-type]
+    executor = AgentExecutor(agent=agent, tools=[])

     # Invoke
     result: Any = await asyncio.to_thread(executor.invoke, {"question": "hello"})
@@ -527,7 +527,7 @@ async def test_runnable_agent() -> None:
             run_log = result
         else:
             # `+` is defined for RunLogPatch
-            run_log = run_log + result  # type: ignore[union-attr]
+            run_log = run_log + result

     assert isinstance(run_log, RunLog)

@@ -583,7 +583,7 @@ async def test_runnable_agent_with_function_calls() -> None:
         return "Spying from under the bed."

     agent = template | model | fake_parse
-    executor = AgentExecutor(agent=agent, tools=[find_pet])  # type: ignore[arg-type, list-item]
+    executor = AgentExecutor(agent=agent, tools=[find_pet])

     # Invoke
     result = await asyncio.to_thread(executor.invoke, {"question": "hello"})
@@ -701,7 +701,7 @@ async def test_runnable_with_multi_action_per_step() -> None:
         return "purrrr"

     agent = template | model | fake_parse
-    executor = AgentExecutor(agent=agent, tools=[find_pet])  # type: ignore[arg-type, list-item]
+    executor = AgentExecutor(agent=agent, tools=[find_pet])

     # Invoke
     result = await asyncio.to_thread(executor.invoke, {"question": "hello"})
@@ -852,10 +852,10 @@ async def test_openai_agent_with_streaming() -> None:
     # decorator.
     agent = create_openai_functions_agent(
         model,
-        [find_pet],  # type: ignore[list-item]
+        [find_pet],
         template,
     )
-    executor = AgentExecutor(agent=agent, tools=[find_pet])  # type: ignore[arg-type, list-item]
+    executor = AgentExecutor(agent=agent, tools=[find_pet])

     # Invoke
     result = await asyncio.to_thread(executor.invoke, {"question": "hello"})
@@ -1006,7 +1006,7 @@ def _make_tools_invocation(name_to_arguments: dict[str, dict[str, Any]]) -> AIMe
         additional_kwargs={
             "tool_calls": raw_tool_calls,
         },
-        tool_calls=tool_calls,  # type: ignore[arg-type]
+        tool_calls=tool_calls,
     )


@@ -1024,7 +1024,7 @@ async def test_openai_agent_tools_agent() -> None:
         ]
     )

-    GenericFakeChatModel.bind_tools = lambda self, x: self  # type: ignore
+    GenericFakeChatModel.bind_tools = lambda self, x: self  # type: ignore[assignment,misc]
     model = GenericFakeChatModel(messages=infinite_cycle)

     @tool
@@ -1053,16 +1053,16 @@ async def test_openai_agent_tools_agent() -> None:
     # decorator.
     openai_agent = create_openai_tools_agent(
         model,
-        [find_pet],  # type: ignore[list-item]
+        [find_pet],
         template,
     )
     tool_calling_agent = create_tool_calling_agent(
         model,
-        [find_pet],  # type: ignore[list-item]
+        [find_pet],
         template,
     )
     for agent in [openai_agent, tool_calling_agent]:
-        executor = AgentExecutor(agent=agent, tools=[find_pet])  # type: ignore[arg-type, list-item]
+        executor = AgentExecutor(agent=agent, tools=[find_pet])

         # Invoke
         result = await asyncio.to_thread(executor.invoke, {"question": "hello"})
@@ -238,7 +238,7 @@ def test_agent_iterator_properties_and_setters() -> None:
     assert isinstance(agent_iter.tags, type(None))
     assert isinstance(agent_iter.agent_executor, AgentExecutor)

-    agent_iter.inputs = "New input"  # type: ignore
+    agent_iter.inputs = "New input"  # type: ignore[assignment]
     assert isinstance(agent_iter.inputs, dict)

     agent_iter.callbacks = [FakeCallbackHandler()]
@@ -17,7 +17,7 @@ def test_initialize_agent_with_str_agent_type() -> None:
     """Test initialize_agent with a string."""
     fake_llm = FakeLLM()
     agent_executor = initialize_agent(
-        [my_tool],  # type: ignore[list-item]
+        [my_tool],
         fake_llm,
         "zero-shot-react-description",  # type: ignore[arg-type]
     )
@@ -16,7 +16,7 @@ def test_valid_action_and_action_input_parse() -> None:
     Action: foo
     Action Input: bar"""

-    agent_action: AgentAction = mrkl_output_parser.parse(llm_output)  # type: ignore
+    agent_action: AgentAction = mrkl_output_parser.parse(llm_output)  # type: ignore[assignment]
     assert agent_action.tool == "foo"
     assert agent_action.tool_input == "bar"

@@ -24,7 +24,7 @@ def test_valid_action_and_action_input_parse() -> None:
 def test_valid_final_answer_parse() -> None:
     llm_output = """Final Answer: The best pizza to eat is margaritta """

-    agent_finish: AgentFinish = mrkl_output_parser.parse(llm_output)  # type: ignore
+    agent_finish: AgentFinish = mrkl_output_parser.parse(llm_output)  # type: ignore[assignment]
     assert (
         agent_finish.return_values.get("output")
         == "The best pizza to eat is margaritta"
@@ -59,7 +59,7 @@ def test_final_answer_before_parsable_action() -> None:
     Action: foo
     Action Input: bar
     """
-    agent_finish: AgentFinish = mrkl_output_parser.parse(llm_output)  # type: ignore
+    agent_finish: AgentFinish = mrkl_output_parser.parse(llm_output)  # type: ignore[assignment]
     assert (
         agent_finish.return_values.get("output")
         == "The best pizza to eat is margaritta"
@@ -11,7 +11,7 @@ def _create_mock_client(*args: Any, use_async: bool = False, **kwargs: Any) -> A
     client = AsyncMock() if use_async else MagicMock()
     mock_assistant = MagicMock()
     mock_assistant.id = "abc123"
-    client.beta.assistants.create.return_value = mock_assistant  # type: ignore
+    client.beta.assistants.create.return_value = mock_assistant
     return client


@@ -254,7 +254,7 @@ class FakeCallbackHandler(BaseCallbackHandler, BaseFakeCallbackHandlerMixin):
     ) -> Any:
         self.on_retriever_error_common()

-    def __deepcopy__(self, memo: dict) -> "FakeCallbackHandler":  # type: ignore
+    def __deepcopy__(self, memo: dict) -> "FakeCallbackHandler":  # type: ignore[override]
         return self


@@ -388,5 +388,5 @@ class FakeAsyncCallbackHandler(AsyncCallbackHandler, BaseFakeCallbackHandlerMixi
     ) -> None:
         self.on_text_common()

-    def __deepcopy__(self, memo: dict) -> "FakeAsyncCallbackHandler":  # type: ignore
+    def __deepcopy__(self, memo: dict) -> "FakeAsyncCallbackHandler":  # type: ignore[override]
         return self
@@ -173,7 +173,7 @@ def test_configurable_with_default() -> None:
     for method in ("get_num_tokens", "get_num_tokens_from_messages", "dict"):
         assert hasattr(model, method)

-    assert model.model_name == "gpt-4o"  # type: ignore[attr-defined]
+    assert model.model_name == "gpt-4o"

     model_with_tools = model.bind_tools(
         [{"name": "foo", "description": "foo", "parameters": {}}]
@@ -123,7 +123,7 @@ def test_trajectory_eval_chain(
         },
         sequential_responses=True,
     )
-    chain = TrajectoryEvalChain.from_llm(llm=llm, agent_tools=[foo])  # type: ignore
+    chain = TrajectoryEvalChain.from_llm(llm=llm, agent_tools=[foo])
     # Test when ref is not provided
     res = chain.evaluate_agent_trajectory(
         input="What is your favorite food?",
@@ -151,7 +151,7 @@ def test_trajectory_eval_chain_no_tools(
         },
         sequential_responses=True,
     )
-    chain = TrajectoryEvalChain.from_llm(llm=llm)  # type: ignore
+    chain = TrajectoryEvalChain.from_llm(llm=llm)
     res = chain.evaluate_agent_trajectory(
         input="What is your favorite food?",
         agent_trajectory=intermediate_steps,
@@ -175,7 +175,7 @@ def test_old_api_works(intermediate_steps: list[tuple[AgentAction, str]]) -> Non
         },
         sequential_responses=True,
    )
-    chain = TrajectoryEvalChain.from_llm(llm=llm)  # type: ignore
+    chain = TrajectoryEvalChain.from_llm(llm=llm)
     res = chain(
         {
             "question": "What is your favorite food?",
|
@@ -14,7 +14,6 @@ from tests.unit_tests.llms.fake_llm import FakeLLM


 def test_resolve_criteria_str() -> None:
-    # type: ignore
     assert CriteriaEvalChain.resolve_criteria("helpfulness") == {
         "helpfulness": _SUPPORTED_CRITERIA[Criteria.HELPFULNESS]
     }
@@ -61,7 +61,7 @@ def test_load_criteria_evaluator() -> None:
     # Patch the env with an openai-api-key
     with patch.dict(os.environ, {"OPENAI_API_KEY": "foo"}):
         # Check it can load using a string arg (even if that's not how it's typed)
-        load_evaluator("criteria")  # type: ignore
+        load_evaluator("criteria")  # type: ignore[arg-type]


 @pytest.mark.parametrize("chain_cls", [QAEvalChain, ContextQAEvalChain, CotQAEvalChain])
@@ -78,7 +78,7 @@ def test_returns_expected_results(
     fake_llm = FakeLLM(
         queries={"text": "The meaning of life\nCORRECT"}, sequential_responses=True
     )
-    chain = chain_cls.from_llm(fake_llm)  # type: ignore
+    chain = chain_cls.from_llm(fake_llm)  # type: ignore[attr-defined]
     results = chain.evaluate_strings(
         prediction="my prediction", reference="my reference", input="my input"
     )
@@ -14,9 +14,9 @@ def test_hashed_document_hashing() -> None:
 def test_hashing_with_missing_content() -> None:
     """Check that ValueError is raised if page_content is missing."""
     with pytest.raises(TypeError):
-        _HashedDocument(
+        _HashedDocument(  # type: ignore[call-arg]
             metadata={"key": "value"},
-        )  # type: ignore
+        )


 def test_uid_auto_assigned_to_hash() -> None:
@@ -57,7 +57,7 @@ class InMemoryVectorStore(VectorStore):
         for _id in ids:
             self.store.pop(_id, None)

-    def add_documents(  # type: ignore
+    def add_documents(
         self,
         documents: Sequence[Document],
         *,
@@ -140,7 +140,7 @@ def record_manager() -> SQLRecordManager:
     return record_manager


-@pytest_asyncio.fixture  # type: ignore
+@pytest_asyncio.fixture
 @pytest.mark.requires("aiosqlite")
 async def arecord_manager() -> SQLRecordManager:
     """Timestamped set fixture."""
@@ -292,7 +292,7 @@ def test_index_simple_delete_full(

     doc_texts = set(
         # Ignoring type since doc should be in the store and not a None
-        vector_store.store.get(uid).page_content  # type: ignore
+        vector_store.store.get(uid).page_content  # type: ignore[union-attr]
         for uid in vector_store.store
     )
     assert doc_texts == {"mutated document 1", "This is another document."}
@@ -368,7 +368,7 @@ async def test_aindex_simple_delete_full(

     doc_texts = set(
         # Ignoring type since doc should be in the store and not a None
-        vector_store.store.get(uid).page_content  # type: ignore
+        vector_store.store.get(uid).page_content  # type: ignore[union-attr]
         for uid in vector_store.store
     )
     assert doc_texts == {"mutated document 1", "This is another document."}
@@ -661,7 +661,7 @@ def test_incremental_delete(

     doc_texts = set(
         # Ignoring type since doc should be in the store and not a None
-        vector_store.store.get(uid).page_content  # type: ignore
+        vector_store.store.get(uid).page_content  # type: ignore[union-attr]
         for uid in vector_store.store
     )
     assert doc_texts == {"This is another document.", "This is a test document."}
@@ -720,7 +720,7 @@ def test_incremental_delete(

     doc_texts = set(
         # Ignoring type since doc should be in the store and not a None
-        vector_store.store.get(uid).page_content  # type: ignore
+        vector_store.store.get(uid).page_content  # type: ignore[union-attr]
         for uid in vector_store.store
     )
     assert doc_texts == {
@@ -788,7 +788,7 @@ def test_incremental_indexing_with_batch_size(

     doc_texts = set(
         # Ignoring type since doc should be in the store and not a None
-        vector_store.store.get(uid).page_content  # type: ignore
+        vector_store.store.get(uid).page_content  # type: ignore[union-attr]
         for uid in vector_store.store
     )
     assert doc_texts == {"1", "2", "3", "4"}
@@ -838,7 +838,7 @@ def test_incremental_delete_with_batch_size(

     doc_texts = set(
         # Ignoring type since doc should be in the store and not a None
-        vector_store.store.get(uid).page_content  # type: ignore
+        vector_store.store.get(uid).page_content  # type: ignore[union-attr]
         for uid in vector_store.store
     )
     assert doc_texts == {"1", "2", "3", "4"}
@@ -984,7 +984,7 @@ async def test_aincremental_delete(

     doc_texts = set(
         # Ignoring type since doc should be in the store and not a None
-        vector_store.store.get(uid).page_content  # type: ignore
+        vector_store.store.get(uid).page_content  # type: ignore[union-attr]
         for uid in vector_store.store
     )
     assert doc_texts == {"This is another document.", "This is a test document."}
@@ -1043,7 +1043,7 @@ async def test_aincremental_delete(

     doc_texts = set(
         # Ignoring type since doc should be in the store and not a None
-        vector_store.store.get(uid).page_content  # type: ignore
+        vector_store.store.get(uid).page_content  # type: ignore[union-attr]
         for uid in vector_store.store
     )
     assert doc_texts == {
@@ -139,7 +139,7 @@ def test_aliases_hidden() -> None:
     dumped = json.loads(dumps(test_class, pretty=True))

     # Check by alias
-    test_class = TestClass(my_favorite_secret_alias="hello", my_other_secret="world")  # type: ignore[call-arg]
+    test_class = TestClass(my_favorite_secret_alias="hello", my_other_secret="world")
     dumped = json.loads(dumps(test_class, pretty=True))
     expected_dump = {
         "lc": 1,
@@ -25,7 +25,7 @@ def test_loads_openai_llm() -> None:

     llm = CommunityOpenAI(
         model="davinci", temperature=0.5, openai_api_key="hello", top_p=0.8
-    )  # type: ignore[call-arg]
+    )
     llm_string = dumps(llm)
     llm2 = loads(llm_string, secrets_map={"OPENAI_API_KEY": "hello"})

@@ -41,7 +41,7 @@ def test_loads_llmchain() -> None:

     llm = CommunityOpenAI(
         model="davinci", temperature=0.5, openai_api_key="hello", top_p=0.8
-    )  # type: ignore[call-arg]
+    )
     prompt = PromptTemplate.from_template("hello {name}!")
     chain = LLMChain(llm=llm, prompt=prompt)
     chain_string = dumps(chain)
@@ -64,7 +64,7 @@ def test_loads_llmchain_env() -> None:
     if not has_env:
         os.environ["OPENAI_API_KEY"] = "env_variable"

-    llm = OpenAI(model="davinci", temperature=0.5, top_p=0.8)  # type: ignore[call-arg]
+    llm = OpenAI(model="davinci", temperature=0.5, top_p=0.8)
     prompt = PromptTemplate.from_template("hello {name}!")
     chain = LLMChain(llm=llm, prompt=prompt)
     chain_string = dumps(chain)
@@ -82,7 +82,7 @@ def test_loads_llmchain_env() -> None:

 @pytest.mark.requires("openai")
 def test_loads_llmchain_with_non_serializable_arg() -> None:
-    llm = CommunityOpenAI(  # type: ignore[call-arg]
+    llm = CommunityOpenAI(
         model="davinci",
         temperature=0.5,
         openai_api_key="hello",
@@ -99,7 +99,7 @@ def test_loads_llmchain_with_non_serializable_arg() -> None:
 def test_load_openai_llm() -> None:
     from langchain_openai import OpenAI

-    llm = CommunityOpenAI(model="davinci", temperature=0.5, openai_api_key="hello")  # type: ignore[call-arg]
+    llm = CommunityOpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
     llm_obj = dumpd(llm)
     llm2 = load(llm_obj, secrets_map={"OPENAI_API_KEY": "hello"})

@@ -112,7 +112,7 @@ def test_load_openai_llm() -> None:
 def test_load_llmchain() -> None:
     from langchain_openai import OpenAI

-    llm = CommunityOpenAI(model="davinci", temperature=0.5, openai_api_key="hello")  # type: ignore[call-arg]
+    llm = CommunityOpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
     prompt = PromptTemplate.from_template("hello {name}!")
     chain = LLMChain(llm=llm, prompt=prompt)
     chain_obj = dumpd(chain)
@@ -135,7 +135,7 @@ def test_load_llmchain_env() -> None:
     if not has_env:
         os.environ["OPENAI_API_KEY"] = "env_variable"

-    llm = CommunityOpenAI(model="davinci", temperature=0.5)  # type: ignore[call-arg]
+    llm = CommunityOpenAI(model="davinci", temperature=0.5)
     prompt = PromptTemplate.from_template("hello {name}!")
     chain = LLMChain(llm=llm, prompt=prompt)
     chain_obj = dumpd(chain)
@@ -141,7 +141,7 @@ def _dict_from_ast(node: ast.Dict) -> dict[str, str]:
     """
     result: dict[str, str] = {}
     for key, value in zip(node.keys, node.values):
-        py_key = _literal_eval_str(key)  # type: ignore
+        py_key = _literal_eval_str(key)  # type: ignore[arg-type]
         py_value = _literal_eval_str(value)
         result[py_key] = py_value
     return result
@@ -21,7 +21,7 @@ def calculator(expression: str) -> str:

 @pytest.fixture
 def tools() -> list[BaseTool]:
-    return [search, calculator]  # type: ignore
+    return [search, calculator]


 def test_render_text_description(tools: list[BaseTool]) -> None:
@@ -2568,7 +2568,7 @@ test-integration = [
 typing = [
     { name = "langchain-core", editable = "../core" },
     { name = "langchain-text-splitters", editable = "../text-splitters" },
-    { name = "mypy", specifier = ">=1.10,<2.0" },
+    { name = "mypy", specifier = ">=1.15,<2.0" },
     { name = "mypy-protobuf", specifier = ">=3.0.0,<4.0.0" },
     { name = "numpy", marker = "python_full_version < '3.13'", specifier = ">=1.26.4" },
     { name = "numpy", marker = "python_full_version >= '3.13'", specifier = ">=2.1.0" },
@@ -2705,7 +2705,7 @@ wheels = [

 [[package]]
 name = "langchain-core"
-version = "0.3.51"
+version = "0.3.52"
 source = { editable = "../core" }
 dependencies = [
     { name = "jsonpatch" },
@@ -2745,6 +2745,8 @@ test = [
     { name = "numpy", marker = "python_full_version >= '3.13'", specifier = ">=2.1.0" },
     { name = "pytest", specifier = ">=8,<9" },
     { name = "pytest-asyncio", specifier = ">=0.21.1,<1.0.0" },
+    { name = "pytest-benchmark" },
+    { name = "pytest-codspeed" },
     { name = "pytest-mock", specifier = ">=3.10.0,<4.0.0" },
     { name = "pytest-socket", specifier = ">=0.7.0,<1.0.0" },
     { name = "pytest-watcher", specifier = ">=0.3.4,<1.0.0" },
@@ -2755,8 +2757,7 @@ test = [
 test-integration = []
 typing = [
     { name = "langchain-text-splitters", directory = "../text-splitters" },
-    { name = "mypy", specifier = ">=1.10,<1.11" },
-    { name = "types-jinja2", specifier = ">=2.11.9,<3.0.0" },
+    { name = "mypy", specifier = ">=1.15,<1.16" },
     { name = "types-pyyaml", specifier = ">=6.0.12.2,<7.0.0.0" },
     { name = "types-requests", specifier = ">=2.28.11.5,<3.0.0.0" },
 ]
@@ -2882,7 +2883,7 @@ wheels = [

 [[package]]
 name = "langchain-openai"
-version = "0.3.12"
+version = "0.3.13"
 source = { editable = "../partners/openai" }
 dependencies = [
     { name = "langchain-core" },
@@ -2944,7 +2945,7 @@ wheels = [

 [[package]]
 name = "langchain-tests"
-version = "0.3.17"
+version = "0.3.18"
 source = { editable = "../standard-tests" }
 dependencies = [
     { name = "httpx" },