Mirror of https://github.com/hwchase17/langchain.git (synced 2025-08-15 15:46:47 +00:00)
Commit 745d2476a2 (parent aa785fa6ec)
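The hunks below bump the pinned mypy from 0.991 to the 1.x line and, rather than refactoring every call the stricter checker now flags, silence individual diagnostics with error-code-scoped `# type: ignore[...]` comments. A minimal sketch of how these scoped suppressions behave (the function here is hypothetical, not from this commit):

def double(n: int) -> int:
    return 2 * n


# mypy reports: Argument 1 to "double" has incompatible type "str";
# expected "int"  [arg-type]. The scoped comment silences exactly that
# error code; an unrelated error on the same line would still be
# reported, unlike a bare "# type: ignore".
result = double("3")  # type: ignore[arg-type]

Runtime is unaffected ("33" is returned); only the checker's verdict changes, which is the trade-off this commit makes throughout.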
@@ -97,7 +97,7 @@ class ChatAgent(Agent):
         ]
         if input_variables is None:
             input_variables = ["input", "agent_scratchpad"]
-        return ChatPromptTemplate(input_variables=input_variables, messages=messages)
+        return ChatPromptTemplate(input_variables=input_variables, messages=messages)  # type: ignore[arg-type]

     @classmethod
     def from_llm_and_tools(
@@ -88,7 +88,7 @@ class ConversationalChatAgent(Agent):
             HumanMessagePromptTemplate.from_template(final_prompt),
             MessagesPlaceholder(variable_name="agent_scratchpad"),
         ]
-        return ChatPromptTemplate(input_variables=input_variables, messages=messages)
+        return ChatPromptTemplate(input_variables=input_variables, messages=messages)  # type: ignore[arg-type]

     def _construct_scratchpad(
         self, intermediate_steps: List[Tuple[AgentAction, str]]
@@ -406,7 +406,7 @@ def _get_eleven_labs_text2speech(**kwargs: Any) -> BaseTool:


 def _get_memorize(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
-    return Memorize(llm=llm)
+    return Memorize(llm=llm)  # type: ignore[arg-type]


 def _get_google_cloud_texttospeech(**kwargs: Any) -> BaseTool:
@@ -201,7 +201,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
                 MessagesPlaceholder(variable_name="agent_scratchpad"),
             ]
         )
-        return ChatPromptTemplate(messages=messages)
+        return ChatPromptTemplate(messages=messages)  # type: ignore[arg-type, call-arg]

     @classmethod
     def from_llm_and_tools(
@@ -220,7 +220,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
             extra_prompt_messages=extra_prompt_messages,
             system_message=system_message,
         )
-        return cls(
+        return cls(  # type: ignore[call-arg]
             llm=llm,
             prompt=prompt,
             tools=tools,
@@ -279,7 +279,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
                 MessagesPlaceholder(variable_name="agent_scratchpad"),
             ]
         )
-        return ChatPromptTemplate(messages=messages)
+        return ChatPromptTemplate(messages=messages)  # type: ignore[arg-type, call-arg]

     @classmethod
     def from_llm_and_tools(
@@ -298,7 +298,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
             extra_prompt_messages=extra_prompt_messages,
             system_message=system_message,
         )
-        return cls(
+        return cls(  # type: ignore[call-arg]
             llm=llm,
             prompt=prompt,
             tools=tools,
@@ -103,7 +103,7 @@ class StructuredChatAgent(Agent):
             *_memory_prompts,
             HumanMessagePromptTemplate.from_template(human_message_template),
         ]
-        return ChatPromptTemplate(input_variables=input_variables, messages=messages)
+        return ChatPromptTemplate(input_variables=input_variables, messages=messages)  # type: ignore[arg-type]

     @classmethod
     def from_llm_and_tools(
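One plausible reading of the `messages=...` ignores above: `List` is invariant under mypy, so a list inferred at a broader element type is rejected where a narrower (or differently unioned) element type is expected, even though every element is acceptable at runtime. A stand-in sketch of that mechanic (these classes are illustrative, not the langchain ones):

from typing import List


class BaseMessage: ...


class SystemMessage(BaseMessage): ...


def build_prompt(messages: List[SystemMessage]) -> int:
    return len(messages)


# The list is annotated List[BaseMessage]; List is invariant, so mypy
# flags the call as [arg-type] even though it runs fine here.
msgs: List[BaseMessage] = [SystemMessage(), SystemMessage()]
count = build_prompt(msgs)  # type: ignore[arg-type]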
@@ -199,10 +199,11 @@ class GraphCypherQAChain(Chain):
             cypher_prompt if cypher_prompt is not None else CYPHER_GENERATION_PROMPT
         )

-        qa_chain = LLMChain(llm=qa_llm or llm, **use_qa_llm_kwargs)
+        qa_chain = LLMChain(llm=qa_llm or llm, **use_qa_llm_kwargs)  # type: ignore[arg-type]

         cypher_generation_chain = LLMChain(
-            llm=cypher_llm or llm, **use_cypher_llm_kwargs
+            llm=cypher_llm or llm,  # type: ignore[arg-type]
+            **use_cypher_llm_kwargs,  # type: ignore[arg-type]
         )

         if exclude_types and include_types:
@@ -135,7 +135,7 @@ class NeptuneSparqlQAChain(Chain):
         )
         sparql_generation_chain = LLMChain(llm=llm, prompt=sparql_prompt)

-        return cls(
+        return cls(  # type: ignore[call-arg]
             qa_chain=qa_chain,
             sparql_generation_chain=sparql_generation_chain,
             examples=examples,
@@ -54,7 +54,7 @@ def _load_question_to_checked_assertions_chain(
         revised_answer_chain,
     ]
     question_to_checked_assertions_chain = SequentialChain(
-        chains=chains,
+        chains=chains,  # type: ignore[arg-type]
        input_variables=["question"],
        output_variables=["revised_statement"],
        verbose=True,
@@ -69,7 +69,9 @@ def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder:
     else:
         raise ValueError("`embeddings` must be present.")
     return HypotheticalDocumentEmbedder(
-        llm_chain=llm_chain, base_embeddings=embeddings, **config
+        llm_chain=llm_chain,  # type: ignore[arg-type]
+        base_embeddings=embeddings,
+        **config,  # type: ignore[arg-type]
     )


@@ -125,7 +127,7 @@ def _load_map_reduce_documents_chain(

     return MapReduceDocumentsChain(
         llm_chain=llm_chain,
-        reduce_documents_chain=reduce_documents_chain,
+        reduce_documents_chain=reduce_documents_chain,  # type: ignore[arg-type]
         **config,
     )

@@ -207,7 +209,7 @@ def _load_llm_bash_chain(config: dict, **kwargs: Any) -> Any:
     elif "prompt_path" in config:
         prompt = load_prompt(config.pop("prompt_path"))
     if llm_chain:
-        return LLMBashChain(llm_chain=llm_chain, prompt=prompt, **config)
+        return LLMBashChain(llm_chain=llm_chain, prompt=prompt, **config)  # type: ignore[arg-type]
     else:
         return LLMBashChain(llm=llm, prompt=prompt, **config)

@@ -250,10 +252,10 @@ def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain:
         revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path"))
     return LLMCheckerChain(
         llm=llm,
-        create_draft_answer_prompt=create_draft_answer_prompt,
-        list_assertions_prompt=list_assertions_prompt,
-        check_assertions_prompt=check_assertions_prompt,
-        revised_answer_prompt=revised_answer_prompt,
+        create_draft_answer_prompt=create_draft_answer_prompt,  # type: ignore[arg-type]
+        list_assertions_prompt=list_assertions_prompt,  # type: ignore[arg-type]
+        check_assertions_prompt=check_assertions_prompt,  # type: ignore[arg-type]
+        revised_answer_prompt=revised_answer_prompt,  # type: ignore[arg-type]
         **config,
     )

@@ -281,7 +283,7 @@ def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain:
     elif "prompt_path" in config:
         prompt = load_prompt(config.pop("prompt_path"))
     if llm_chain:
-        return LLMMathChain(llm_chain=llm_chain, prompt=prompt, **config)
+        return LLMMathChain(llm_chain=llm_chain, prompt=prompt, **config)  # type: ignore[arg-type]
     else:
         return LLMMathChain(llm=llm, prompt=prompt, **config)

@@ -296,7 +298,7 @@ def _load_map_rerank_documents_chain(
         llm_chain = load_chain(config.pop("llm_chain_path"))
     else:
         raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
-    return MapRerankDocumentsChain(llm_chain=llm_chain, **config)
+    return MapRerankDocumentsChain(llm_chain=llm_chain, **config)  # type: ignore[arg-type]


 def _load_pal_chain(config: dict, **kwargs: Any) -> Any:
@@ -309,7 +311,7 @@ def _load_pal_chain(config: dict, **kwargs: Any) -> Any:
         llm_chain = load_chain(config.pop("llm_chain_path"))
     else:
         raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
-    return PALChain(llm_chain=llm_chain, **config)
+    return PALChain(llm_chain=llm_chain, **config)  # type: ignore[arg-type]


 def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
@@ -337,8 +339,8 @@ def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
     elif "document_prompt_path" in config:
         document_prompt = load_prompt(config.pop("document_prompt_path"))
     return RefineDocumentsChain(
-        initial_llm_chain=initial_llm_chain,
-        refine_llm_chain=refine_llm_chain,
+        initial_llm_chain=initial_llm_chain,  # type: ignore[arg-type]
+        refine_llm_chain=refine_llm_chain,  # type: ignore[arg-type]
         document_prompt=document_prompt,
         **config,
     )
@@ -355,7 +357,7 @@ def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain:
             "One of `combine_documents_chain` or "
             "`combine_documents_chain_path` must be present."
         )
-    return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config)
+    return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config)  # type: ignore[arg-type]


 def _load_sql_database_chain(config: dict, **kwargs: Any) -> Any:
@@ -368,7 +370,7 @@ def _load_sql_database_chain(config: dict, **kwargs: Any) -> Any:
     if "llm_chain" in config:
         llm_chain_config = config.pop("llm_chain")
         chain = load_chain_from_config(llm_chain_config)
-        return SQLDatabaseChain(llm_chain=chain, database=database, **config)
+        return SQLDatabaseChain(llm_chain=chain, database=database, **config)  # type: ignore[arg-type]
     if "llm" in config:
         llm_config = config.pop("llm")
         llm = load_llm_from_config(llm_config)
@@ -403,7 +405,7 @@ def _load_vector_db_qa_with_sources_chain(
             "`combine_documents_chain_path` must be present."
         )
     return VectorDBQAWithSourcesChain(
-        combine_documents_chain=combine_documents_chain,
+        combine_documents_chain=combine_documents_chain,  # type: ignore[arg-type]
         vectorstore=vectorstore,
         **config,
     )
@@ -425,7 +427,7 @@ def _load_retrieval_qa(config: dict, **kwargs: Any) -> RetrievalQA:
             "`combine_documents_chain_path` must be present."
         )
     return RetrievalQA(
-        combine_documents_chain=combine_documents_chain,
+        combine_documents_chain=combine_documents_chain,  # type: ignore[arg-type]
         retriever=retriever,
         **config,
     )
@@ -449,7 +451,7 @@ def _load_retrieval_qa_with_sources_chain(
             "`combine_documents_chain_path` must be present."
         )
     return RetrievalQAWithSourcesChain(
-        combine_documents_chain=combine_documents_chain,
+        combine_documents_chain=combine_documents_chain,  # type: ignore[arg-type]
         retriever=retriever,
         **config,
     )
@@ -471,7 +473,7 @@ def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA:
             "`combine_documents_chain_path` must be present."
         )
     return VectorDBQA(
-        combine_documents_chain=combine_documents_chain,
+        combine_documents_chain=combine_documents_chain,  # type: ignore[arg-type]
         vectorstore=vectorstore,
         **config,
     )
@@ -495,8 +497,8 @@ def _load_graph_cypher_chain(config: dict, **kwargs: Any) -> GraphCypherQAChain:

     return GraphCypherQAChain(
         graph=graph,
-        cypher_generation_chain=cypher_generation_chain,
-        qa_chain=qa_chain,
+        cypher_generation_chain=cypher_generation_chain,  # type: ignore[arg-type]
+        qa_chain=qa_chain,  # type: ignore[arg-type]
         **config,
     )

@@ -525,8 +527,8 @@ def _load_api_chain(config: dict, **kwargs: Any) -> APIChain:
     else:
         raise ValueError("`requests_wrapper` must be present.")
     return APIChain(
-        api_request_chain=api_request_chain,
-        api_answer_chain=api_answer_chain,
+        api_request_chain=api_request_chain,  # type: ignore[arg-type]
+        api_answer_chain=api_answer_chain,  # type: ignore[arg-type]
         requests_wrapper=requests_wrapper,
         **config,
     )
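The loader hunks above share one shape: a helper typed to return the base `Chain` feeds a constructor parameter annotated with a concrete subclass, which mypy 1.x flags even when the loaded object is the right subclass at runtime. A hedged stand-in reduction (these names are illustrative, not the real loaders):

class Chain: ...


class LLMChain(Chain): ...


def load_chain_from_config(config: dict) -> Chain:
    # A real loader would dispatch on config; this stand-in always
    # builds an LLMChain but is typed to the base class.
    return LLMChain()


class SQLDatabaseChainStandin:
    def __init__(self, llm_chain: LLMChain) -> None:
        self.llm_chain = llm_chain


chain = load_chain_from_config({})
# mypy: Argument "llm_chain" has incompatible type "Chain"; expected
# "LLMChain"  [arg-type] -- correct at runtime here, hence the ignore.
db_chain = SQLDatabaseChainStandin(llm_chain=chain)  # type: ignore[arg-type]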
@@ -95,7 +95,7 @@ def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:
             )
         ),
     ]
-    prompt = ChatPromptTemplate(messages=messages)
+    prompt = ChatPromptTemplate(messages=messages)  # type: ignore[arg-type, call-arg]

     chain = LLMChain(
         llm=llm,
@@ -82,7 +82,7 @@ def create_qa_with_structure_chain(
         HumanMessagePromptTemplate.from_template("Question: {question}"),
         HumanMessage(content="Tips: Make sure to answer in the correct format"),
     ]
-    prompt = prompt or ChatPromptTemplate(messages=messages)
+    prompt = prompt or ChatPromptTemplate(messages=messages)  # type: ignore[arg-type, call-arg]

     chain = LLMChain(
         llm=llm,
@@ -59,12 +59,12 @@ def _load_stuff_chain(
     verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> StuffDocumentsChain:
-    llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
+    llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)  # type: ignore[arg-type]
     return StuffDocumentsChain(
         llm_chain=llm_chain,
         document_variable_name=document_variable_name,
         document_prompt=document_prompt,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
         **kwargs,
     )

@@ -83,14 +83,14 @@ def _load_map_reduce_chain(
     token_max: int = 3000,
     **kwargs: Any,
 ) -> MapReduceDocumentsChain:
-    map_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
+    map_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)  # type: ignore[arg-type]
     _reduce_llm = reduce_llm or llm
-    reduce_chain = LLMChain(llm=_reduce_llm, prompt=combine_prompt, verbose=verbose)
+    reduce_chain = LLMChain(llm=_reduce_llm, prompt=combine_prompt, verbose=verbose)  # type: ignore[arg-type]
     combine_documents_chain = StuffDocumentsChain(
         llm_chain=reduce_chain,
         document_variable_name=combine_document_variable_name,
         document_prompt=document_prompt,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
     )
     if collapse_prompt is None:
         collapse_chain = None
@@ -105,7 +105,7 @@ def _load_map_reduce_chain(
             llm_chain=LLMChain(
                 llm=_collapse_llm,
                 prompt=collapse_prompt,
-                verbose=verbose,
+                verbose=verbose,  # type: ignore[arg-type]
             ),
             document_variable_name=combine_document_variable_name,
             document_prompt=document_prompt,
@@ -114,13 +114,13 @@ def _load_map_reduce_chain(
         combine_documents_chain=combine_documents_chain,
         collapse_documents_chain=collapse_chain,
         token_max=token_max,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
     )
     return MapReduceDocumentsChain(
         llm_chain=map_chain,
         reduce_documents_chain=reduce_documents_chain,
         document_variable_name=map_reduce_document_variable_name,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
         **kwargs,
     )

@@ -136,16 +136,16 @@ def _load_refine_chain(
     verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> RefineDocumentsChain:
-    initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
+    initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)  # type: ignore[arg-type]
     _refine_llm = refine_llm or llm
-    refine_chain = LLMChain(llm=_refine_llm, prompt=refine_prompt, verbose=verbose)
+    refine_chain = LLMChain(llm=_refine_llm, prompt=refine_prompt, verbose=verbose)  # type: ignore[arg-type]
     return RefineDocumentsChain(
         initial_llm_chain=initial_chain,
         refine_llm_chain=refine_chain,
         document_variable_name=document_variable_name,
         initial_response_name=initial_response_name,
         document_prompt=document_prompt,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
         **kwargs,
     )

@@ -73,7 +73,7 @@ def _load_stuff_chain(
     llm_chain = LLMChain(
         llm=llm,
         prompt=_prompt,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
         callback_manager=callback_manager,
         callbacks=callbacks,
     )
@@ -81,7 +81,7 @@ def _load_stuff_chain(
     return StuffDocumentsChain(
         llm_chain=llm_chain,
         document_variable_name=document_variable_name,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
         callback_manager=callback_manager,
         callbacks=callbacks,
         **kwargs,
@@ -112,7 +112,7 @@ def _load_map_reduce_chain(
     map_chain = LLMChain(
         llm=llm,
         prompt=_question_prompt,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
         callback_manager=callback_manager,
         callbacks=callbacks,
     )
@@ -120,7 +120,7 @@ def _load_map_reduce_chain(
     reduce_chain = LLMChain(
         llm=_reduce_llm,
         prompt=_combine_prompt,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
         callback_manager=callback_manager,
         callbacks=callbacks,
     )
@@ -128,7 +128,7 @@ def _load_map_reduce_chain(
     combine_documents_chain = StuffDocumentsChain(
         llm_chain=reduce_chain,
         document_variable_name=combine_document_variable_name,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
         callback_manager=callback_manager,
         callbacks=callbacks,
     )
@@ -145,12 +145,12 @@ def _load_map_reduce_chain(
             llm_chain=LLMChain(
                 llm=_collapse_llm,
                 prompt=collapse_prompt,
-                verbose=verbose,
+                verbose=verbose,  # type: ignore[arg-type]
                 callback_manager=callback_manager,
                 callbacks=callbacks,
             ),
             document_variable_name=combine_document_variable_name,
-            verbose=verbose,
+            verbose=verbose,  # type: ignore[arg-type]
             callback_manager=callback_manager,
         )
     reduce_documents_chain = ReduceDocumentsChain(
@@ -163,7 +163,7 @@ def _load_map_reduce_chain(
         llm_chain=map_chain,
         document_variable_name=map_reduce_document_variable_name,
         reduce_documents_chain=reduce_documents_chain,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
         callback_manager=callback_manager,
         callbacks=callbacks,
         **kwargs,
@@ -191,7 +191,7 @@ def _load_refine_chain(
     initial_chain = LLMChain(
         llm=llm,
         prompt=_question_prompt,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
         callback_manager=callback_manager,
         callbacks=callbacks,
     )
@@ -199,7 +199,7 @@ def _load_refine_chain(
     refine_chain = LLMChain(
         llm=_refine_llm,
         prompt=_refine_prompt,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
         callback_manager=callback_manager,
         callbacks=callbacks,
     )
@@ -208,7 +208,7 @@ def _load_refine_chain(
         refine_llm_chain=refine_chain,
         document_variable_name=document_variable_name,
         initial_response_name=initial_response_name,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
         callback_manager=callback_manager,
         callbacks=callbacks,
         **kwargs,
@@ -30,12 +30,12 @@ def _load_stuff_chain(
     verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> StuffDocumentsChain:
-    llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
+    llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)  # type: ignore[arg-type]
     # TODO: document prompt
     return StuffDocumentsChain(
         llm_chain=llm_chain,
         document_variable_name=document_variable_name,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
         **kwargs,
     )

@@ -57,17 +57,23 @@ def _load_map_reduce_chain(
     **kwargs: Any,
 ) -> MapReduceDocumentsChain:
     map_chain = LLMChain(
-        llm=llm, prompt=map_prompt, verbose=verbose, callbacks=callbacks
+        llm=llm,
+        prompt=map_prompt,
+        verbose=verbose,  # type: ignore[arg-type]
+        callbacks=callbacks,  # type: ignore[arg-type]
     )
     _reduce_llm = reduce_llm or llm
     reduce_chain = LLMChain(
-        llm=_reduce_llm, prompt=combine_prompt, verbose=verbose, callbacks=callbacks
+        llm=_reduce_llm,
+        prompt=combine_prompt,
+        verbose=verbose,  # type: ignore[arg-type]
+        callbacks=callbacks,  # type: ignore[arg-type]
     )
     # TODO: document prompt
     combine_documents_chain = StuffDocumentsChain(
         llm_chain=reduce_chain,
         document_variable_name=combine_document_variable_name,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
         callbacks=callbacks,
     )
     if collapse_prompt is None:
@@ -83,7 +89,7 @@ def _load_map_reduce_chain(
             llm_chain=LLMChain(
                 llm=_collapse_llm,
                 prompt=collapse_prompt,
-                verbose=verbose,
+                verbose=verbose,  # type: ignore[arg-type]
                 callbacks=callbacks,
             ),
             document_variable_name=combine_document_variable_name,
@@ -92,7 +98,7 @@ def _load_map_reduce_chain(
         combine_documents_chain=combine_documents_chain,
         collapse_documents_chain=collapse_chain,
         token_max=token_max,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
         callbacks=callbacks,
         collapse_max_retries=collapse_max_retries,
     )
@@ -100,7 +106,7 @@ def _load_map_reduce_chain(
         llm_chain=map_chain,
         reduce_documents_chain=reduce_documents_chain,
         document_variable_name=map_reduce_document_variable_name,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
         callbacks=callbacks,
         **kwargs,
     )
@@ -116,15 +122,15 @@ def _load_refine_chain(
     verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> RefineDocumentsChain:
-    initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
+    initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)  # type: ignore[arg-type]
     _refine_llm = refine_llm or llm
-    refine_chain = LLMChain(llm=_refine_llm, prompt=refine_prompt, verbose=verbose)
+    refine_chain = LLMChain(llm=_refine_llm, prompt=refine_prompt, verbose=verbose)  # type: ignore[arg-type]
     return RefineDocumentsChain(
         initial_llm_chain=initial_chain,
         refine_llm_chain=refine_chain,
         document_variable_name=document_variable_name,
         initial_response_name=initial_response_name,
-        verbose=verbose,
+        verbose=verbose,  # type: ignore[arg-type]
         **kwargs,
     )
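Nearly every ignore in the three loading modules above sits on `verbose=verbose`, where the local is `Optional[bool]` but the target field is effectively `bool`. A minimal reproduction, assuming that reading (the class here is a stand-in, not the real chain):

from typing import Optional


class StuffDocumentsChainStandin:
    def __init__(self, verbose: bool = False) -> None:
        self.verbose = verbose


def _load_stuff_chain(verbose: Optional[bool] = None) -> StuffDocumentsChainStandin:
    # mypy 1.x: Argument "verbose" has incompatible type "Optional[bool]";
    # expected "bool"  [arg-type]. Runtime tolerates None, so the commit
    # suppresses the error instead of narrowing the type.
    return StuffDocumentsChainStandin(verbose=verbose)  # type: ignore[arg-type]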
@@ -255,7 +255,7 @@ The following is the expected answer. Use this to measure correctness:
         prompt = TOOL_FREE_EVAL_CHAT_PROMPT
         eval_chain = LLMChain(llm=llm, prompt=prompt)
         return cls(
-            agent_tools=agent_tools,
+            agent_tools=agent_tools,  # type: ignore[arg-type]
             eval_chain=eval_chain,
             output_parser=output_parser or TrajectoryOutputParser(),
             **kwargs,
@@ -131,7 +131,7 @@ def load_evaluator(
     evaluator_cls = _EVALUATOR_MAP[evaluator]
     if issubclass(evaluator_cls, LLMEvalChain):
         try:
-            llm = llm or ChatOpenAI(
+            llm = llm or ChatOpenAI(  # type: ignore[call-arg]
                 model="gpt-4", model_kwargs={"seed": 42}, temperature=0
             )
         except Exception as e:
@@ -109,8 +109,8 @@ class _HashedDocument(Document):
         cls, document: Document, *, uid: Optional[str] = None
     ) -> _HashedDocument:
         """Create a HashedDocument from a Document."""
-        return cls(
-            uid=uid,
+        return cls(  # type: ignore[call-arg]
+            uid=uid,  # type: ignore[arg-type]
             page_content=document.page_content,
             metadata=document.metadata,
         )
@@ -308,7 +308,7 @@ class SQLRecordManager(RecordManager):
                 pg_insert_stmt: PgInsertType = pg_insert(UpsertionRecord).values(
                     records_to_upsert
                 )
-                stmt = pg_insert_stmt.on_conflict_do_update(
+                stmt = pg_insert_stmt.on_conflict_do_update(  # type: ignore[assignment]
                     "uix_key_namespace",  # Name of constraint
                     set_=dict(
                         updated_at=pg_insert_stmt.excluded.updated_at,
@@ -387,7 +387,7 @@ class SQLRecordManager(RecordManager):
                 pg_insert_stmt: PgInsertType = pg_insert(UpsertionRecord).values(
                     records_to_upsert
                 )
-                stmt = pg_insert_stmt.on_conflict_do_update(
+                stmt = pg_insert_stmt.on_conflict_do_update(  # type: ignore[assignment]
                     "uix_key_namespace",  # Name of constraint
                     set_=dict(
                         updated_at=pg_insert_stmt.excluded.updated_at,
@@ -472,7 +472,7 @@ class SQLRecordManager(RecordManager):
         """List records in the SQLite database based on the provided date range."""
         session: AsyncSession
         async with self._amake_session() as session:
-            query: Query = select(UpsertionRecord.key).filter(
+            query: Query = select(UpsertionRecord.key).filter(  # type: ignore[assignment]
                 UpsertionRecord.namespace == self.namespace
             )

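The `[assignment]` ignores in the record-manager hunks follow from reassigning an annotated name to a value whose inferred type differs (the SQLAlchemy statement builders return new statement types). A generic sketch of that rule:

from typing import List

# "query" is annotated once; reassigning it to a differently typed value
# is flagged as [assignment] even though Python itself allows it.
query: List[int] = [1, 2, 3]
query = "SELECT key FROM upsertion_record"  # type: ignore[assignment]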
@@ -92,7 +92,7 @@ class LLMChainExtractor(BaseDocumentCompressor):
             if len(outputs[i]) == 0:
                 continue
             compressed_docs.append(
-                Document(page_content=outputs[i], metadata=doc.metadata)
+                Document(page_content=outputs[i], metadata=doc.metadata)  # type: ignore[arg-type]
             )
         return compressed_docs

@@ -108,4 +108,4 @@ class LLMChainExtractor(BaseDocumentCompressor):
         _prompt = prompt if prompt is not None else _get_default_chain_prompt()
         _get_input = get_input if get_input is not None else default_get_input
         llm_chain = LLMChain(llm=llm, prompt=_prompt, **(llm_chain_kwargs or {}))
-        return cls(llm_chain=llm_chain, get_input=_get_input)
+        return cls(llm_chain=llm_chain, get_input=_get_input)  # type: ignore[arg-type]
@@ -238,7 +238,7 @@ class EnsembleRetriever(BaseRetriever):
         # Enforce that retrieved docs are Documents for each list in retriever_docs
         for i in range(len(retriever_docs)):
             retriever_docs[i] = [
-                Document(page_content=doc) if not isinstance(doc, Document) else doc
+                Document(page_content=doc) if not isinstance(doc, Document) else doc  # type: ignore[arg-type]
                 for doc in retriever_docs[i]
             ]

@@ -251,7 +251,7 @@ class SelfQueryRetriever(BaseRetriever):
         query_constructor = query_constructor.with_config(
             run_name=QUERY_CONSTRUCTOR_RUN_NAME
         )
-        return cls(
+        return cls(  # type: ignore[call-arg]
             query_constructor=query_constructor,
             vectorstore=vectorstore,
             use_original_query=use_original_query,
@@ -178,7 +178,7 @@ class RunEvalConfig(BaseModel):
         def __init__(
             self, criteria: Optional[CRITERIA_TYPE] = None, **kwargs: Any
         ) -> None:
-            super().__init__(criteria=criteria, **kwargs)
+            super().__init__(criteria=criteria, **kwargs)  # type: ignore[call-arg]

     class LabeledCriteria(SingleKeyEvalConfig):
         """Configuration for a labeled (with references) criteria evaluator.
@@ -198,7 +198,7 @@ class RunEvalConfig(BaseModel):
         def __init__(
             self, criteria: Optional[CRITERIA_TYPE] = None, **kwargs: Any
         ) -> None:
-            super().__init__(criteria=criteria, **kwargs)
+            super().__init__(criteria=criteria, **kwargs)  # type: ignore[call-arg]

     class EmbeddingDistance(SingleKeyEvalConfig):
         """Configuration for an embedding distance evaluator.
@@ -370,7 +370,7 @@ class RunEvalConfig(BaseModel):
             normalize_by: Optional[float] = None,
             **kwargs: Any,
         ) -> None:
-            super().__init__(criteria=criteria, normalize_by=normalize_by, **kwargs)
+            super().__init__(criteria=criteria, normalize_by=normalize_by, **kwargs)  # type: ignore[call-arg]

     class LabeledScoreString(ScoreString):
         evaluator_type: EvaluatorType = EvaluatorType.LABELED_SCORE_STRING
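The `[call-arg]` ignores scattered through the source hunks mostly involve pydantic-v1-style models, whose runtime `__init__` is more permissive (field aliases, defaults filled in by validators) than the signature the checker synthesizes. A hedged sketch of the alias case, assuming pydantic v1 semantics with its mypy plugin enabled (the model below is invented for illustration):

from pydantic import BaseModel, Field  # assumes the pydantic v1 API


class FakeChatModel(BaseModel):
    model_name: str = Field(default="gpt-4", alias="model")

    class Config:
        allow_population_by_field_name = True


# Runtime accepts the alias keyword, but the checker's synthesized
# __init__ only knows the field name, so the call is reported as
# [call-arg] and suppressed the same way the commit does:
llm = FakeChatModel(model="gpt-3.5-turbo")  # type: ignore[call-arg]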
libs/langchain/poetry.lock (generated, 165 lines changed)
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.

 [[package]]
 name = "aiodns"
@@ -3072,6 +3072,7 @@ files = [
     {file = "jq-1.6.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:227b178b22a7f91ae88525810441791b1ca1fc71c86f03190911793be15cec3d"},
     {file = "jq-1.6.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:780eb6383fbae12afa819ef676fc93e1548ae4b076c004a393af26a04b460742"},
     {file = "jq-1.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:08ded6467f4ef89fec35b2bf310f210f8cd13fbd9d80e521500889edf8d22441"},
+    {file = "jq-1.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:49e44ed677713f4115bd5bf2dbae23baa4cd503be350e12a1c1f506b0687848f"},
     {file = "jq-1.6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:984f33862af285ad3e41e23179ac4795f1701822473e1a26bf87ff023e5a89ea"},
     {file = "jq-1.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f42264fafc6166efb5611b5d4cb01058887d050a6c19334f6a3f8a13bb369df5"},
     {file = "jq-1.6.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a67154f150aaf76cc1294032ed588436eb002097dd4fd1e283824bf753a05080"},
@@ -4238,52 +4239,49 @@ para = ">=0.0.1"

 [[package]]
 name = "mypy"
-version = "0.991"
+version = "1.9.0"
 description = "Optional static typing for Python"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "mypy-0.991-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7d17e0a9707d0772f4a7b878f04b4fd11f6f5bcb9b3813975a9b13c9332153ab"},
-    {file = "mypy-0.991-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0714258640194d75677e86c786e80ccf294972cc76885d3ebbb560f11db0003d"},
-    {file = "mypy-0.991-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0c8f3be99e8a8bd403caa8c03be619544bc2c77a7093685dcf308c6b109426c6"},
-    {file = "mypy-0.991-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc9ec663ed6c8f15f4ae9d3c04c989b744436c16d26580eaa760ae9dd5d662eb"},
-    {file = "mypy-0.991-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4307270436fd7694b41f913eb09210faff27ea4979ecbcd849e57d2da2f65305"},
-    {file = "mypy-0.991-cp310-cp310-win_amd64.whl", hash = "sha256:901c2c269c616e6cb0998b33d4adbb4a6af0ac4ce5cd078afd7bc95830e62c1c"},
-    {file = "mypy-0.991-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d13674f3fb73805ba0c45eb6c0c3053d218aa1f7abead6e446d474529aafc372"},
-    {file = "mypy-0.991-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1c8cd4fb70e8584ca1ed5805cbc7c017a3d1a29fb450621089ffed3e99d1857f"},
-    {file = "mypy-0.991-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:209ee89fbb0deed518605edddd234af80506aec932ad28d73c08f1400ef80a33"},
-    {file = "mypy-0.991-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37bd02ebf9d10e05b00d71302d2c2e6ca333e6c2a8584a98c00e038db8121f05"},
-    {file = "mypy-0.991-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:26efb2fcc6b67e4d5a55561f39176821d2adf88f2745ddc72751b7890f3194ad"},
-    {file = "mypy-0.991-cp311-cp311-win_amd64.whl", hash = "sha256:3a700330b567114b673cf8ee7388e949f843b356a73b5ab22dd7cff4742a5297"},
-    {file = "mypy-0.991-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1f7d1a520373e2272b10796c3ff721ea1a0712288cafaa95931e66aa15798813"},
-    {file = "mypy-0.991-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:641411733b127c3e0dab94c45af15fea99e4468f99ac88b39efb1ad677da5711"},
-    {file = "mypy-0.991-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3d80e36b7d7a9259b740be6d8d906221789b0d836201af4234093cae89ced0cd"},
-    {file = "mypy-0.991-cp37-cp37m-win_amd64.whl", hash = "sha256:e62ebaad93be3ad1a828a11e90f0e76f15449371ffeecca4a0a0b9adc99abcef"},
-    {file = "mypy-0.991-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b86ce2c1866a748c0f6faca5232059f881cda6dda2a893b9a8373353cfe3715a"},
-    {file = "mypy-0.991-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ac6e503823143464538efda0e8e356d871557ef60ccd38f8824a4257acc18d93"},
-    {file = "mypy-0.991-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0cca5adf694af539aeaa6ac633a7afe9bbd760df9d31be55ab780b77ab5ae8bf"},
-    {file = "mypy-0.991-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12c56bf73cdab116df96e4ff39610b92a348cc99a1307e1da3c3768bbb5b135"},
-    {file = "mypy-0.991-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:652b651d42f155033a1967739788c436491b577b6a44e4c39fb340d0ee7f0d70"},
-    {file = "mypy-0.991-cp38-cp38-win_amd64.whl", hash = "sha256:4175593dc25d9da12f7de8de873a33f9b2b8bdb4e827a7cae952e5b1a342e243"},
-    {file = "mypy-0.991-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:98e781cd35c0acf33eb0295e8b9c55cdbef64fcb35f6d3aa2186f289bed6e80d"},
-    {file = "mypy-0.991-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6d7464bac72a85cb3491c7e92b5b62f3dcccb8af26826257760a552a5e244aa5"},
-    {file = "mypy-0.991-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c9166b3f81a10cdf9b49f2d594b21b31adadb3d5e9db9b834866c3258b695be3"},
-    {file = "mypy-0.991-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8472f736a5bfb159a5e36740847808f6f5b659960115ff29c7cecec1741c648"},
-    {file = "mypy-0.991-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e80e758243b97b618cdf22004beb09e8a2de1af481382e4d84bc52152d1c476"},
-    {file = "mypy-0.991-cp39-cp39-win_amd64.whl", hash = "sha256:74e259b5c19f70d35fcc1ad3d56499065c601dfe94ff67ae48b85596b9ec1461"},
-    {file = "mypy-0.991-py3-none-any.whl", hash = "sha256:de32edc9b0a7e67c2775e574cb061a537660e51210fbf6006b0b36ea695ae9bb"},
-    {file = "mypy-0.991.tar.gz", hash = "sha256:3c0165ba8f354a6d9881809ef29f1a9318a236a6d81c690094c5df32107bde06"},
+    {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"},
+    {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"},
+    {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"},
+    {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"},
+    {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"},
+    {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"},
+    {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"},
+    {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"},
+    {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"},
+    {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"},
+    {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"},
+    {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"},
+    {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"},
+    {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"},
+    {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"},
+    {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"},
+    {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"},
+    {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"},
+    {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"},
+    {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"},
+    {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"},
+    {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"},
+    {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"},
+    {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"},
+    {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"},
+    {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"},
+    {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"},
 ]

 [package.dependencies]
-mypy-extensions = ">=0.4.3"
+mypy-extensions = ">=1.0.0"
 tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
-typing-extensions = ">=3.10"
+typing-extensions = ">=4.1.0"

 [package.extras]
 dmypy = ["psutil (>=4.0)"]
 install-types = ["pip"]
-python2 = ["typed-ast (>=1.4.0,<2)"]
+mypyc = ["setuptools (>=50)"]
 reports = ["lxml"]

 [[package]]
@@ -4732,6 +4730,7 @@ description = "Nvidia JIT LTO Library"
 optional = true
 python-versions = ">=3"
 files = [
+    {file = "nvidia_nvjitlink_cu12-12.4.99-py3-none-manylinux2014_aarch64.whl", hash = "sha256:75d6498c96d9adb9435f2bbdbddb479805ddfb97b5c1b32395c694185c20ca57"},
     {file = "nvidia_nvjitlink_cu12-12.4.99-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c6428836d20fe7e327191c175791d38570e10762edc588fb46749217cd444c74"},
     {file = "nvidia_nvjitlink_cu12-12.4.99-py3-none-win_amd64.whl", hash = "sha256:991905ffa2144cb603d8ca7962d75c35334ae82bf92820b6ba78157277da1ad2"},
 ]
@@ -5476,8 +5475,6 @@ files = [
     {file = "psycopg2-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:426f9f29bde126913a20a96ff8ce7d73fd8a216cfb323b1f04da402d452853c3"},
     {file = "psycopg2-2.9.9-cp311-cp311-win32.whl", hash = "sha256:ade01303ccf7ae12c356a5e10911c9e1c51136003a9a1d92f7aa9d010fb98372"},
     {file = "psycopg2-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:121081ea2e76729acfb0673ff33755e8703d45e926e416cb59bae3a86c6a4981"},
-    {file = "psycopg2-2.9.9-cp312-cp312-win32.whl", hash = "sha256:d735786acc7dd25815e89cc4ad529a43af779db2e25aa7c626de864127e5a024"},
-    {file = "psycopg2-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:a7653d00b732afb6fc597e29c50ad28087dcb4fbfb28e86092277a559ae4e693"},
     {file = "psycopg2-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:5e0d98cade4f0e0304d7d6f25bbfbc5bd186e07b38eac65379309c4ca3193efa"},
     {file = "psycopg2-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:7e2dacf8b009a1c1e843b5213a87f7c544b2b042476ed7755be813eaf4e8347a"},
     {file = "psycopg2-2.9.9-cp38-cp38-win32.whl", hash = "sha256:ff432630e510709564c01dafdbe996cb552e0b9f3f065eb89bdce5bd31fabf4c"},
@@ -5520,7 +5517,6 @@ files = [
     {file = "psycopg2_binary-2.9.9-cp311-cp311-win32.whl", hash = "sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d"},
     {file = "psycopg2_binary-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417"},
-    {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf"},
     {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d"},
     {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212"},
     {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493"},
     {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996"},
@@ -5529,8 +5525,6 @@ files = [
     {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07"},
     {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb"},
     {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe"},
-    {file = "psycopg2_binary-2.9.9-cp312-cp312-win32.whl", hash = "sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93"},
-    {file = "psycopg2_binary-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab"},
     {file = "psycopg2_binary-2.9.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a"},
     {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9"},
     {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77"},
@@ -6079,26 +6073,31 @@ python-versions = ">=3.8"
 files = [
     {file = "PyMuPDF-1.23.26-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:645a05321aecc8c45739f71f0eb574ce33138d19189582ffa5241fea3a8e2549"},
+    {file = "PyMuPDF-1.23.26-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:2dfc9e010669ae92fade6fb72aaea49ebe3b8dcd7ee4dcbbe50115abcaa4d3fe"},
     {file = "PyMuPDF-1.23.26-cp310-none-manylinux2014_aarch64.whl", hash = "sha256:734ee380b3abd038602be79114194a3cb74ac102b7c943bcb333104575922c50"},
     {file = "PyMuPDF-1.23.26-cp310-none-manylinux2014_x86_64.whl", hash = "sha256:b22f8d854f8196ad5b20308c1cebad3d5189ed9f0988acbafa043947ea7e6c55"},
     {file = "PyMuPDF-1.23.26-cp310-none-win32.whl", hash = "sha256:cc0f794e3466bc96b5bf79d42fbc1551428751e3fef38ebc10ac70396b676144"},
     {file = "PyMuPDF-1.23.26-cp310-none-win_amd64.whl", hash = "sha256:2eb701247d8e685a24e45899d1175f01a3ce5fc792a4431c91fbb68633b29298"},
     {file = "PyMuPDF-1.23.26-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:e2804a64bb57da414781e312fb0561f6be67658ad57ed4a73dce008b23fc70a6"},
+    {file = "PyMuPDF-1.23.26-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:97b40bb22e3056874634617a90e0ed24a5172cf71791b9e25d1d91c6743bc567"},
     {file = "PyMuPDF-1.23.26-cp311-none-manylinux2014_aarch64.whl", hash = "sha256:fab8833559bc47ab26ce736f915b8fc1dd37c108049b90396f7cd5e1004d7593"},
     {file = "PyMuPDF-1.23.26-cp311-none-manylinux2014_x86_64.whl", hash = "sha256:f25aafd3e7fb9d7761a22acf2b67d704f04cc36d4dc33a3773f0eb3f4ec3606f"},
     {file = "PyMuPDF-1.23.26-cp311-none-win32.whl", hash = "sha256:05e672ed3e82caca7ef02a88ace30130b1dd392a1190f03b2b58ffe7aa331400"},
     {file = "PyMuPDF-1.23.26-cp311-none-win_amd64.whl", hash = "sha256:92b3c4dd4d0491d495f333be2d41f4e1c155a409bc9d04b5ff29655dccbf4655"},
     {file = "PyMuPDF-1.23.26-cp312-none-macosx_10_9_x86_64.whl", hash = "sha256:a217689ede18cc6991b4e6a78afee8a440b3075d53b9dec4ba5ef7487d4547e9"},
+    {file = "PyMuPDF-1.23.26-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:42ad2b819b90ce1947e11b90ec5085889df0a2e3aa0207bc97ecacfc6157cabc"},
     {file = "PyMuPDF-1.23.26-cp312-none-manylinux2014_aarch64.whl", hash = "sha256:99607649f89a02bba7d8ebe96e2410664316adc95e9337f7dfeff6a154f93049"},
     {file = "PyMuPDF-1.23.26-cp312-none-manylinux2014_x86_64.whl", hash = "sha256:bb42d4b8407b4de7cb58c28f01449f16f32a6daed88afb41108f1aeb3552bdd4"},
     {file = "PyMuPDF-1.23.26-cp312-none-win32.whl", hash = "sha256:c40d044411615e6f0baa7d3d933b3032cf97e168c7fa77d1be8a46008c109aee"},
     {file = "PyMuPDF-1.23.26-cp312-none-win_amd64.whl", hash = "sha256:3f876533aa7f9a94bcd9a0225ce72571b7808260903fec1d95c120bc842fb52d"},
     {file = "PyMuPDF-1.23.26-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:52df831d46beb9ff494f5fba3e5d069af6d81f49abf6b6e799ee01f4f8fa6799"},
+    {file = "PyMuPDF-1.23.26-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:0bbb0cf6593e53524f3fc26fb5e6ead17c02c64791caec7c4afe61b677dedf80"},
     {file = "PyMuPDF-1.23.26-cp38-none-manylinux2014_aarch64.whl", hash = "sha256:5ef4360f20015673c20cf59b7e19afc97168795188c584254ed3778cde43ce77"},
     {file = "PyMuPDF-1.23.26-cp38-none-manylinux2014_x86_64.whl", hash = "sha256:d7cd88842b2e7f4c71eef4d87c98c35646b80b60e6375392d7ce40e519261f59"},
     {file = "PyMuPDF-1.23.26-cp38-none-win32.whl", hash = "sha256:6577e2f473625e2d0df5f5a3bf1e4519e94ae749733cc9937994d1b256687bfa"},
     {file = "PyMuPDF-1.23.26-cp38-none-win_amd64.whl", hash = "sha256:fbe1a3255b2cd0d769b2da2c4efdd0c0f30d4961a1aac02c0f75cf951b337aa4"},
     {file = "PyMuPDF-1.23.26-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:73fce034f2afea886a59ead2d0caedf27e2b2a8558b5da16d0286882e0b1eb82"},
+    {file = "PyMuPDF-1.23.26-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:b3de8618b7cb5b36db611083840b3bcf09b11a893e2d8262f4e042102c7e65de"},
     {file = "PyMuPDF-1.23.26-cp39-none-manylinux2014_aarch64.whl", hash = "sha256:879e7f5ad35709d8760ab6103c3d5dac8ab8043a856ab3653fd324af7358ee87"},
     {file = "PyMuPDF-1.23.26-cp39-none-manylinux2014_x86_64.whl", hash = "sha256:deee96c2fd415ded7b5070d8d5b2c60679aee6ed0e28ac0d2cb998060d835c2c"},
     {file = "PyMuPDF-1.23.26-cp39-none-win32.whl", hash = "sha256:9f7f4ef99dd8ac97fb0b852efa3dcbee515798078b6c79a6a13c7b1e7c5d41a4"},
     {file = "PyMuPDF-1.23.26-cp39-none-win_amd64.whl", hash = "sha256:ba9a54552c7afb9ec85432c765e2fa9a81413acfaa7d70db7c9b528297749e5b"},
@@ -6574,7 +6573,6 @@ files = [
     {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
-    {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
     {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
     {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
     {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
     {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
     {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
@@ -8552,28 +8550,6 @@ build = ["cmake (>=3.20)", "lit"]
 tests = ["autopep8", "flake8", "isort", "numpy", "pytest", "scipy (>=1.7.1)", "torch"]
 tutorials = ["matplotlib", "pandas", "tabulate", "torch"]

-[[package]]
-name = "tritonclient"
-version = "2.41.1"
-description = "Python client library and utilities for communicating with Triton Inference Server"
-optional = true
-python-versions = "*"
-files = [
-    {file = "tritonclient-2.41.1-py3-none-any.whl", hash = "sha256:91cb234331a7145c407cea605caf9eecbd4276ddc5f085ddd5a6dcab64e5e70b"},
-    {file = "tritonclient-2.41.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:22ad56ae5ab25518862dec85af0a8246a32a1e14e2ee1d86f1444ce432c254e1"},
-    {file = "tritonclient-2.41.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:6545055add115e9bd07ca540af95db5ceda0c783009ad41df6a7f35a79d57474"},
-]
-
-[package.dependencies]
-numpy = ">=1.19.1"
-python-rapidjson = ">=0.9.1"
-
-[package.extras]
-all = ["aiohttp (>=3.8.1,<4.0.0)", "cuda-python", "geventhttpclient (>=1.4.4,<=2.0.2)", "grpcio (>=1.41.0)", "numpy (>=1.19.1)", "packaging (>=14.1)", "protobuf (>=3.5.0,<5)", "python-rapidjson (>=0.9.1)"]
-cuda = ["cuda-python"]
-grpc = ["grpcio (>=1.41.0)", "numpy (>=1.19.1)", "packaging (>=14.1)", "protobuf (>=3.5.0,<5)", "python-rapidjson (>=0.9.1)"]
-http = ["aiohttp (>=3.8.1,<4.0.0)", "geventhttpclient (>=1.4.4,<=2.0.2)", "numpy (>=1.19.1)", "python-rapidjson (>=0.9.1)"]
-
 [[package]]
 name = "tritonclient"
 version = "2.43.0"
@@ -8702,20 +8678,6 @@ files = [
 cryptography = ">=35.0.0"
 types-pyOpenSSL = "*"

-[[package]]
-name = "types-requests"
-version = "2.31.0.6"
-description = "Typing stubs for requests"
-optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "types-requests-2.31.0.6.tar.gz", hash = "sha256:cd74ce3b53c461f1228a9b783929ac73a666658f223e28ed29753771477b3bd0"},
-    {file = "types_requests-2.31.0.6-py3-none-any.whl", hash = "sha256:a2db9cb228a81da8348b49ad6db3f5519452dd20a9c1e1a868c83c5fe88fd1a9"},
-]
-
-[package.dependencies]
-types-urllib3 = "*"
-
 [[package]]
 name = "types-requests"
 version = "2.31.0.20240311"
@@ -8741,17 +8703,6 @@ files = [
     {file = "types_toml-0.10.8.20240310-py3-none-any.whl", hash = "sha256:627b47775d25fa29977d9c70dc0cbab3f314f32c8d8d0c012f2ef5de7aaec05d"},
 ]

-[[package]]
-name = "types-urllib3"
-version = "1.26.25.14"
-description = "Typing stubs for urllib3"
-optional = false
-python-versions = "*"
-files = [
-    {file = "types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f"},
-    {file = "types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e"},
-]
-
 [[package]]
 name = "typing"
 version = "3.7.4.3"
@@ -8848,22 +8799,6 @@ files = [
 [package.extras]
 dev = ["flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake8-commas", "flake8-comprehensions", "flake8-continuation", "flake8-datetimez", "flake8-docstrings", "flake8-import-order", "flake8-literal", "flake8-modern-annotations", "flake8-noqa", "flake8-pyproject", "flake8-requirements", "flake8-typechecking-import", "flake8-use-fstring", "mypy", "pep8-naming", "types-PyYAML"]

-[[package]]
-name = "urllib3"
-version = "1.26.18"
-description = "HTTP library with thread-safe connection pooling, file post, and more."
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
-files = [
-    {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"},
-    {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"},
-]
-
-[package.extras]
-brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
-secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
-socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
-
 [[package]]
 name = "urllib3"
 version = "2.2.1"
@@ -8891,6 +8826,23 @@ files = [
     {file = "uuid-1.30.tar.gz", hash = "sha256:1f87cc004ac5120466f36c5beae48b4c48cc411968eed0eaecd3da82aa96193f"},
 ]

+[[package]]
+name = "vcrpy"
+version = "4.3.0"
+description = "Automatically mock your HTTP interactions to simplify and speed up testing"
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "vcrpy-4.3.0-py2.py3-none-any.whl", hash = "sha256:8fbd4be412e8a7f35f623dd61034e6380a1c8dbd0edf6e87277a3289f6e98093"},
+    {file = "vcrpy-4.3.0.tar.gz", hash = "sha256:49c270ce67e826dba027d83e20d25b67a5885487697e97bca6dbdf53d750a0ac"},
+]
+
+[package.dependencies]
+PyYAML = "*"
+six = ">=1.5"
+wrapt = "*"
+yarl = "*"
+
 [[package]]
 name = "vcrpy"
 version = "6.0.1"
@@ -8903,7 +8855,6 @@ files = [

 [package.dependencies]
 PyYAML = "*"
-urllib3 = {version = "<2", markers = "platform_python_implementation == \"PyPy\" or python_version < \"3.10\""}
 wrapt = "*"
 yarl = "*"

@@ -9456,4 +9407,4 @@ text-helpers = ["chardet"]
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8.1,<4.0"
-content-hash = "6e4b150a47dc6c9cf098b56fef8b53ec711f20e75336eb2d421fe3bbb930906c"
+content-hash = "1faf186ad3d0918b4df6c06c5db9963aa077260fc3ca3a946dd0690939e57e34"
@@ -184,7 +184,7 @@ ruff = "^0.1.5"
 optional = true

 [tool.poetry.group.typing.dependencies]
-mypy = "^0.991"
+mypy = "^1"
 types-pyyaml = "^6.0.12.2"
 types-requests = "^2.28.11.5"
 types-toml = "^0.10.8.1"
@ -374,7 +374,9 @@ def test_chain(model_name: str, question: str) -> None:
|
||||
"FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
|
||||
)
|
||||
chain = OntotextGraphDBQAChain.from_llm(
|
||||
ChatOpenAI(temperature=0, model_name=model_name), graph=graph, verbose=True
|
||||
ChatOpenAI(temperature=0, model_name=model_name),
|
||||
graph=graph,
|
||||
verbose=True, # type: ignore[call-arg]
|
||||
)
|
||||
try:
|
||||
chain.invoke({chain.input_key: question})
|
||||
|
@ -8,7 +8,7 @@ from langchain.docstore.wikipedia import Wikipedia
|
||||
|
||||
def test_react() -> None:
|
||||
"""Test functionality on a prompt."""
|
||||
llm = OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct")
|
||||
llm = OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct") # type: ignore[call-arg]
|
||||
react = ReActChain(llm=llm, docstore=Wikipedia())
|
||||
question = (
|
||||
"Author David Chanoff has collaborated with a U.S. Navy admiral "
|
||||
|
@ -23,7 +23,7 @@ To run against Elastic Cloud, set the following environment variables:
|
||||
|
||||
class TestElasticsearch:
|
||||
@pytest.fixture(scope="class", autouse=True)
|
||||
def elasticsearch_connection(self) -> Union[dict, Generator[dict, None, None]]:
|
||||
def elasticsearch_connection(self) -> Union[dict, Generator[dict, None, None]]: # type: ignore[return]
|
||||
# Run this integration test against Elasticsearch on localhost,
|
||||
# or an Elastic Cloud instance
|
||||
from elasticsearch import Elasticsearch
|
||||
|
@ -35,7 +35,7 @@ def test_embeddings_filter_with_state() -> None:
    state = {"embedded_doc": np.zeros(len(embedded_query))}
    docs = [_DocumentWithState(page_content=t, state=state) for t in texts]
    docs[-1].state = {"embedded_doc": embedded_query}
    relevant_filter = EmbeddingsFilter(
    relevant_filter = EmbeddingsFilter( # type: ignore[call-arg]
        embeddings=embeddings, similarity_threshold=0.75, return_similarity_scores=True
    )
    actual = relevant_filter.compress_documents(docs, query)

@ -4,6 +4,6 @@ from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper

def test_call() -> None:
    """Test that call returns a URL in the output."""
    search = DallEAPIWrapper()
    search = DallEAPIWrapper() # type: ignore[call-arg]
    output = search.run("volcano island")
    assert "https://oaidalleapi" in output

@ -457,7 +457,7 @@ async def test_runnable_agent() -> None:
        return AgentFinish(return_values={"foo": "meow"}, log="hard-coded-message")

    agent = template | model | fake_parse
    executor = AgentExecutor(agent=agent, tools=[])
    executor = AgentExecutor(agent=agent, tools=[]) # type: ignore[arg-type]

    # Invoke
    result = executor.invoke({"question": "hello"})

@ -573,7 +573,7 @@ async def test_runnable_agent_with_function_calls() -> None:
        return "Spying from under the bed."

    agent = template | model | fake_parse
    executor = AgentExecutor(agent=agent, tools=[find_pet])
    executor = AgentExecutor(agent=agent, tools=[find_pet]) # type: ignore[arg-type, list-item]

    # Invoke
    result = executor.invoke({"question": "hello"})

@ -685,7 +685,7 @@ async def test_runnable_with_multi_action_per_step() -> None:
        return "purrrr"

    agent = template | model | fake_parse
    executor = AgentExecutor(agent=agent, tools=[find_pet])
    executor = AgentExecutor(agent=agent, tools=[find_pet]) # type: ignore[arg-type, list-item]

    # Invoke
    result = executor.invoke({"question": "hello"})

@ -819,7 +819,7 @@ async def test_openai_agent_with_streaming() -> None:
        [find_pet], # type: ignore[list-item]
        template,
    )
    executor = AgentExecutor(agent=agent, tools=[find_pet])
    executor = AgentExecutor(agent=agent, tools=[find_pet]) # type: ignore[arg-type, list-item]

    # Invoke
    result = executor.invoke({"question": "hello"})

@ -994,7 +994,7 @@ async def test_openai_agent_tools_agent() -> None:
        [find_pet], # type: ignore[list-item]
        template,
    )
    executor = AgentExecutor(agent=agent, tools=[find_pet])
    executor = AgentExecutor(agent=agent, tools=[find_pet]) # type: ignore[arg-type, list-item]

    # Invoke
    result = executor.invoke({"question": "hello"})

@ -14,7 +14,7 @@ async def test_simplea() -> None:
    answer = "I know the answer!"
    llm = FakeListLLM(responses=[answer])
    retriever = SequentialRetriever(sequential_responses=[[]])
    memory = ConversationBufferMemory(
    memory = ConversationBufferMemory( # type: ignore[call-arg]
        k=1, output_key="answer", memory_key="chat_history", return_messages=True
    )
    qa_chain = ConversationalRetrievalChain.from_llm(

@ -38,7 +38,7 @@ async def test_fixed_message_response_when_docs_founda() -> None:
    retriever = SequentialRetriever(
        sequential_responses=[[Document(page_content=answer)]]
    )
    memory = ConversationBufferMemory(
    memory = ConversationBufferMemory( # type: ignore[call-arg]
        k=1, output_key="answer", memory_key="chat_history", return_messages=True
    )
    qa_chain = ConversationalRetrievalChain.from_llm(

@ -60,7 +60,7 @@ def test_fixed_message_response_when_no_docs_found() -> None:
    answer = "I know the answer!"
    llm = FakeListLLM(responses=[answer])
    retriever = SequentialRetriever(sequential_responses=[[]])
    memory = ConversationBufferMemory(
    memory = ConversationBufferMemory( # type: ignore[call-arg]
        k=1, output_key="answer", memory_key="chat_history", return_messages=True
    )
    qa_chain = ConversationalRetrievalChain.from_llm(

@ -84,7 +84,7 @@ def test_fixed_message_response_when_docs_found() -> None:
    retriever = SequentialRetriever(
        sequential_responses=[[Document(page_content=answer)]]
    )
    memory = ConversationBufferMemory(
    memory = ConversationBufferMemory( # type: ignore[call-arg]
        k=1, output_key="answer", memory_key="chat_history", return_messages=True
    )
    qa_chain = ConversationalRetrievalChain.from_llm(

@ -58,7 +58,7 @@ def test_sequential_usage_single_inputs() -> None:
    """Test sequential on single input chains."""
    chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
    chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
    chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"])
    chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"]) # type: ignore[call-arg]
    output = chain({"foo": "123"})
    expected_output = {"baz": "123foofoo", "foo": "123"}
    assert output == expected_output

@ -68,7 +68,7 @@ def test_sequential_usage_multiple_inputs() -> None:
    """Test sequential on multiple input chains."""
    chain_1 = FakeChain(input_variables=["foo", "test"], output_variables=["bar"])
    chain_2 = FakeChain(input_variables=["bar", "foo"], output_variables=["baz"])
    chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo", "test"])
    chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo", "test"]) # type: ignore[call-arg]
    output = chain({"foo": "123", "test": "456"})
    expected_output = {
        "baz": "123 456foo 123foo",

@ -83,7 +83,7 @@ def test_sequential_usage_memory() -> None:
    memory = SimpleMemory(memories={"zab": "rab"})
    chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
    chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
    chain = SequentialChain(
    chain = SequentialChain( # type: ignore[call-arg]
        memory=memory, chains=[chain_1, chain_2], input_variables=["foo"]
    )
    output = chain({"foo": "123"})

@ -93,7 +93,7 @@ def test_sequential_usage_memory() -> None:
    chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
    chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
    with pytest.raises(ValueError):
        SequentialChain(
        SequentialChain( # type: ignore[call-arg]
            memory=memory, chains=[chain_1, chain_2], input_variables=["foo"]
        )

@ -106,7 +106,7 @@ def test_sequential_internal_chain_use_memory() -> None:
        input_variables=["foo", "bla"], output_variables=["bar"], memory=memory
    )
    chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
    chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"])
    chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"]) # type: ignore[call-arg]
    output = chain({"foo": "123"})
    print("HEYYY OUTPUT", output) # noqa: T201
    expected_output = {"foo": "123", "baz": "123 Human: yo\nAI: yafoofoo"}

@ -117,7 +117,7 @@ def test_sequential_usage_multiple_outputs() -> None:
    """Test sequential usage on multiple output chains."""
    chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar", "test"])
    chain_2 = FakeChain(input_variables=["bar", "foo"], output_variables=["baz"])
    chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"])
    chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"]) # type: ignore[call-arg]
    output = chain({"foo": "123"})
    expected_output = {
        "baz": "123foo 123foo",

@ -132,7 +132,7 @@ def test_sequential_missing_inputs() -> None:
    chain_2 = FakeChain(input_variables=["bar", "test"], output_variables=["baz"])
    with pytest.raises(ValueError):
        # Also needs "test" as an input
        SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"])
        SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"]) # type: ignore[call-arg]


def test_sequential_bad_outputs() -> None:

@ -168,7 +168,7 @@ def test_sequential_overlapping_inputs() -> None:
    chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
    with pytest.raises(ValueError):
        # "test" is specified as an input, but also is an output of one step
        SequentialChain(chains=[chain_1, chain_2], input_variables=["foo", "test"])
        SequentialChain(chains=[chain_1, chain_2], input_variables=["foo", "test"]) # type: ignore[call-arg]


def test_simple_sequential_functionality() -> None:

@ -17,7 +17,7 @@ def dummy_transform(inputs: Dict[str, str]) -> Dict[str, str]:

def test_transform_chain() -> None:
    """Test basic transform chain."""
    transform_chain = TransformChain(
    transform_chain = TransformChain( # type: ignore[call-arg]
        input_variables=["first_name", "last_name"],
        output_variables=["greeting"],
        transform=dummy_transform,

@ -30,7 +30,7 @@ def test_transform_chain() -> None:

def test_transform_chain_bad_inputs() -> None:
    """Test basic transform chain."""
    transform_chain = TransformChain(
    transform_chain = TransformChain( # type: ignore[call-arg]
        input_variables=["first_name", "last_name"],
        output_variables=["greeting"],
        transform=dummy_transform,

@ -5,7 +5,7 @@ from langchain.indexes._api import _HashedDocument


def test_hashed_document_hashing() -> None:
    hashed_document = _HashedDocument(
    hashed_document = _HashedDocument( # type: ignore[call-arg]
        uid="123", page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
    )
    assert isinstance(hashed_document.hash_, str)

@ -21,7 +21,7 @@ def test_hashing_with_missing_content() -> None:

def test_uid_auto_assigned_to_hash() -> None:
    """Test uid is auto-assigned to the hashed_document hash."""
    hashed_document = _HashedDocument(
    hashed_document = _HashedDocument( # type: ignore[call-arg]
        page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
    )
    assert hashed_document.uid == hashed_document.hash_

@ -29,7 +29,7 @@ def test_uid_auto_assigned_to_hash() -> None:

def test_to_document() -> None:
    """Test to_document method."""
    hashed_document = _HashedDocument(
    hashed_document = _HashedDocument( # type: ignore[call-arg]
        page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
    )
    doc = hashed_document.to_document()

@ -79,7 +79,7 @@ def test_typeerror() -> None:
@pytest.mark.requires("openai")
def test_serialize_openai_llm(snapshot: Any) -> None:
    with patch.dict(os.environ, {"LANGCHAIN_API_KEY": "test-api-key"}):
        llm = OpenAI(
        llm = OpenAI( # type: ignore[call-arg]
            model="davinci",
            temperature=0.5,
            openai_api_key="hello",

@ -92,7 +92,7 @@ def test_serialize_openai_llm(snapshot: Any) -> None:

@pytest.mark.requires("openai")
def test_serialize_llmchain(snapshot: Any) -> None:
    llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
    llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
    prompt = PromptTemplate.from_template("hello {name}!")
    chain = LLMChain(llm=llm, prompt=prompt)
    assert dumps(chain, pretty=True) == snapshot

@ -100,7 +100,7 @@ def test_serialize_llmchain(snapshot: Any) -> None:

@pytest.mark.requires("openai")
def test_serialize_llmchain_env() -> None:
    llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
    llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
    prompt = PromptTemplate.from_template("hello {name}!")
    chain = LLMChain(llm=llm, prompt=prompt)

@ -110,7 +110,7 @@ def test_serialize_llmchain_env() -> None:
    if not has_env:
        os.environ["OPENAI_API_KEY"] = "env_variable"

    llm_2 = OpenAI(model="davinci", temperature=0.5)
    llm_2 = OpenAI(model="davinci", temperature=0.5) # type: ignore[call-arg]
    prompt_2 = PromptTemplate.from_template("hello {name}!")
    chain_2 = LLMChain(llm=llm_2, prompt=prompt_2)

@ -122,7 +122,7 @@ def test_serialize_llmchain_env() -> None:

@pytest.mark.requires("openai")
def test_serialize_llmchain_chat(snapshot: Any) -> None:
    llm = ChatOpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
    llm = ChatOpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
    prompt = ChatPromptTemplate.from_messages(
        [HumanMessagePromptTemplate.from_template("hello {name}!")]
    )

@ -135,7 +135,7 @@ def test_serialize_llmchain_chat(snapshot: Any) -> None:
    if not has_env:
        os.environ["OPENAI_API_KEY"] = "env_variable"

    llm_2 = ChatOpenAI(model="davinci", temperature=0.5)
    llm_2 = ChatOpenAI(model="davinci", temperature=0.5) # type: ignore[call-arg]
    prompt_2 = ChatPromptTemplate.from_messages(
        [HumanMessagePromptTemplate.from_template("hello {name}!")]
    )

@ -149,7 +149,7 @@ def test_serialize_llmchain_chat(snapshot: Any) -> None:

@pytest.mark.requires("openai")
def test_serialize_llmchain_with_non_serializable_arg(snapshot: Any) -> None:
    llm = OpenAI(
    llm = OpenAI( # type: ignore[call-arg]
        model="davinci",
        temperature=0.5,
        openai_api_key="hello",

@ -206,7 +206,7 @@ class TestClass(Serializable):


def test_aliases_hidden() -> None:
    test_class = TestClass(my_favorite_secret="hello", my_other_secret="world")
    test_class = TestClass(my_favorite_secret="hello", my_other_secret="world") # type: ignore[call-arg]
    dumped = json.loads(dumps(test_class, pretty=True))
    expected_dump = {
        "lc": 1,

@ -226,11 +226,11 @@ def test_aliases_hidden() -> None:
    with patch.dict(
        os.environ, {"MY_FAVORITE_SECRET": "hello", "MY_OTHER_SECRET": "world"}
    ):
        test_class = TestClass()
        test_class = TestClass() # type: ignore[call-arg]
        dumped = json.loads(dumps(test_class, pretty=True))

    # Check by alias
    test_class = TestClass(my_favorite_secret_alias="hello", my_other_secret="world")
    test_class = TestClass(my_favorite_secret_alias="hello", my_other_secret="world") # type: ignore[call-arg]
    dumped = json.loads(dumps(test_class, pretty=True))
    expected_dump = {
        "lc": 1,

@ -17,7 +17,7 @@ class NotSerializable:
def test_loads_openai_llm() -> None:
    from langchain_openai import OpenAI

    llm = CommunityOpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
    llm = CommunityOpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
    llm_string = dumps(llm)
    llm2 = loads(llm_string, secrets_map={"OPENAI_API_KEY": "hello"})

@ -31,7 +31,7 @@ def test_loads_openai_llm() -> None:
def test_loads_llmchain() -> None:
    from langchain_openai import OpenAI

    llm = CommunityOpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
    llm = CommunityOpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
    prompt = PromptTemplate.from_template("hello {name}!")
    chain = LLMChain(llm=llm, prompt=prompt)
    chain_string = dumps(chain)

@ -54,7 +54,7 @@ def test_loads_llmchain_env() -> None:
    if not has_env:
        os.environ["OPENAI_API_KEY"] = "env_variable"

    llm = OpenAI(model="davinci", temperature=0.5)
    llm = OpenAI(model="davinci", temperature=0.5) # type: ignore[call-arg]
    prompt = PromptTemplate.from_template("hello {name}!")
    chain = LLMChain(llm=llm, prompt=prompt)
    chain_string = dumps(chain)

@ -72,7 +72,7 @@ def test_loads_llmchain_env() -> None:

@pytest.mark.requires("openai")
def test_loads_llmchain_with_non_serializable_arg() -> None:
    llm = CommunityOpenAI(
    llm = CommunityOpenAI( # type: ignore[call-arg]
        model="davinci",
        temperature=0.5,
        openai_api_key="hello",

@ -89,7 +89,7 @@ def test_loads_llmchain_with_non_serializable_arg() -> None:
def test_load_openai_llm() -> None:
    from langchain_openai import OpenAI

    llm = CommunityOpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
    llm = CommunityOpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
    llm_obj = dumpd(llm)
    llm2 = load(llm_obj, secrets_map={"OPENAI_API_KEY": "hello"})

@ -102,7 +102,7 @@ def test_load_openai_llm() -> None:
def test_load_llmchain() -> None:
    from langchain_openai import OpenAI

    llm = CommunityOpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
    llm = CommunityOpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
    prompt = PromptTemplate.from_template("hello {name}!")
    chain = LLMChain(llm=llm, prompt=prompt)
    chain_obj = dumpd(chain)

@ -125,7 +125,7 @@ def test_load_llmchain_env() -> None:
    if not has_env:
        os.environ["OPENAI_API_KEY"] = "env_variable"

    llm = CommunityOpenAI(model="davinci", temperature=0.5)
    llm = CommunityOpenAI(model="davinci", temperature=0.5) # type: ignore[call-arg]
    prompt = PromptTemplate.from_template("hello {name}!")
    chain = LLMChain(llm=llm, prompt=prompt)
    chain_obj = dumpd(chain)

@ -143,7 +143,7 @@ def test_load_llmchain_env() -> None:

@pytest.mark.requires("openai")
def test_load_llmchain_with_non_serializable_arg() -> None:
    llm = CommunityOpenAI(
    llm = CommunityOpenAI( # type: ignore[call-arg]
        model="davinci",
        temperature=0.5,
        openai_api_key="hello",

@ -18,7 +18,7 @@ def test_ensemble_retriever_get_relevant_docs() -> None:
    dummy_retriever = BM25Retriever.from_texts(doc_list)
    dummy_retriever.k = 1

    ensemble_retriever = EnsembleRetriever(
    ensemble_retriever = EnsembleRetriever( # type: ignore[call-arg]
        retrievers=[dummy_retriever, dummy_retriever]
    )
    docs = ensemble_retriever.get_relevant_documents("I like apples")

@ -19,7 +19,7 @@ class InMemoryVectorstoreWithSearch(InMemoryVectorStore):

def test_multi_vector_retriever_initialization() -> None:
    vectorstore = InMemoryVectorstoreWithSearch()
    retriever = MultiVectorRetriever(
    retriever = MultiVectorRetriever( # type: ignore[call-arg]
        vectorstore=vectorstore, docstore=InMemoryStore(), doc_id="doc_id"
    )
    documents = [Document(page_content="test document", metadata={"doc_id": "1"})]

@ -32,7 +32,7 @@ def test_multi_vector_retriever_initialization() -> None:

async def test_multi_vector_retriever_initialization_async() -> None:
    vectorstore = InMemoryVectorstoreWithSearch()
    retriever = MultiVectorRetriever(
    retriever = MultiVectorRetriever( # type: ignore[call-arg]
        vectorstore=vectorstore, docstore=InMemoryStore(), doc_id="doc_id"
    )
    documents = [Document(page_content="test document", metadata={"doc_id": "1"})]

@ -172,7 +172,7 @@ def test_run_llm_or_chain_with_input_mapper() -> None:
        assert "the right input" in inputs
        return {"output": "2"}

    mock_chain = TransformChain(
    mock_chain = TransformChain( # type: ignore[call-arg]
        input_variables=["the right input"],
        output_variables=["output"],
        transform=run_val,