community[patch]: import flattening fix (#20110)

This PR should make it easier for linters to type-check the code and for IDEs to jump to a symbol's definition.

See #20050 as a template for this PR.
- As a byproduct, added 3 missed `test_imports`.
- Added the missed `SolarChat` to `__init__.py` and to the `test_imports` unit test (a sketch of this pattern follows the list).
- Added `# type: ignore` comments to fix linting. It is not clear why these linting errors appear after the above changes.
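For context, "import flattening" replaces dynamic re-exports in a package's `__init__.py` with plain `from ... import ...` statements that static tools can follow. A minimal sketch of the pattern, assuming `SolarChat` lives in `langchain_community.chat_models.solar` (the trimmed `__all__` below is illustrative, not copied from the PR):

```python
# langchain_community/chat_models/__init__.py -- flattened style:
# each re-exported name is a real import, so type checkers can resolve it
# and IDE "jump to definition" lands on the actual class.
from langchain_community.chat_models.solar import SolarChat

__all__ = ["SolarChat"]  # the real list contains every public chat model
```

The matching `test_imports`-style check then pins the public surface so a name cannot silently drop out of `__all__` (test shape assumed, not copied from the PR):

```python
# tests/unit_tests/chat_models/test_imports.py -- hypothetical sketch
from langchain_community.chat_models import __all__

EXPECTED_ALL = ["SolarChat"]  # trimmed to one name for this sketch


def test_all_imports() -> None:
    assert sorted(EXPECTED_ALL) == sorted(__all__)
```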

---------

Co-authored-by: Eugene Yurtsev <eyurtsev@gmail.com>
Author: Leonid Ganeline
Date: 2024-04-10 10:01:19 -07:00
Committed by: GitHub
Parent: 12190ad728
Commit: 4cb5f4c353
60 changed files with 2973 additions and 163 deletions


@@ -116,7 +116,7 @@ class ConversationalAgent(Agent):
             format_instructions=format_instructions,
             input_variables=input_variables,
         )
-        llm_chain = LLMChain(
+        llm_chain = LLMChain(  # type: ignore[misc]
             llm=llm,
             prompt=prompt,
             callback_manager=callback_manager,
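A note on the suppressions used throughout this diff: the bracketed code after `# type: ignore` narrows the suppression to one mypy error class, so any other error on the same line is still reported, whereas a bare `# type: ignore` hides everything. A minimal, self-contained illustration with a toy function (using `[arg-type]` for the demo, since `[misc]` has no compact one-line reproduction):

```python
def double(n: int) -> int:
    """Toy function whose only job is to trigger a mypy error below."""
    return 2 * n


# Without the comment, mypy reports:
#   Argument 1 to "double" has incompatible type "str"; expected "int"  [arg-type]
bad = double("3")  # type: ignore[arg-type]  # silences only [arg-type] here
```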


@@ -125,7 +125,7 @@ class ConversationalChatAgent(Agent):
             input_variables=input_variables,
             output_parser=_output_parser,
         )
-        llm_chain = LLMChain(
+        llm_chain = LLMChain(  # type: ignore[misc]
             llm=llm,
             prompt=prompt,
             callback_manager=callback_manager,


@@ -110,7 +110,7 @@ class ZeroShotAgent(Agent):
             format_instructions=format_instructions,
             input_variables=input_variables,
         )
-        llm_chain = LLMChain(
+        llm_chain = LLMChain(  # type: ignore[misc]
             llm=llm,
             prompt=prompt,
             callback_manager=callback_manager,


@@ -27,7 +27,7 @@ class LLMRequestsChain(Chain):
         See https://python.langchain.com/docs/security for more information.
     """
 
-    llm_chain: LLMChain
+    llm_chain: LLMChain  # type: ignore[valid-type]
     requests_wrapper: TextRequestsWrapper = Field(
         default_factory=lambda: TextRequestsWrapper(headers=DEFAULT_HEADERS),
         exclude=True,
@@ -87,7 +87,7 @@ class LLMRequestsChain(Chain):
         # extract the text from the html
         soup = BeautifulSoup(res, "html.parser")
         other_keys[self.requests_key] = soup.get_text()[: self.text_length]
-        result = self.llm_chain.predict(
+        result = self.llm_chain.predict(  # type: ignore[attr-defined]
             callbacks=_run_manager.get_child(), **other_keys
         )
         return {self.output_key: result}
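`[attr-defined]` is mypy's code for an attribute access it cannot resolve on the inferred type; in the hunk above it is paired with the suppressed `llm_chain` annotation. A generic reproduction with a toy class (nothing here is from the PR):

```python
class Box:
    """Toy container with one typed attribute."""

    value: int = 0


def misspelled(box: Box) -> int:
    # Without the comment, mypy reports:
    #   "Box" has no attribute "walue"  [attr-defined]
    return box.walue  # type: ignore[attr-defined]
```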


@@ -134,7 +134,7 @@ def _load_map_reduce_documents_chain(
     )
 
 
-def _load_reduce_documents_chain(config: dict, **kwargs: Any) -> ReduceDocumentsChain:
+def _load_reduce_documents_chain(config: dict, **kwargs: Any) -> ReduceDocumentsChain:  # type: ignore[valid-type]
     combine_documents_chain = None
     collapse_documents_chain = None
@@ -187,7 +187,7 @@ def _load_reduce_documents_chain(config: dict, **kwargs: Any) -> ReduceDocuments
             config.pop("collapse_document_chain_path"), **kwargs
         )
-    return ReduceDocumentsChain(
+    return ReduceDocumentsChain(  # type: ignore[misc]
         combine_documents_chain=combine_documents_chain,
         collapse_documents_chain=collapse_documents_chain,
         **config,


@@ -52,7 +52,7 @@ def create_openai_fn_chain(
     output_key: str = "function",
     output_parser: Optional[BaseLLMOutputParser] = None,
     **kwargs: Any,
-) -> LLMChain:
+) -> LLMChain:  # type: ignore[valid-type]
     """[Legacy] Create an LLM chain that uses OpenAI functions.
 
     Args:
@@ -132,7 +132,7 @@ def create_openai_fn_chain(
     }
     if len(openai_functions) == 1 and enforce_single_function_usage:
         llm_kwargs["function_call"] = {"name": openai_functions[0]["name"]}
-    llm_chain = LLMChain(
+    llm_chain = LLMChain(  # type: ignore[misc]
         llm=llm,
         prompt=prompt,
         output_parser=output_parser,
@@ -154,7 +154,7 @@ def create_structured_output_chain(
     output_key: str = "function",
     output_parser: Optional[BaseLLMOutputParser] = None,
     **kwargs: Any,
-) -> LLMChain:
+) -> LLMChain:  # type: ignore[valid-type]
     """[Legacy] Create an LLMChain that uses an OpenAI function to get a structured output.
 
     Args:
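`[valid-type]` fires when a name used in a type position, such as the `-> LLMChain` return annotations above, is something mypy treats as a variable rather than a type; the PR description leaves open what triggers this for `LLMChain`. A generic reproduction with toy names:

```python
# The explicit `type` annotation stops mypy from reading IntLike as a
# type alias; it becomes an ordinary variable holding a class object.
IntLike: type = int


def parse(raw: str) -> IntLike:  # type: ignore[valid-type]
    # Without the comment, mypy reports:
    #   Variable "IntLike" is not valid as a type  [valid-type]
    return IntLike(raw)
```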


@@ -59,7 +59,7 @@ class BaseQAWithSourcesChain(Chain, ABC):
             document_prompt=document_prompt,
             document_variable_name="summaries",
         )
-        reduce_documents_chain = ReduceDocumentsChain(
+        reduce_documents_chain = ReduceDocumentsChain(  # type: ignore[misc]
             combine_documents_chain=combine_results_chain
         )
         combine_documents_chain = MapReduceDocumentsChain(


@@ -153,7 +153,7 @@ def _load_map_reduce_chain(
         verbose=verbose,  # type: ignore[arg-type]
         callback_manager=callback_manager,
     )
-    reduce_documents_chain = ReduceDocumentsChain(
+    reduce_documents_chain = ReduceDocumentsChain(  # type: ignore[misc]
         combine_documents_chain=combine_documents_chain,
         collapse_documents_chain=collapse_chain,
         token_max=token_max,


@@ -27,10 +27,10 @@ def test_daxquery() -> None:
     fast_llm = ChatOpenAI(
         temperature=0.5, max_tokens=1000, model_name="gpt-3.5-turbo", verbose=True
-    )
+    )  # type: ignore[call-arg]
     smart_llm = ChatOpenAI(
         temperature=0, max_tokens=100, model_name="gpt-4", verbose=True
-    )
+    )  # type: ignore[call-arg]
     toolkit = PowerBIToolkit(
         powerbi=PowerBIDataset(