Mirror of https://github.com/hwchase17/langchain.git (synced 2026-01-06 08:28:11 +00:00)
.github/scripts/check_diff.py (vendored)
@@ -91,4 +91,4 @@ if __name__ == "__main__":
     }
     for key, value in outputs.items():
         json_output = json.dumps(value)
-        print(f"{key}={json_output}")  # noqa: T201
+        print(f"{key}={json_output}")

.github/scripts/get_min_versions.py (vendored)
@@ -76,4 +76,4 @@ if __name__ == "__main__":

     print(
         " ".join([f"{lib}=={version}" for lib, version in min_versions.items()])
-    )  # noqa: T201
+    )

@@ -7,4 +7,4 @@ ignore_words_list = (
     pyproject_toml.get("tool", {}).get("codespell", {}).get("ignore-words-list")
 )

-print(f"::set-output name=ignore_words_list::{ignore_words_list}")  # noqa: T201
+print(f"::set-output name=ignore_words_list::{ignore_words_list}")
@@ -39,12 +39,10 @@
     "from langchain_community.document_loaders.recursive_url_loader import (\n",
     "    RecursiveUrlLoader,\n",
     ")\n",
-    "\n",
-    "# noqa\n",
     "from langchain_community.vectorstores import Chroma\n",
     "\n",
     "# For our example, we'll load docs from the web\n",
-    "from langchain_text_splitters import RecursiveCharacterTextSplitter  # noqa\n",
+    "from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
     "\n",
     "DOCSTORE_DIR = \".\"\n",
     "DOCSTORE_ID_KEY = \"doc_id\""

@@ -187,7 +187,7 @@ def _load_package_modules(
             modules_by_namespace[top_namespace] = _module_members

     except ImportError as e:
-        print(f"Error: Unable to import module '{namespace}' with error: {e}")  # noqa: T201
+        print(f"Error: Unable to import module '{namespace}' with error: {e}")

     return modules_by_namespace

@@ -300,7 +300,7 @@
     "Entities in the question map to the following database values:\n",
     "{entities_list}\n",
     "Question: {question}\n",
-    "Cypher query:\"\"\"  # noqa: E501\n",
+    "Cypher query:\"\"\"\n",
     "\n",
     "cypher_prompt = ChatPromptTemplate.from_messages(\n",
     "    [\n",

@@ -377,7 +377,7 @@
     "response_template = \"\"\"Based on the the question, Cypher query, and Cypher response, write a natural language response:\n",
     "Question: {question}\n",
     "Cypher query: {query}\n",
-    "Cypher Response: {response}\"\"\"  # noqa: E501\n",
+    "Cypher Response: {response}\"\"\"\n",
     "\n",
     "response_prompt = ChatPromptTemplate.from_messages(\n",
     "    [\n",

@@ -503,7 +503,7 @@
     }
    ],
    "source": [
-    "chain = prompt | llm_with_tools | parser | tool  # noqa\n",
+    "chain = prompt | llm_with_tools | parser | tool\n",
     "chain.invoke({\"question\": \"What's the correlation between age and fare\"})"
    ]
   },

@@ -262,7 +262,7 @@
     "    return tables\n",
     "\n",
     "\n",
-    "table_chain = category_chain | get_tables  # noqa\n",
+    "table_chain = category_chain | get_tables\n",
     "table_chain.invoke({\"input\": \"What are all the genres of Alanis Morisette songs\"})"
    ]
   },
@@ -670,7 +670,7 @@
     "            \"type\": \"image_url\",\n",
     "            \"image_url\": {\n",
     "                # langchain logo\n",
-    "                \"url\": f\"data:image/png;base64,{img_base64}\",  # noqa: E501\n",
+    "                \"url\": f\"data:image/png;base64,{img_base64}\",\n",
     "            },\n",
     "        },\n",
     "        {\"type\": \"text\", \"text\": \"What is this logo for?\"},\n",

@@ -95,7 +95,7 @@
     "        \"\"\"\n",
     "        self.path = path\n",
     "        self._message_line_regex = re.compile(\n",
-    "            r\"(.+?) — (\\w{3,9} \\d{1,2}(?:st|nd|rd|th)?(?:, \\d{4})? \\d{1,2}:\\d{2} (?:AM|PM)|Today at \\d{1,2}:\\d{2} (?:AM|PM)|Yesterday at \\d{1,2}:\\d{2} (?:AM|PM))\",  # noqa\n",
+    "            r\"(.+?) — (\\w{3,9} \\d{1,2}(?:st|nd|rd|th)?(?:, \\d{4})? \\d{1,2}:\\d{2} (?:AM|PM)|Today at \\d{1,2}:\\d{2} (?:AM|PM)|Yesterday at \\d{1,2}:\\d{2} (?:AM|PM))\",\n",
     "            flags=re.DOTALL,\n",
     "        )\n",
     "\n",

@@ -120,7 +120,7 @@
     "        current_content = []\n",
     "        for line in lines:\n",
     "            if re.match(\n",
-    "                r\".+? — (\\d{2}/\\d{2}/\\d{4} \\d{1,2}:\\d{2} (?:AM|PM)|Today at \\d{1,2}:\\d{2} (?:AM|PM)|Yesterday at \\d{1,2}:\\d{2} (?:AM|PM))\",  # noqa\n",
+    "                r\".+? — (\\d{2}/\\d{2}/\\d{4} \\d{1,2}:\\d{2} (?:AM|PM)|Today at \\d{1,2}:\\d{2} (?:AM|PM)|Yesterday at \\d{1,2}:\\d{2} (?:AM|PM))\",\n",
     "                line,\n",
     "            ):\n",
     "                if current_sender and current_content:\n",

@@ -94,7 +94,7 @@
     "        \"\"\"\n",
     "        self.path = path\n",
     "        self._message_line_regex = re.compile(\n",
-    "            r\"(?P<sender>.+?) (?P<timestamp>\\d{4}/\\d{2}/\\d{2} \\d{1,2}:\\d{2} (?:AM|PM))\",  # noqa\n",
+    "            r\"(?P<sender>.+?) (?P<timestamp>\\d{4}/\\d{2}/\\d{2} \\d{1,2}:\\d{2} (?:AM|PM))\",\n",
     "            # flags=re.DOTALL,\n",
     "        )\n",
     "\n",
@@ -3,7 +3,7 @@ class MyClass:
         self.name = name

     def greet(self):
-        print(f"Hello, {self.name}!")  # noqa: T201
+        print(f"Hello, {self.name}!")


 def main():

@@ -389,7 +389,7 @@ class ArxivAPIWrapper(BaseModel):

         Returns:
             List of ArxivPaper objects.
-        """  # noqa: E501
+        """

         def cut_authors(authors: list) -> list[str]:
             if len(authors) > 3:

@@ -186,7 +186,7 @@ def replace_imports(file):
     data = code_block_re.sub(replacer, data)

     # if all_imports:
-    #     print(f"Adding {len(all_imports)} links for imports in {file}")  # noqa: T201
+    #     print(f"Adding {len(all_imports)} links for imports in {file}")
     with open(file, "w") as f:
         f.write(data)
     return all_imports
@@ -116,7 +116,7 @@ Each LLM integration can optionally provide native implementations for async, st

 {table}

-"""  # noqa: E501
+"""

 CHAT_MODEL_TEMPLATE = """\
 ---

@@ -133,7 +133,7 @@ The following table shows all the chat models that support one or more advanced

 {table}

-"""  # noqa: E501
+"""


 def get_llm_table():

@@ -17,7 +17,7 @@ ReplaceImportsCodemod = generate_import_replacer(
         "langchain_to_core",
         "community_to_core",
     ]
-)  # type: ignore[attr-defined]  # noqa: E501
+)  # type: ignore[attr-defined]


 class TestReplaceImportsCommand(CodemodTest):

@@ -28,7 +28,7 @@ bedrock_anthropic_callback_var: ContextVar[
 wandb_tracing_callback_var: ContextVar[Optional[WandbTracer]] = ContextVar(
     "tracing_wandb_callback", default=None
 )
-comet_tracing_callback_var: ContextVar[Optional[CometTracer]] = ContextVar(  # noqa: E501
+comet_tracing_callback_var: ContextVar[Optional[CometTracer]] = ContextVar(
     "tracing_comet_callback", default=None
 )

@@ -143,7 +143,7 @@ class LangSmithDatasetChatLoader(BaseChatLoader):

         :return: Iterator of chat sessions containing messages.
         """
-        from langchain_community.adapters import openai as oai_adapter  # noqa: E402
+        from langchain_community.adapters import openai as oai_adapter

         data = self.client.read_dataset_openai_finetuning(
             dataset_name=self.dataset_name
@@ -62,7 +62,7 @@ class CosmosDBChatMessageHistory(BaseChatMessageHistory):

         self.messages: List[BaseMessage] = []
         try:
-            from azure.cosmos import (  # pylint: disable=import-outside-toplevel # noqa: E501
+            from azure.cosmos import (  # pylint: disable=import-outside-toplevel
                 CosmosClient,
             )
         except ImportError as exc:

@@ -91,7 +91,7 @@ class CosmosDBChatMessageHistory(BaseChatMessageHistory):
         Use this function or the context manager to make sure your database is ready.
         """
         try:
-            from azure.cosmos import (  # pylint: disable=import-outside-toplevel # noqa: E501
+            from azure.cosmos import (  # pylint: disable=import-outside-toplevel
                 PartitionKey,
             )
         except ImportError as exc:

@@ -128,7 +128,7 @@ class CosmosDBChatMessageHistory(BaseChatMessageHistory):
         if not self._container:
             raise ValueError("Container not initialized")
         try:
-            from azure.cosmos.exceptions import (  # pylint: disable=import-outside-toplevel # noqa: E501
+            from azure.cosmos.exceptions import (  # pylint: disable=import-outside-toplevel
                 CosmosHttpResponseError,
             )
         except ImportError as exc:

@@ -85,7 +85,7 @@ class AzureChatOpenAI(ChatOpenAI):

         For more:
         https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
-    """  # noqa: E501
+    """
     azure_ad_token_provider: Union[Callable[[], str], None] = None
     """A function that returns an Azure Active Directory token.

@@ -233,7 +233,7 @@ class AzureMLChatOnlineEndpoint(BaseChatModel, AzureMLBaseEndpoint):
                 endpoint_api_key="my-api-key",
                 content_formatter=chat_content_formatter,
             )
-    """  # noqa: E501
+    """

     @property
     def _identifying_params(self) -> Dict[str, Any]:
@@ -214,7 +214,7 @@ if TYPE_CHECKING:
         GitHubIssuesLoader,
     )
     from langchain_community.document_loaders.glue_catalog import (
-        GlueCatalogLoader,  # noqa: F401
+        GlueCatalogLoader,
     )
     from langchain_community.document_loaders.google_speech_to_text import (
         GoogleSpeechToTextLoader,

@@ -332,8 +332,8 @@ if TYPE_CHECKING:
         OracleAutonomousDatabaseLoader,
     )
     from langchain_community.document_loaders.oracleai import (
-        OracleDocLoader,  # noqa: F401
-        OracleTextSplitter,  # noqa: F401
+        OracleDocLoader,
+        OracleTextSplitter,
     )
     from langchain_community.document_loaders.org_mode import (
         UnstructuredOrgModeLoader,

@@ -31,7 +31,7 @@ class BlackboardLoader(WebBaseLoader):
             )
             documents = loader.load()

-    """  # noqa: E501
+    """

     def __init__(
         self,

@@ -34,7 +34,7 @@ class EverNoteLoader(BaseLoader):
         notes into a single long Document.
         If this is set to True (default) then the only metadata on the document will be
         the 'source' which contains the file name of the export.
-    """  # noqa: E501
+    """

     def __init__(self, file_path: Union[str, Path], load_single_document: bool = True):
         """Initialize with file path."""

@@ -208,9 +208,9 @@ class PebbloSafeLoader(BaseLoader):
         if loading_end is True:
             payload["loading_end"] = "true"
             if "loader_details" in payload:
-                payload["loader_details"]["source_aggregate_size"] = (  # noqa
-                    self.source_aggregate_size
-                )
+                payload["loader_details"][
+                    "source_aggregate_size"
+                ] = self.source_aggregate_size
         payload = Doc(**payload).dict(exclude_unset=True)
         load_doc_url = f"{self.classifier_url}{LOADER_DOC_URL}"
         classified_docs = []
@@ -81,7 +81,7 @@ _module_lookup = {
     "GoogleTranslateTransformer": "langchain_community.document_transformers.google_translate",  # noqa: E501
     "Html2TextTransformer": "langchain_community.document_transformers.html2text",
     "LongContextReorder": "langchain_community.document_transformers.long_context_reorder",  # noqa: E501
-    "MarkdownifyTransformer": "langchain_community.document_transformers.markdownify",  # noqa: E501
+    "MarkdownifyTransformer": "langchain_community.document_transformers.markdownify",
     "NucliaTextTransformer": "langchain_community.document_transformers.nuclia_text_transform",  # noqa: E501
     "OpenAIMetadataTagger": "langchain_community.document_transformers.openai_functions",  # noqa: E501
     "get_stateful_documents": "langchain_community.document_transformers.embeddings_redundant_filter",  # noqa: E501

@@ -57,7 +57,7 @@ class OpenAIMetadataTagger(BaseDocumentTransformer, BaseModel):
         new_documents = []

         for document in documents:
-            extracted_metadata: Dict = self.tagging_chain.run(document.page_content)  # type: ignore[assignment] # noqa: E501
+            extracted_metadata: Dict = self.tagging_chain.run(document.page_content)  # type: ignore[assignment]
             new_document = Document(
                 page_content=document.page_content,
                 metadata={**extracted_metadata, **document.metadata},

@@ -173,7 +173,7 @@ if TYPE_CHECKING:
         QuantizedBiEncoderEmbeddings,
     )
     from langchain_community.embeddings.oracleai import (
-        OracleEmbeddings,  # noqa: F401
+        OracleEmbeddings,
     )
     from langchain_community.embeddings.premai import (
         PremAIEmbeddings,

@@ -44,7 +44,7 @@ class AzureOpenAIEmbeddings(OpenAIEmbeddings):

         For more:
         https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
-    """  # noqa: E501
+    """
     azure_ad_token_provider: Union[Callable[[], str], None] = None
     """A function that returns an Azure Active Directory token.

@@ -25,7 +25,7 @@ class ElasticsearchEmbeddings(Embeddings):
     In Elasticsearch you need to have an embedding model loaded and deployed.
     - https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html
     - https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html
-    """  # noqa: E501
+    """

     def __init__(
         self,
@@ -182,7 +182,7 @@ class TinyAsyncOpenAIInfinityEmbeddingClient:  #: :meta private:
         length_sorted_idx = np.argsort([-sorter(sen) for sen in texts])
         texts_sorted = [texts[idx] for idx in length_sorted_idx]

-        return texts_sorted, lambda unsorted_embeddings: [  # noqa E731
+        return texts_sorted, lambda unsorted_embeddings: [  # E731
             unsorted_embeddings[idx] for idx in np.argsort(length_sorted_idx)
         ]

@@ -254,7 +254,7 @@ class LocalAIEmbeddings(BaseModel, Embeddings):
            openai.proxy = {
                "http": self.openai_proxy,
                "https": self.openai_proxy,
-           }  # type: ignore[assignment] # noqa: E501
+           }  # type: ignore[assignment]
        return openai_args

    def _embedding_func(self, text: str, *, engine: str) -> List[float]:

@@ -390,7 +390,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
            openai.proxy = {
                "http": self.openai_proxy,
                "https": self.openai_proxy,
-           }  # type: ignore[assignment] # noqa: E501
+           }  # type: ignore[assignment]
        return openai_args

    # please refer to

@@ -107,13 +107,13 @@ class YandexGPTEmbeddings(BaseModel, Embeddings):
                 raise ValueError("'doc_model_uri' or 'folder_id' must be provided.")
             values[
                 "doc_model_uri"
-            ] = f"emb://{values['folder_id']}/{values['doc_model_name']}/{values['model_version']}"  # noqa: E501
+            ] = f"emb://{values['folder_id']}/{values['doc_model_name']}/{values['model_version']}"
         if not values.get("model_uri"):
             if values["folder_id"] == "":
                 raise ValueError("'model_uri' or 'folder_id' must be provided.")
             values[
                 "model_uri"
-            ] = f"emb://{values['folder_id']}/{values['model_name']}/{values['model_version']}"  # noqa: E501
+            ] = f"emb://{values['folder_id']}/{values['model_name']}/{values['model_version']}"
         if values["disable_request_logging"]:
             values["_grpc_metadata"].append(
                 (
@@ -174,9 +174,9 @@ def get_arangodb_client(
             "Unable to import arango, please install with `pip install python-arango`."
         ) from e

-    _url: str = url or os.environ.get("ARANGODB_URL", "http://localhost:8529")  # type: ignore[assignment] # noqa: E501
-    _dbname: str = dbname or os.environ.get("ARANGODB_DBNAME", "_system")  # type: ignore[assignment] # noqa: E501
-    _username: str = username or os.environ.get("ARANGODB_USERNAME", "root")  # type: ignore[assignment] # noqa: E501
-    _password: str = password or os.environ.get("ARANGODB_PASSWORD", "")  # type: ignore[assignment] # noqa: E501
+    _url: str = url or os.environ.get("ARANGODB_URL", "http://localhost:8529")  # type: ignore[assignment]
+    _dbname: str = dbname or os.environ.get("ARANGODB_DBNAME", "_system")  # type: ignore[assignment]
+    _username: str = username or os.environ.get("ARANGODB_USERNAME", "root")  # type: ignore[assignment]
+    _password: str = password or os.environ.get("ARANGODB_PASSWORD", "")  # type: ignore[assignment]

     return ArangoClient(_url).db(_dbname, _username, _password, verify=True)

@@ -496,7 +496,7 @@ class AzureMLOnlineEndpoint(BaseLLM, AzureMLBaseEndpoint):
                 timeout=120,
                 content_formatter=content_formatter,
             )
-    """  # noqa: E501
+    """

     @property
     def _identifying_params(self) -> Mapping[str, Any]:

@@ -604,7 +604,7 @@ class BaseOpenAI(BaseLLM):
         if self.openai_proxy:
             import openai

-            openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy}  # type: ignore[assignment] # noqa: E501
+            openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy}  # type: ignore[assignment]
         return {**openai_creds, **self._default_params}

     @property

@@ -800,7 +800,7 @@ class AzureOpenAI(BaseOpenAI):

         For more:
         https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
-    """  # noqa: E501
+    """
     azure_ad_token_provider: Union[Callable[[], str], None] = None
     """A function that returns an Azure Active Directory token.

@@ -1055,7 +1055,7 @@ class OpenAIChat(BaseLLM):
             if openai_organization:
                 openai.organization = openai_organization
             if openai_proxy:
-                openai.proxy = {"http": openai_proxy, "https": openai_proxy}  # type: ignore[assignment] # noqa: E501
+                openai.proxy = {"http": openai_proxy, "https": openai_proxy}  # type: ignore[assignment]
         except ImportError:
             raise ImportError(
                 "Could not import openai python package. "
@@ -41,7 +41,7 @@ class Nebula(LLM):
                 nebula_service_path="NEBULA_SERVICE_PATH",
                 nebula_api_key="NEBULA_API_KEY",
             )
-    """  # noqa: E501
+    """

     """Key/value arguments to pass to the model. Reserved for future use"""
     model_kwargs: Optional[dict] = None

@@ -164,7 +164,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
         else:
             fn_name = _result["name"]
             _args = _result["arguments"]
-            pydantic_args = self.pydantic_schema[fn_name].parse_raw(_args)  # type: ignore  # noqa: E501
+            pydantic_args = self.pydantic_schema[fn_name].parse_raw(_args)  # type: ignore
         return pydantic_args


@@ -190,7 +190,7 @@ class _BaseGoogleVertexAISearchRetriever(BaseModel):
                 print(  # noqa: T201
                     "Make sure that your data store is using Advanced Website "
                     "Indexing.\n"
                     "https://cloud.google.com/generative-ai-app-builder/docs/about-advanced-features#advanced-website-indexing"  # noqa: E501
                 )

         return documents

@@ -59,7 +59,7 @@ If you have any files outputted write them to "output/" relative to the executio
 path. Output can only be read from the directory, stdout, and stdin. \
 Do not use things like plot.show() as it will \
 not work instead write them out `output/` and a link to the file will be returned. \
-print() any output and results so you can capture the output."""  # noqa: T201
+print() any output and results so you can capture the output."""


 class FileInfo(BaseModel):
@@ -51,7 +51,7 @@ class QueryPowerBITool(BaseTool):
         for var in llm_chain.prompt.input_variables:
             if var not in ["tool_input", "tables", "schemas", "examples"]:
                 raise ValueError(
-                    "LLM chain for QueryPowerBITool must have input variables ['tool_input', 'tables', 'schemas', 'examples'], found %s",  # noqa: C0301 E501 # pylint: disable=C0301
+                    "LLM chain for QueryPowerBITool must have input variables ['tool_input', 'tables', 'schemas', 'examples'], found %s",  # noqa: E501 # pylint: disable=C0301
                     llm_chain.prompt.input_variables,
                 )
         return llm_chain

@@ -105,7 +105,7 @@ if TYPE_CHECKING:
         OpenWeatherMapAPIWrapper,
     )
     from langchain_community.utilities.oracleai import (
-        OracleSummary,  # noqa: F401
+        OracleSummary,
     )
     from langchain_community.utilities.outline import (
         OutlineAPIWrapper,

@@ -103,7 +103,7 @@ class ArxivAPIWrapper(BaseModel):

         Args:
             query: a plaintext search query
-        """  # noqa: E501
+        """
         try:
             if self.is_arxiv_identifier(query):
                 results = self.arxiv_search(

@@ -142,7 +142,7 @@ class ArxivAPIWrapper(BaseModel):

         Args:
             query: a plaintext search query
-        """  # noqa: E501
+        """
         try:
             if self.is_arxiv_identifier(query):
                 results = self.arxiv_search(

@@ -60,7 +60,7 @@ class LambdaWrapper(BaseModel):
             query: an input to passed to the lambda
                 function as the ``body`` of a JSON
                 object.
-        """  # noqa: E501
+        """
         res = self.lambda_client.invoke(
             FunctionName=self.function_name,
             InvocationType="RequestResponse",
@@ -61,7 +61,7 @@ class PowerBIDataset(BaseModel):
         """Get the request url."""
         if self.group_id:
             return f"{BASE_URL}/groups/{self.group_id}/datasets/{self.dataset_id}/executeQueries"  # noqa: E501 # pylint: disable=C0301
-        return f"{BASE_URL}/datasets/{self.dataset_id}/executeQueries"  # noqa: E501 # pylint: disable=C0301
+        return f"{BASE_URL}/datasets/{self.dataset_id}/executeQueries"  # pylint: disable=C0301

     @property
     def headers(self) -> Dict[str, str]:

@@ -97,7 +97,7 @@ class TavilySearchAPIWrapper(BaseModel):
                 content: The content of the result.
                 score: The score of the result.
                 raw_content: The raw content of the result.
-        """  # noqa: E501
+        """
         raw_search_results = self.raw_results(
             query,
             max_results=max_results,

@@ -40,7 +40,7 @@ class TwilioAPIWrapper(BaseModel):
         Twilio also work here. You cannot, for example, spoof messages from a private
         cell phone number. If you are using `messaging_service_sid`, this parameter
         must be empty.
-    """  # noqa: E501
+    """

     class Config:
         """Configuration for this pydantic object."""

@@ -77,6 +77,6 @@ class TwilioAPIWrapper(BaseModel):
                 SMS/MMS or
                 [Channel user address](https://www.twilio.com/docs/sms/channels#channel-addresses)
                 for other 3rd-party channels.
-        """  # noqa: E501
+        """
         message = self.client.messages.create(to, from_=self.from_number, body=body)
         return message.sid
@@ -182,7 +182,7 @@ if TYPE_CHECKING:
         OpenSearchVectorSearch,
     )
     from langchain_community.vectorstores.oraclevs import (
-        OracleVS,  # noqa: F401
+        OracleVS,
     )
     from langchain_community.vectorstores.pathway import (
         PathwayVectorClient,

@@ -448,7 +448,7 @@ _module_lookup = {
     "MyScale": "langchain_community.vectorstores.myscale",
     "MyScaleSettings": "langchain_community.vectorstores.myscale",
     "Neo4jVector": "langchain_community.vectorstores.neo4j_vector",
-    "NeuralDBClientVectorStore": "langchain_community.vectorstores.thirdai_neuraldb",  # noqa: E501
+    "NeuralDBClientVectorStore": "langchain_community.vectorstores.thirdai_neuraldb",
     "NeuralDBVectorStore": "langchain_community.vectorstores.thirdai_neuraldb",
     "OpenSearchVectorSearch": "langchain_community.vectorstores.opensearch_vector_search",  # noqa: E501
     "OracleVS": "langchain_community.vectorstores.oraclevs",

@@ -431,7 +431,7 @@ class Jaguar(VectorStore):

     def prt(self, msg: str) -> None:
         with open("/tmp/debugjaguar.log", "a") as file:
-            print(f"msg={msg}", file=file, flush=True)  # noqa: T201
+            print(f"msg={msg}", file=file, flush=True)

     def _parseMeta(self, nvmap: dict, filecol: str) -> Tuple[List[str], List[str], str]:
         filepath = ""

@@ -397,7 +397,7 @@ class MomentoVectorIndex(VectorStore):
         )
         selected = [response.hits[i].metadata for i in mmr_selected]
         return [
-            Document(page_content=metadata.pop(self.text_field, ""), metadata=metadata)  # type: ignore # noqa: E501
+            Document(page_content=metadata.pop(self.text_field, ""), metadata=metadata)  # type: ignore
             for metadata in selected
         ]
@@ -1681,7 +1681,7 @@ class Qdrant(VectorStore):
                 f"Existing Qdrant collection {collection_name} uses named vectors. "
                 f"If you want to reuse it, please set `vector_name` to any of the "
                 f"existing named vectors: "
-                f"{', '.join(current_vector_config.keys())}."  # noqa
+                f"{', '.join(current_vector_config.keys())}."
                 f"If you want to recreate the collection, set `force_recreate` "
                 f"parameter to `True`."
             )

@@ -1846,7 +1846,7 @@ class Qdrant(VectorStore):
                 f"Existing Qdrant collection {collection_name} uses named vectors. "
                 f"If you want to reuse it, please set `vector_name` to any of the "
                 f"existing named vectors: "
-                f"{', '.join(current_vector_config.keys())}."  # noqa
+                f"{', '.join(current_vector_config.keys())}."
                 f"If you want to recreate the collection, set `force_recreate` "
                 f"parameter to `True`."
             )

@@ -383,7 +383,7 @@ class Redis(VectorStore):

         # type check for metadata
         if metadatas:
-            if isinstance(metadatas, list) and len(metadatas) != len(texts):  # type: ignore # noqa: E501
+            if isinstance(metadatas, list) and len(metadatas) != len(texts):  # type: ignore
                 raise ValueError("Number of metadatas must match number of texts")
             if not (isinstance(metadatas, list) and isinstance(metadatas[0], dict)):
                 raise ValueError("Metadatas must be a list of dicts")

@@ -704,7 +704,7 @@ class Redis(VectorStore):

         # type check for metadata
         if metadatas:
-            if isinstance(metadatas, list) and len(metadatas) != len(texts):  # type: ignore # noqa: E501
+            if isinstance(metadatas, list) and len(metadatas) != len(texts):  # type: ignore
                 raise ValueError("Number of metadatas must match number of texts")
             if not (isinstance(metadatas, list) and isinstance(metadatas[0], dict)):
                 raise ValueError("Metadatas must be a list of dicts")

@@ -832,7 +832,7 @@ class Redis(VectorStore):
         # Perform vector search
         # ignore type because redis-py is wrong about bytes
         try:
-            results = self.client.ft(self.index_name).search(redis_query, params_dict)  # type: ignore # noqa: E501
+            results = self.client.ft(self.index_name).search(redis_query, params_dict)  # type: ignore
         except redis.exceptions.ResponseError as e:
             # split error message and see if it starts with "Syntax"
             if str(e).split(" ")[0] == "Syntax":

@@ -947,7 +947,7 @@ class Redis(VectorStore):
         # Perform vector search
         # ignore type because redis-py is wrong about bytes
         try:
-            results = self.client.ft(self.index_name).search(redis_query, params_dict)  # type: ignore # noqa: E501
+            results = self.client.ft(self.index_name).search(redis_query, params_dict)  # type: ignore
         except redis.exceptions.ResponseError as e:
             # split error message and see if it starts with "Syntax"
             if str(e).split(" ")[0] == "Syntax":
@@ -66,7 +66,7 @@ class TimescaleVector(VectorStore):
                 collection_name=COLLECTION_NAME,
                 service_url=SERVICE_URL,
             )
-    """  # noqa: E501
+    """

     def __init__(
         self,

@@ -12,7 +12,7 @@ from langchain_community.document_loaders.tensorflow_datasets import (
 )

 if TYPE_CHECKING:
-    import tensorflow as tf  # noqa: E402
+    import tensorflow as tf


 def decode_to_str(item: tf.Tensor) -> str:

@@ -10,7 +10,7 @@ from langchain_core.pydantic_v1 import ValidationError
 from langchain_community.utilities.tensorflow_datasets import TensorflowDatasets

 if TYPE_CHECKING:
-    import tensorflow as tf  # noqa: E402
+    import tensorflow as tf


 def decode_to_str(item: tf.Tensor) -> str:

@@ -7,7 +7,7 @@ from langchain_community.vectorstores import Qdrant
 from tests.integration_tests.vectorstores.fake_embeddings import (
     ConsistentFakeEmbeddings,
 )
-from tests.integration_tests.vectorstores.qdrant.async_api.fixtures import (  # noqa
+from tests.integration_tests.vectorstores.qdrant.async_api.fixtures import (
     qdrant_locations,
 )
@@ -74,7 +74,7 @@ class TestAzureCosmosDBVectorSearch:

         # insure the test collection is empty
         collection = prepare_collection()
-        assert collection.count_documents({}) == 0  # type: ignore[index] # noqa: E501
+        assert collection.count_documents({}) == 0  # type: ignore[index]

     @classmethod
     def teardown_class(cls) -> None:

@@ -70,7 +70,7 @@ class TestDocumentDBVectorSearch:

         # insure the test collection is empty
         collection = prepare_collection()
-        assert collection.count_documents({}) == 0  # type: ignore[index] # noqa: E501
+        assert collection.count_documents({}) == 0  # type: ignore[index]

     @classmethod
     def teardown_class(cls) -> None:

@@ -34,7 +34,7 @@ class TestMongoDBAtlasVectorSearch:
     def setup_class(cls) -> None:
         # insure the test collection is empty
         collection = get_collection()
-        assert collection.count_documents({}) == 0  # type: ignore[index] # noqa: E501
+        assert collection.count_documents({}) == 0  # type: ignore[index]

     @classmethod
     def teardown_class(cls) -> None:
@@ -33,7 +33,7 @@ end"""
     @pytest.mark.skip(
         reason=(
            "Flakey. To be investigated. See "
-            "https://github.com/langchain-ai/langchain/actions/runs/7907779756/job/21585580650."  # noqa: E501
+            "https://github.com/langchain-ai/langchain/actions/runs/7907779756/job/21585580650."
         )
     )
     def test_extract_functions_classes(self) -> None:

@@ -46,7 +46,7 @@ end"""
         reason=(
             "Flakey. To be investigated. See "
             "https://github.com/langchain-ai/langchain/actions/runs/7923203031/job/21632416298?pr=17599 "  # noqa: E501
-            "and https://github.com/langchain-ai/langchain/actions/runs/7923784089/job/2163420864."  # noqa: E501
+            "and https://github.com/langchain-ai/langchain/actions/runs/7923784089/job/2163420864."
         )
     )
     def test_simplify_code(self) -> None:

@@ -42,7 +42,7 @@ def mock_response() -> requests.Response:
 def mock_response_stream():  # type: ignore[no-untyped-def]
     mock_response = deque(
         [
-            b'data: {"content":"the","multimodal":false,"slot_id":0,"stop":false}\n\n',  # noqa
+            b'data: {"content":"the","multimodal":false,"slot_id":0,"stop":false}\n\n',
             b'data: {"content":" quick","multimodal":false,"slot_id":0,"stop":false}\n\n',  # noqa
         ]
     )

@@ -22,7 +22,7 @@ except ImportError:
 from langchain.globals import get_llm_cache, set_llm_cache
 from langchain_core.outputs import Generation, LLMResult

-from langchain_community.cache import SQLAlchemyCache  # noqa: E402
+from langchain_community.cache import SQLAlchemyCache
 from tests.unit_tests.llms.fake_llm import FakeLLM
@@ -3,7 +3,7 @@ import pytest

 pytest.importorskip("langchain_community")

-from langchain_community.document_transformers.embeddings_redundant_filter import (  # noqa: E402,E501
+from langchain_community.document_transformers.embeddings_redundant_filter import (  # noqa: E402
     _filter_similar_embeddings,
 )
 from langchain_community.utils.math import cosine_similarity  # noqa: E402

@@ -110,7 +110,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
     # Ignoring mypy re-assignment here since we're overriding the value
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
-    type: Literal["AIMessageChunk"] = "AIMessageChunk"  # type: ignore[assignment] # noqa: E501
+    type: Literal["AIMessageChunk"] = "AIMessageChunk"  # type: ignore[assignment]

     tool_call_chunks: List[ToolCallChunk] = []
     """If provided, tool call chunks associated with the message."""

@@ -28,7 +28,7 @@ class HumanMessageChunk(HumanMessage, BaseMessageChunk):
     # Ignoring mypy re-assignment here since we're overriding the value
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
-    type: Literal["HumanMessageChunk"] = "HumanMessageChunk"  # type: ignore[assignment] # noqa: E501
+    type: Literal["HumanMessageChunk"] = "HumanMessageChunk"  # type: ignore[assignment]

     @classmethod
     def get_lc_namespace(cls) -> List[str]:

@@ -25,7 +25,7 @@ class SystemMessageChunk(SystemMessage, BaseMessageChunk):
     # Ignoring mypy re-assignment here since we're overriding the value
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
-    type: Literal["SystemMessageChunk"] = "SystemMessageChunk"  # type: ignore[assignment] # noqa: E501
+    type: Literal["SystemMessageChunk"] = "SystemMessageChunk"  # type: ignore[assignment]

     @classmethod
     def get_lc_namespace(cls) -> List[str]:
@@ -205,7 +205,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
         else:
             fn_name = _result["name"]
             _args = _result["arguments"]
-            pydantic_args = self.pydantic_schema[fn_name].parse_raw(_args)  # type: ignore  # noqa: E501
+            pydantic_args = self.pydantic_schema[fn_name].parse_raw(_args)  # type: ignore
         return pydantic_args


@@ -61,7 +61,7 @@ class ChatGenerationChunk(ChatGeneration):

     message: BaseMessageChunk
     # Override type to be ChatGeneration, ignore mypy error as this is intentional
-    type: Literal["ChatGenerationChunk"] = "ChatGenerationChunk"  # type: ignore[assignment] # noqa: E501
+    type: Literal["ChatGenerationChunk"] = "ChatGenerationChunk"  # type: ignore[assignment]
     """Type is used exclusively for serialization purposes."""

     @classmethod

@@ -431,7 +431,7 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
             if isinstance(tmpl, str):
                 text: str = tmpl
             else:
-                text = cast(_TextTemplateParam, tmpl)["text"]  # type: ignore[assignment] # noqa: E501
+                text = cast(_TextTemplateParam, tmpl)["text"]  # type: ignore[assignment]
             prompt.append(
                 PromptTemplate.from_template(
                     text, template_format=template_format

@@ -517,7 +517,7 @@ class Runnable(Generic[Input, Output], ABC):
             json_and_bytes_chain.invoke("[1, 2, 3]")
             # -> {"json": [1, 2, 3], "bytes": b"[1, 2, 3]"}

-        """  # noqa: E501
+        """
         from langchain_core.runnables.passthrough import RunnablePick

         return self | RunnablePick(keys)
@@ -244,8 +244,8 @@ def draw_ascii(vertices: Mapping[str, str], edges: Sequence[LangEdge]) -> str:

     # NOTE: coordinates might me negative, so we need to shift
     # everything to the positive plane before we actually draw it.
-    Xs = []  # noqa: N806
-    Ys = []  # noqa: N806
+    Xs = []
+    Ys = []

     sug = _build_sugiyama_layout(vertices, edges)

@@ -214,7 +214,7 @@ class RunnableWithMessageHistory(RunnableBindingBase):
                 config={"configurable": {"user_id": "123", "conversation_id": "1"}}
             )

-    """  # noqa: E501
+    """

     get_session_history: GetSessionHistoryCallable
     input_messages_key: Optional[str] = None

@@ -297,7 +297,7 @@ class RunnableWithMessageHistory(RunnableBindingBase):
                 into the get_session_history factory.
             **kwargs: Arbitrary additional kwargs to pass to parent class
                 ``RunnableBindingBase`` init.
-        """  # noqa: E501
+        """
         history_chain: Runnable = RunnableLambda(
             self._enter_history, self._aenter_history
         ).with_config(run_name="load_history")

@@ -33,10 +33,10 @@ if TYPE_CHECKING:
 tracing_callback_var: Any = None
 tracing_v2_callback_var: ContextVar[Optional[LangChainTracer]] = ContextVar(
     "tracing_callback_v2", default=None
-)  # noqa: E501
+)
 run_collector_var: ContextVar[Optional[RunCollectorCallbackHandler]] = ContextVar(
     "run_collector", default=None
-)  # noqa: E501
+)


 @contextmanager
@@ -38,6 +38,6 @@ def print_text(
 ) -> None:
     """Print text with highlighting and no end characters."""
     text_to_print = get_colored_text(text, color) if color else text
-    print(text_to_print, end=end, file=file)  # noqa: T201
+    print(text_to_print, end=end, file=file)
     if file:
         file.flush()  # ensure all printed content are written to file

@@ -42,7 +42,7 @@ def test_indent_lines_after_first(text: str, prefix: str, expected_output: str)


 def test_nonlocals() -> None:
-    agent = RunnableLambda(lambda x: x * 2)  # noqa: F841
+    agent = RunnableLambda(lambda x: x * 2)

     def my_func(input: str, agent: Dict[str, str]) -> str:
         return agent.get("agent_name", input)

@@ -143,7 +143,7 @@ def test_chat_message_chunks() -> None:
         role="User", content=" indeed."
     ) == AIMessageChunk(
         content="I am indeed."
-    ), "Other MessageChunk + ChatMessageChunk should be a MessageChunk as the left side"  # noqa: E501
+    ), "Other MessageChunk + ChatMessageChunk should be a MessageChunk as the left side"


 def test_function_message_chunks() -> None:
@@ -626,7 +626,7 @@ def test_exception_handling_callable() -> None:
     expected = "foo bar"

     def handling(e: ToolException) -> str:
-        return expected  # noqa: E731
+        return expected

     _tool = _FakeExceptionTool(handle_tool_error=handling)
     actual = _tool.run({})

@@ -657,7 +657,7 @@ async def test_async_exception_handling_callable() -> None:
     expected = "foo bar"

     def handling(e: ToolException) -> str:
-        return expected  # noqa: E731
+        return expected

     _tool = _FakeExceptionTool(handle_tool_error=handling)
     actual = await _tool.arun({})

@@ -723,7 +723,7 @@ def test_validation_error_handling_callable() -> None:
     expected = "foo bar"

     def handling(e: ValidationError) -> str:
-        return expected  # noqa: E731
+        return expected

     _tool = _MockStructuredTool(handle_validation_error=handling)
     actual = _tool.run({})

@@ -785,7 +785,7 @@ async def test_async_validation_error_handling_callable() -> None:
     expected = "foo bar"

     def handling(e: ValidationError) -> str:
-        return expected  # noqa: E731
+        return expected

     _tool = _MockStructuredTool(handle_validation_error=handling)
     actual = await _tool.arun({})
@@ -6,7 +6,7 @@ a language model is used as a reasoning engine to determine which actions
 to take and in which order.

 Agents select and use **Tools** and **Toolkits** for actions.
-"""  # noqa: E501
+"""
 from langchain_experimental.agents.agent_toolkits import (
     create_csv_agent,
     create_pandas_dataframe_agent,

@@ -220,7 +220,7 @@ def create_pandas_dataframe_agent(
                 verbose=True
             )

-    """  # noqa: E501
+    """
     try:
         if engine == "modin":
             import modin.pandas as pd

@@ -149,7 +149,7 @@ class Llama2Chat(ChatWrapper):


 class Mixtral(ChatWrapper):
-    """See https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1#instruction-format"""  # noqa: E501
+    """See https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1#instruction-format"""

     @property
     def _llm_type(self) -> str:
@@ -88,7 +88,7 @@ class BashProcess:

         Args:
             Prompt(str): the bash command to execute
-        """  # noqa: E501
+        """
         pexpect = self._lazy_import_pexpect()
         process = pexpect.spawn(
             "env", ["-i", "bash", "--norc", "--noprofile"], encoding="utf-8"

@@ -107,7 +107,7 @@ class BashProcess:
         Args:
             commands(List[str]): a list of commands to
                 execute in the session
-        """  # noqa: E501
+        """
         if isinstance(commands, str):
             commands = [commands]
         commands = ";".join(commands)

@@ -125,7 +125,7 @@ class BashProcess:

         Args:
             command: The command to run
-        """  # noqa: E501
+        """
         try:
             output = subprocess.run(
                 command,

@@ -149,7 +149,7 @@ class BashProcess:
         Args:
             output: a process' output string
             command: the executed command
-        """  # noqa: E501
+        """
         pattern = re.escape(command) + r"\s*\n"
         output = re.sub(pattern, "", output, count=1)
         return output.strip()

@@ -161,7 +161,7 @@ class BashProcess:

         Args:
             command: the command to execute
-        """  # noqa: E501
+        """
         pexpect = self._lazy_import_pexpect()
         if self.process is None:
             raise ValueError("Process not initialized")
@@ -3,7 +3,7 @@
 See the paper: https://arxiv.org/pdf/2211.10435.pdf.

 This chain is vulnerable to [arbitrary code execution](https://github.com/langchain-ai/langchain/issues/5872).
-"""  # noqa: E501
+"""
 from langchain_experimental.pal_chain.base import PALChain

 __all__ = ["PALChain"]

@@ -3,7 +3,7 @@
 [Amazon Personalize](https://docs.aws.amazon.com/personalize/latest/dg/what-is-personalize.html)
 is a fully managed machine learning service that uses your data to generate
 item recommendations for your users.
-"""  # noqa: E501
+"""
 from langchain_experimental.recommenders.amazon_personalize import AmazonPersonalize
 from langchain_experimental.recommenders.amazon_personalize_chain import (
     AmazonPersonalizeChain,

@@ -475,7 +475,7 @@ class RLChain(Chain, Generic[TEvent]):
     def save_progress(self) -> None:
         """
         This function should be called to save the state of the learned policy model.
-        """  # noqa: E501
+        """
         self.active_policy.save()

     def _validate_inputs(self, inputs: Dict[str, Any]) -> None:
@@ -25,7 +25,7 @@ def test_prompt(model: Mixtral) -> None:

     actual = model.invoke(messages).content  # type: ignore
     expected = (
-        "<s>[INST] sys-msg\nusr-msg-1 [/INST] ai-msg-1 </s> [INST] usr-msg-2 [/INST]"  # noqa: E501
+        "<s>[INST] sys-msg\nusr-msg-1 [/INST] ai-msg-1 </s> [INST] usr-msg-2 [/INST]"
     )

     assert actual == expected

@@ -78,7 +78,7 @@ string = "racecar"
 if string == string[::-1]:
     print(string, "is a palindrome")  # noqa: T201
 else:
-    print(string, "is not a palindrome")"""  # noqa: T201
+    print(string, "is not a palindrome")"""
 tool = PythonAstREPLTool()
 assert tool.run(program) == "racecar is a palindrome\n"

@@ -1,4 +1,4 @@
-from typing import Any, List, Optional  # noqa: E501
+from typing import Any, List, Optional

 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.memory import BaseMemory
@@ -33,8 +33,8 @@ class LoggingCallbackHandler(FunctionCallbackHandler):
         text: str,
         *,
         run_id: UUID,
-        parent_run_id: Optional[UUID] = None,  # noqa: ARG002
-        **kwargs: Any,  # noqa: ARG002
+        parent_run_id: Optional[UUID] = None,
+        **kwargs: Any,
     ) -> None:
         try:
             crumbs_str = f"[{self.get_breadcrumbs(run=self._get_run(run_id=run_id))}] "

@@ -76,7 +76,7 @@ def _openapi_params_to_json_schema(params: List[Parameter], spec: OpenAPISpec) -
         if p.param_schema:
             schema = spec.get_schema(p.param_schema)
         else:
-            media_type_schema = list(p.content.values())[0].media_type_schema  # type: ignore # noqa: E501
+            media_type_schema = list(p.content.values())[0].media_type_schema  # type: ignore
             schema = spec.get_schema(media_type_schema)
         if p.description and not schema.description:
             schema.description = p.description

@@ -237,7 +237,7 @@ class SimpleRequestChain(Chain):
         else:
             try:
                 response = api_response.json()
-            except Exception:  # noqa: E722
+            except Exception:
                 response = api_response.text
         return {self.output_key: response}

@@ -280,7 +280,7 @@ def get_openapi_chain(
             break
         except ImportError as e:
             raise e
-        except Exception:  # noqa: E722
+        except Exception:
             pass
     if isinstance(spec, str):
         raise ValueError(f"Unable to parse spec from source {spec}")
@@ -141,7 +141,7 @@ class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain):
         )
         print(result["score"])  # noqa: T201
         # 0
-    """  # noqa: E501
+    """

     agent_tools: Optional[List[BaseTool]] = None
     """A list of tools available to the agent."""

@@ -142,7 +142,7 @@ def resolve_criteria(
     >>> criterion = "relevance"
     >>> CriteriaEvalChain.resolve_criteria(criteria)
     {'relevance': 'Is the submission referring to a real quote from the text?'}
-    """  # noqa: E501
+    """
     if criteria is None:
         return {
             "helpfulness": _SUPPORTED_CRITERIA[Criteria.HELPFULNESS],

@@ -307,7 +307,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
        >>> criterion = "relevance"
        >>> CriteriaEvalChain.resolve_criteria(criteria)
        {'relevance': 'Is the submission referring to a real quote from the text?'}
-        """  # noqa: E501
+        """
        return resolve_criteria(criteria)

    @classmethod

@@ -56,7 +56,7 @@ def load_dataset(uri: str) -> List[Dict]:

         from langchain.evaluation import load_dataset
         ds = load_dataset("llm-math")
-    """  # noqa: E501
+    """
     try:
         from datasets import load_dataset
     except ImportError:
@@ -439,7 +439,7 @@ class LabeledScoreStringEvalChain(ScoreStringEvalChain):
         Raises:
             ValueError: If the input variables are not as expected.

-        """  # noqa: E501
+        """
         expected_input_vars = {
             "prediction",
             "input",

@@ -133,7 +133,7 @@ class RunEvalConfig(BaseModel):
     as `EvaluatorType.QA`, the evaluator type string ("qa"), or a configuration for a
     given evaluator
     (e.g.,
-    :class:`RunEvalConfig.QA <langchain.smith.evaluation.config.RunEvalConfig.QA>`)."""  # noqa: E501
+    :class:`RunEvalConfig.QA <langchain.smith.evaluation.config.RunEvalConfig.QA>`)."""
     custom_evaluators: Optional[List[CUSTOM_EVALUATOR_TYPE]] = None
     """Custom evaluators to apply to the dataset run."""
     batch_evaluators: Optional[List[BATCH_EVALUATOR_LIKE]] = None

@@ -225,7 +225,7 @@ def _wrap_in_chain_factory(
             return lambda: RunnableLambda(constructor)
         else:
             # Typical correct case
-            return constructor  # noqa
+            return constructor
     return llm_or_chain_factory

@@ -165,5 +165,5 @@ def test_anthropic_complex_structured_output() -> None:
         {
             "email": "From: Erick. The email is about the new project. The tone is positive. The action items are to send the report and to schedule a meeting."  # noqa: E501
         }
-    )  # noqa: E501
+    )
     assert isinstance(response, Email)
@@ -51,7 +51,7 @@ class TestMongoDBAtlasVectorSearch:
         # insure the test collection is empty
         collection = get_collection()
         if collection.count_documents({}):
-            collection.delete_many({})  # type: ignore[index] # noqa: E501
+            collection.delete_many({})  # type: ignore[index]

     @classmethod
     def teardown_class(cls) -> None:

@@ -46,7 +46,7 @@ class TestMongoDBAtlasVectorSearch:
     def setup_class(cls) -> None:
         # ensure the test collection is empty
         collection = get_collection()
-        assert collection.count_documents({}) == 0  # type: ignore[index] # noqa: E501
+        assert collection.count_documents({}) == 0  # type: ignore[index]

     @classmethod
     def teardown_class(cls) -> None:

@@ -81,7 +81,7 @@ class AzureChatOpenAI(BaseChatOpenAI):

         For more:
         https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
-    """  # noqa: E501
+    """
     azure_ad_token_provider: Union[Callable[[], str], None] = None
     """A function that returns an Azure Active Directory token.

@@ -178,7 +178,7 @@ class AzureChatOpenAI(BaseChatOpenAI):
                 'azure_endpoint="https://xxx.openai.azure.com/", '
                 'azure_deployment="my-deployment"\n\n'
                 "Or you can equivalently specify:\n\n"
-                'base_url="https://xxx.openai.azure.com/openai/deployments/my-deployment"'  # noqa: E501
+                'base_url="https://xxx.openai.azure.com/openai/deployments/my-deployment"'
             )
         client_params = {
             "api_version": values["openai_api_version"],
Some files were not shown because too many files have changed in this diff.