Mirror of https://github.com/hwchase17/langchain.git (synced 2026-01-04 23:47:36 +00:00)
infra: add print rule to ruff (#16221)
Added "# noqa: T201" comments to the existing print calls. These can be removed gradually, and the new rule prevents more prints from being introduced.
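For context, the change has two halves: the T201 rule (ruff's flake8-print check, "print found") is added to each package's select list in pyproject.toml, and every existing print call gets an inline suppression so CI stays green. A minimal sketch, assuming the select list sits under ruff's lint configuration (the exact table name depends on the ruff version each package pins):

[tool.ruff.lint]
select = [
    "E",    # pycodestyle
    "F",    # pyflakes
    "I",    # isort
    "T201", # flag calls to print()
]

Any print that must stay for now is suppressed on its own line, exactly as the hunks below show, e.g.:

print(f"dirs-to-run={json_output}")  # noqa: T201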
.github/scripts/check_diff.py (vendored, 2 lines changed)
@@ -47,4 +47,4 @@ if __name__ == "__main__":
else:
pass
json_output = json.dumps(list(dirs_to_run))
-print(f"dirs-to-run={json_output}")
+print(f"dirs-to-run={json_output}") # noqa: T201
.github/scripts/get_min_versions.py (vendored, 4 lines changed)
@@ -62,4 +62,6 @@ toml_file = sys.argv[1]
# Call the function to get the minimum versions
min_versions = get_min_version_from_toml(toml_file)

-print(" ".join([f"{lib}=={version}" for lib, version in min_versions.items()]))
+print(
+" ".join([f"{lib}=={version}" for lib, version in min_versions.items()])
+) # noqa: T201
@@ -7,4 +7,4 @@ ignore_words_list = (
pyproject_toml.get("tool", {}).get("codespell", {}).get("ignore-words-list")
)

-print(f"::set-output name=ignore_words_list::{ignore_words_list}")
+print(f"::set-output name=ignore_words_list::{ignore_words_list}") # noqa: T201
@@ -1,4 +1,5 @@
"""Script for auto-generating api_reference.rst."""
+
import importlib
import inspect
import os
@@ -186,7 +187,7 @@ def _load_package_modules(
modules_by_namespace[top_namespace] = _module_members

except ImportError as e:
-print(f"Error: Unable to import module '{namespace}' with error: {e}")
+print(f"Error: Unable to import module '{namespace}' with error: {e}") # noqa: T201

return modules_by_namespace
@@ -3,7 +3,7 @@ class MyClass:
self.name = name

def greet(self):
-print(f"Hello, {self.name}!")
+print(f"Hello, {self.name}!") # noqa: T201


def main():
@@ -64,7 +64,7 @@ def main():
global_imports = {}

for file in find_files(args.docs_dir):
-print(f"Adding links for imports in {file}")
+print(f"Adding links for imports in {file}") # noqa: T201
file_imports = replace_imports(file)

if file_imports:
@@ -58,6 +58,7 @@ select = [
"E", # pycodestyle
"F", # pyflakes
"I", # isort
+"T201", # print
]

[tool.mypy]
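With the rule selected as above, a local run along these lines (a sketch; each package wires lint into its own task runner) reports any unsuppressed print while skipping the annotated ones:

ruff check --select T201 .
# lines carrying "# noqa: T201" pass; any newly introduced bare print() fails the check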
@@ -10,8 +10,8 @@ if __name__ == "__main__":
SourceFileLoader("x", file).load_module()
except Exception:
has_faillure = True
-print(file)
+print(file) # noqa: T201
traceback.print_exc()
-print()
+print() # noqa: T201

sys.exit(1 if has_failure else 0)
@@ -45,6 +45,7 @@ select = [
"E", # pycodestyle
"F", # pyflakes
"I", # isort
+"T201", # print
]

[tool.poe.tasks]
@@ -49,7 +49,7 @@ class ArizeCallbackHandler(BaseCallbackHandler):
if SPACE_KEY == "SPACE_KEY" or API_KEY == "API_KEY":
raise ValueError("❌ CHANGE SPACE AND API KEYS")
else:
-print("✅ Arize client setup done! Now you can start using Arize!")
+print("✅ Arize client setup done! Now you can start using Arize!") # noqa: T201

def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
@@ -161,9 +161,9 @@ class ArizeCallbackHandler(BaseCallbackHandler):
environment=Environments.PRODUCTION,
)
if response_from_arize.status_code == 200:
-print("✅ Successfully logged data to Arize!")
+print("✅ Successfully logged data to Arize!") # noqa: T201
else:
-print(f'❌ Logging failed "{response_from_arize.text}"')
+print(f'❌ Logging failed "{response_from_arize.text}"') # noqa: T201

def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing."""
@@ -509,8 +509,8 @@ class ClearMLCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
|
||||
target_filename=name,
|
||||
)
|
||||
except NotImplementedError as e:
|
||||
print("Could not save model.")
|
||||
print(repr(e))
|
||||
print("Could not save model.") # noqa: T201
|
||||
print(repr(e)) # noqa: T201
|
||||
pass
|
||||
|
||||
# Cleanup after adding everything to ClearML
|
||||
|
||||
@@ -116,13 +116,13 @@ class DeepEvalCallbackHandler(BaseCallbackHandler):
|
||||
output=output,
|
||||
query=query,
|
||||
)
|
||||
print(f"Answer Relevancy: {result}")
|
||||
print(f"Answer Relevancy: {result}") # noqa: T201
|
||||
elif isinstance(metric, UnBiasedMetric):
|
||||
score = metric.measure(output)
|
||||
print(f"Bias Score: {score}")
|
||||
print(f"Bias Score: {score}") # noqa: T201
|
||||
elif isinstance(metric, NonToxicMetric):
|
||||
score = metric.measure(output)
|
||||
print(f"Toxic Score: {score}")
|
||||
print(f"Toxic Score: {score}") # noqa: T201
|
||||
else:
|
||||
raise ValueError(
|
||||
f"""Metric {metric.__name__} is not supported by deepeval
|
||||
|
||||
@@ -86,7 +86,7 @@ class InfinoCallbackHandler(BaseCallbackHandler):
|
||||
},
|
||||
}
|
||||
if self.verbose:
|
||||
print(f"Tracking {key} with Infino: {payload}")
|
||||
print(f"Tracking {key} with Infino: {payload}") # noqa: T201
|
||||
|
||||
# Append to Infino time series only if is_ts is True, otherwise
|
||||
# append to Infino log.
|
||||
@@ -245,7 +245,7 @@ class InfinoCallbackHandler(BaseCallbackHandler):
|
||||
self._send_to_infino("prompt_tokens", prompt_tokens)
|
||||
|
||||
if self.verbose:
|
||||
print(
|
||||
print( # noqa: T201
|
||||
f"on_chat_model_start: is_chat_openai_model= \
|
||||
{self.is_chat_openai_model}, \
|
||||
chat_openai_model_name={self.chat_openai_model_name}"
|
||||
|
||||
@@ -646,9 +646,11 @@ class MlflowCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
|
||||
{
|
||||
"page_content": doc.page_content,
|
||||
"metadata": {
|
||||
k: str(v)
|
||||
if not isinstance(v, list)
|
||||
else ",".join(str(x) for x in v)
|
||||
k: (
|
||||
str(v)
|
||||
if not isinstance(v, list)
|
||||
else ",".join(str(x) for x in v)
|
||||
)
|
||||
for k, v in doc.metadata.items()
|
||||
},
|
||||
}
|
||||
@@ -757,15 +759,15 @@ class MlflowCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
|
||||
langchain_asset.save_agent(langchain_asset_path)
|
||||
self.mlflg.artifact(langchain_asset_path)
|
||||
except AttributeError:
|
||||
print("Could not save model.")
|
||||
print("Could not save model.") # noqa: T201
|
||||
traceback.print_exc()
|
||||
pass
|
||||
except NotImplementedError:
|
||||
print("Could not save model.")
|
||||
print("Could not save model.") # noqa: T201
|
||||
traceback.print_exc()
|
||||
pass
|
||||
except NotImplementedError:
|
||||
print("Could not save model.")
|
||||
print("Could not save model.") # noqa: T201
|
||||
traceback.print_exc()
|
||||
pass
|
||||
if finish:
|
||||
|
||||
@@ -558,8 +558,8 @@ class WandbCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
|
||||
model_artifact.add_file(str(langchain_asset_path))
|
||||
model_artifact.metadata = load_json_to_dict(langchain_asset_path)
|
||||
except NotImplementedError as e:
|
||||
print("Could not save model.")
|
||||
print(repr(e))
|
||||
print("Could not save model.") # noqa: T201
|
||||
print(repr(e)) # noqa: T201
|
||||
pass
|
||||
self.run.log_artifact(model_artifact)
|
||||
|
||||
@@ -577,7 +577,9 @@ class WandbCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
|
||||
name=name if name else self.name,
|
||||
notes=notes if notes else self.notes,
|
||||
visualize=visualize if visualize else self.visualize,
|
||||
complexity_metrics=complexity_metrics
|
||||
if complexity_metrics
|
||||
else self.complexity_metrics,
|
||||
complexity_metrics=(
|
||||
complexity_metrics
|
||||
if complexity_metrics
|
||||
else self.complexity_metrics
|
||||
),
|
||||
)
|
||||
|
||||
@@ -34,7 +34,7 @@ class RocksetChatMessageHistory(BaseChatMessageHistory):
|
||||
history.add_user_message("hi!")
|
||||
history.add_ai_message("whats up?")
|
||||
|
||||
print(history.messages)
|
||||
print(history.messages) # noqa: T201
|
||||
"""
|
||||
|
||||
# You should set these values based on your VI.
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""deepinfra.com chat models wrapper"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
@@ -207,7 +208,7 @@ class ChatDeepInfra(BaseChatModel):
|
||||
return response
|
||||
except Exception as e:
|
||||
# import pdb; pdb.set_trace()
|
||||
print("EX", e)
|
||||
print("EX", e) # noqa: T201
|
||||
raise
|
||||
|
||||
return _completion_with_retry(**kwargs)
|
||||
@@ -231,7 +232,7 @@ class ChatDeepInfra(BaseChatModel):
|
||||
self._handle_status(response.status, response.text)
|
||||
return await response.json()
|
||||
except Exception as e:
|
||||
print("EX", e)
|
||||
print("EX", e) # noqa: T201
|
||||
raise
|
||||
|
||||
return await _completion_with_retry(**kwargs)
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""ChatModel wrapper which returns user input as the response.."""
|
||||
|
||||
from io import StringIO
|
||||
from typing import Any, Callable, Dict, List, Mapping, Optional
|
||||
|
||||
@@ -30,9 +31,9 @@ def _display_messages(messages: List[BaseMessage]) -> None:
|
||||
width=10000,
|
||||
line_break=None,
|
||||
)
|
||||
print("\n", "======= start of message =======", "\n\n")
|
||||
print(yaml_string)
|
||||
print("======= end of message =======", "\n\n")
|
||||
print("\n", "======= start of message =======", "\n\n") # noqa: T201
|
||||
print(yaml_string) # noqa: T201
|
||||
print("======= end of message =======", "\n\n") # noqa: T201
|
||||
|
||||
|
||||
def _collect_yaml_input(
|
||||
|
||||
@@ -150,7 +150,7 @@ class AssemblyAIAudioLoaderById(BaseLoader):
|
||||
)
|
||||
transcript_response.raise_for_status()
|
||||
except Exception as e:
|
||||
print(f"An error occurred: {e}")
|
||||
print(f"An error occurred: {e}") # noqa: T201
|
||||
raise
|
||||
|
||||
transcript = transcript_response.json()["text"]
|
||||
@@ -166,7 +166,7 @@ class AssemblyAIAudioLoaderById(BaseLoader):
|
||||
)
|
||||
paragraphs_response.raise_for_status()
|
||||
except Exception as e:
|
||||
print(f"An error occurred: {e}")
|
||||
print(f"An error occurred: {e}") # noqa: T201
|
||||
raise
|
||||
|
||||
paragraphs = paragraphs_response.json()["paragraphs"]
|
||||
@@ -181,7 +181,7 @@ class AssemblyAIAudioLoaderById(BaseLoader):
|
||||
)
|
||||
sentences_response.raise_for_status()
|
||||
except Exception as e:
|
||||
print(f"An error occurred: {e}")
|
||||
print(f"An error occurred: {e}") # noqa: T201
|
||||
raise
|
||||
|
||||
sentences = sentences_response.json()["sentences"]
|
||||
@@ -196,7 +196,7 @@ class AssemblyAIAudioLoaderById(BaseLoader):
|
||||
)
|
||||
srt_response.raise_for_status()
|
||||
except Exception as e:
|
||||
print(f"An error occurred: {e}")
|
||||
print(f"An error occurred: {e}") # noqa: T201
|
||||
raise
|
||||
|
||||
srt = srt_response.text
|
||||
@@ -211,7 +211,7 @@ class AssemblyAIAudioLoaderById(BaseLoader):
|
||||
)
|
||||
vtt_response.raise_for_status()
|
||||
except Exception as e:
|
||||
print(f"An error occurred: {e}")
|
||||
print(f"An error occurred: {e}") # noqa: T201
|
||||
raise
|
||||
|
||||
vtt = vtt_response.text
|
||||
|
||||
@@ -109,13 +109,13 @@ class BlackboardLoader(WebBaseLoader):
|
||||
documents = []
|
||||
for path in relative_paths:
|
||||
url = self.base_url + path
|
||||
print(f"Fetching documents from {url}")
|
||||
print(f"Fetching documents from {url}") # noqa: T201
|
||||
soup_info = self._scrape(url)
|
||||
with contextlib.suppress(ValueError):
|
||||
documents.extend(self._get_documents(soup_info))
|
||||
return documents
|
||||
else:
|
||||
print(f"Fetching documents from {self.web_path}")
|
||||
print(f"Fetching documents from {self.web_path}") # noqa: T201
|
||||
soup_info = self.scrape()
|
||||
self.folder_path = self._get_folder_path(soup_info)
|
||||
return self._get_documents(soup_info)
|
||||
@@ -295,4 +295,4 @@ if __name__ == "__main__":
|
||||
load_all_recursively=True,
|
||||
)
|
||||
documents = loader.load()
|
||||
print(f"Loaded {len(documents)} pages of PDFs from {loader.web_path}")
|
||||
print(f"Loaded {len(documents)} pages of PDFs from {loader.web_path}") # noqa: T201
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""Use to load blobs from the local file system."""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Callable, Iterable, Iterator, Optional, Sequence, TypeVar, Union
|
||||
|
||||
@@ -46,7 +47,7 @@ class FileSystemBlobLoader(BlobLoader):
|
||||
from langchain_community.document_loaders.blob_loaders import FileSystemBlobLoader
|
||||
loader = FileSystemBlobLoader("/path/to/directory")
|
||||
for blob in loader.yield_blobs():
|
||||
print(blob)
|
||||
print(blob) # noqa: T201
|
||||
""" # noqa: E501
|
||||
|
||||
def __init__(
|
||||
|
||||
@@ -564,7 +564,7 @@ class ConfluenceLoader(BaseLoader):
|
||||
texts.append(text)
|
||||
except requests.HTTPError as e:
|
||||
if e.response.status_code == 404:
|
||||
print(f"Attachment not found at {absolute_url}")
|
||||
print(f"Attachment not found at {absolute_url}") # noqa: T201
|
||||
continue
|
||||
else:
|
||||
raise
|
||||
|
||||
@@ -121,7 +121,7 @@ class DropboxLoader(BaseLoader, BaseModel):
|
||||
file_extension = os.path.splitext(file_path)[1].lower()
|
||||
|
||||
if file_extension == ".pdf":
|
||||
print(f"File {file_path} type detected as .pdf")
|
||||
print(f"File {file_path} type detected as .pdf") # noqa: T201
|
||||
from langchain_community.document_loaders import UnstructuredPDFLoader
|
||||
|
||||
# Download it to a temporary file.
|
||||
@@ -136,10 +136,10 @@ class DropboxLoader(BaseLoader, BaseModel):
|
||||
if docs:
|
||||
return docs[0]
|
||||
except Exception as pdf_ex:
|
||||
print(f"Error while trying to parse PDF {file_path}: {pdf_ex}")
|
||||
print(f"Error while trying to parse PDF {file_path}: {pdf_ex}") # noqa: T201
|
||||
return None
|
||||
else:
|
||||
print(
|
||||
print( # noqa: T201
|
||||
f"File {file_path} could not be decoded as pdf or text. Skipping."
|
||||
)
|
||||
|
||||
|
||||
@@ -85,7 +85,7 @@ class EtherscanLoader(BaseLoader):
|
||||
response = requests.get(url)
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.RequestException as e:
|
||||
print("Error occurred while making the request:", e)
|
||||
print("Error occurred while making the request:", e) # noqa: T201
|
||||
items = response.json()["result"]
|
||||
result = []
|
||||
if len(items) == 0:
|
||||
@@ -94,7 +94,7 @@ class EtherscanLoader(BaseLoader):
|
||||
content = str(item)
|
||||
metadata = {"from": item["from"], "tx_hash": item["hash"], "to": item["to"]}
|
||||
result.append(Document(page_content=content, metadata=metadata))
|
||||
print(len(result))
|
||||
print(len(result)) # noqa: T201
|
||||
return result
|
||||
|
||||
def getEthBalance(self) -> List[Document]:
|
||||
@@ -107,7 +107,7 @@ class EtherscanLoader(BaseLoader):
|
||||
response = requests.get(url)
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.RequestException as e:
|
||||
print("Error occurred while making the request:", e)
|
||||
print("Error occurred while making the request:", e) # noqa: T201
|
||||
return [Document(page_content=response.json()["result"])]
|
||||
|
||||
def getInternalTx(self) -> List[Document]:
|
||||
@@ -122,7 +122,7 @@ class EtherscanLoader(BaseLoader):
|
||||
response = requests.get(url)
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.RequestException as e:
|
||||
print("Error occurred while making the request:", e)
|
||||
print("Error occurred while making the request:", e) # noqa: T201
|
||||
items = response.json()["result"]
|
||||
result = []
|
||||
if len(items) == 0:
|
||||
@@ -145,7 +145,7 @@ class EtherscanLoader(BaseLoader):
|
||||
response = requests.get(url)
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.RequestException as e:
|
||||
print("Error occurred while making the request:", e)
|
||||
print("Error occurred while making the request:", e) # noqa: T201
|
||||
items = response.json()["result"]
|
||||
result = []
|
||||
if len(items) == 0:
|
||||
@@ -168,7 +168,7 @@ class EtherscanLoader(BaseLoader):
|
||||
response = requests.get(url)
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.RequestException as e:
|
||||
print("Error occurred while making the request:", e)
|
||||
print("Error occurred while making the request:", e) # noqa: T201
|
||||
items = response.json()["result"]
|
||||
result = []
|
||||
if len(items) == 0:
|
||||
@@ -191,7 +191,7 @@ class EtherscanLoader(BaseLoader):
|
||||
response = requests.get(url)
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.RequestException as e:
|
||||
print("Error occurred while making the request:", e)
|
||||
print("Error occurred while making the request:", e) # noqa: T201
|
||||
items = response.json()["result"]
|
||||
result = []
|
||||
if len(items) == 0:
|
||||
|
||||
@@ -105,6 +105,6 @@ class GitLoader(BaseLoader):
|
||||
doc = Document(page_content=text_content, metadata=metadata)
|
||||
docs.append(doc)
|
||||
except Exception as e:
|
||||
print(f"Error reading file {file_path}: {e}")
|
||||
print(f"Error reading file {file_path}: {e}") # noqa: T201
|
||||
|
||||
return docs
|
||||
|
||||
@@ -216,9 +216,9 @@ class GoogleDriveLoader(BaseLoader, BaseModel):
|
||||
|
||||
except HttpError as e:
|
||||
if e.resp.status == 404:
|
||||
print("File not found: {}".format(id))
|
||||
print("File not found: {}".format(id)) # noqa: T201
|
||||
else:
|
||||
print("An error occurred: {}".format(e))
|
||||
print("An error occurred: {}".format(e)) # noqa: T201
|
||||
|
||||
text = fh.getvalue().decode("utf-8")
|
||||
metadata = {
|
||||
|
||||
@@ -25,7 +25,7 @@ class NucliaLoader(BaseLoader):
|
||||
return []
|
||||
obj = json.loads(data)
|
||||
text = obj["extracted_text"][0]["body"]["text"]
|
||||
print(text)
|
||||
print(text) # noqa: T201
|
||||
metadata = {
|
||||
"file": obj["file_extracted_data"][0],
|
||||
"metadata": obj["field_metadata"][0],
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""Loads data from OneNote Notebooks"""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Dict, Iterator, List, Optional
|
||||
|
||||
@@ -164,8 +165,8 @@ class OneNoteLoader(BaseLoader, BaseModel):
|
||||
authorization_request_url = client_instance.get_authorization_request_url(
|
||||
self._scopes
|
||||
)
|
||||
print("Visit the following url to give consent:")
|
||||
print(authorization_request_url)
|
||||
print("Visit the following url to give consent:") # noqa: T201
|
||||
print(authorization_request_url) # noqa: T201
|
||||
authorization_url = input("Paste the authenticated url here:\n")
|
||||
|
||||
authorization_code = authorization_url.split("code=")[1].split("&")[0]
|
||||
|
||||
@@ -64,7 +64,7 @@ class OpenAIWhisperParser(BaseBlobParser):
|
||||
file_obj.name = f"part_{split_number}.mp3"
|
||||
|
||||
# Transcribe
|
||||
print(f"Transcribing part {split_number + 1}!")
|
||||
print(f"Transcribing part {split_number + 1}!") # noqa: T201
|
||||
attempts = 0
|
||||
while attempts < 3:
|
||||
try:
|
||||
@@ -77,10 +77,10 @@ class OpenAIWhisperParser(BaseBlobParser):
|
||||
break
|
||||
except Exception as e:
|
||||
attempts += 1
|
||||
print(f"Attempt {attempts} failed. Exception: {str(e)}")
|
||||
print(f"Attempt {attempts} failed. Exception: {str(e)}") # noqa: T201
|
||||
time.sleep(5)
|
||||
else:
|
||||
print("Failed to transcribe after 3 attempts.")
|
||||
print("Failed to transcribe after 3 attempts.") # noqa: T201
|
||||
continue
|
||||
|
||||
yield Document(
|
||||
@@ -169,7 +169,7 @@ class OpenAIWhisperParserLocal(BaseBlobParser):
|
||||
rec_model = "openai/whisper-large"
|
||||
self.lang_model = lang_model if lang_model else rec_model
|
||||
|
||||
print("Using the following model: ", self.lang_model)
|
||||
print("Using the following model: ", self.lang_model) # noqa: T201
|
||||
|
||||
self.batch_size = batch_size
|
||||
|
||||
@@ -216,7 +216,7 @@ class OpenAIWhisperParserLocal(BaseBlobParser):
|
||||
file_obj = io.BytesIO(audio.export(format="mp3").read())
|
||||
|
||||
# Transcribe
|
||||
print(f"Transcribing part {blob.path}!")
|
||||
print(f"Transcribing part {blob.path}!") # noqa: T201
|
||||
|
||||
y, sr = librosa.load(file_obj, sr=16000)
|
||||
|
||||
|
||||
@@ -61,13 +61,13 @@ class VsdxParser(BaseBlobParser, ABC):
|
||||
)
|
||||
|
||||
if "visio/pages/pages.xml" not in zfile.namelist():
|
||||
print("WARNING - No pages.xml file found in {}".format(source))
|
||||
print("WARNING - No pages.xml file found in {}".format(source)) # noqa: T201
|
||||
return # type: ignore[return-value]
|
||||
if "visio/pages/_rels/pages.xml.rels" not in zfile.namelist():
|
||||
print("WARNING - No pages.xml.rels file found in {}".format(source))
|
||||
print("WARNING - No pages.xml.rels file found in {}".format(source)) # noqa: T201
|
||||
return # type: ignore[return-value]
|
||||
if "docProps/app.xml" not in zfile.namelist():
|
||||
print("WARNING - No app.xml file found in {}".format(source))
|
||||
print("WARNING - No app.xml file found in {}".format(source)) # noqa: T201
|
||||
return # type: ignore[return-value]
|
||||
|
||||
pagesxml_content: dict = xmltodict.parse(zfile.read("visio/pages/pages.xml"))
|
||||
|
||||
@@ -480,7 +480,7 @@ class MathpixPDFLoader(BasePDFLoader):
|
||||
# This indicates an error with the PDF processing
|
||||
raise ValueError("Unable to retrieve PDF from Mathpix")
|
||||
else:
|
||||
print(f"Status: {status}, waiting for processing to complete")
|
||||
print(f"Status: {status}, waiting for processing to complete") # noqa: T201
|
||||
time.sleep(5)
|
||||
raise TimeoutError
|
||||
|
||||
|
||||
@@ -88,7 +88,7 @@ class SnowflakeLoader(BaseLoader):
|
||||
column_names = [column[0] for column in cur.description]
|
||||
query_result = [dict(zip(column_names, row)) for row in query_result]
|
||||
except Exception as e:
|
||||
print(f"An error occurred: {e}")
|
||||
print(f"An error occurred: {e}") # noqa: T201
|
||||
query_result = []
|
||||
finally:
|
||||
cur.close()
|
||||
@@ -110,7 +110,7 @@ class SnowflakeLoader(BaseLoader):
|
||||
def lazy_load(self) -> Iterator[Document]:
|
||||
query_result = self._execute_query()
|
||||
if isinstance(query_result, Exception):
|
||||
print(f"An error occurred during the query: {query_result}")
|
||||
print(f"An error occurred during the query: {query_result}") # noqa: T201
|
||||
return []
|
||||
page_content_columns, metadata_columns = self._get_columns(query_result)
|
||||
if "*" in page_content_columns:
|
||||
|
||||
@@ -44,4 +44,4 @@ class TomlLoader(BaseLoader):
|
||||
)
|
||||
yield doc
|
||||
except tomli.TOMLDecodeError as e:
|
||||
print(f"Error parsing TOML file {file_path}: {e}")
|
||||
print(f"Error parsing TOML file {file_path}: {e}") # noqa: T201
|
||||
|
||||
@@ -79,14 +79,14 @@ class BaichuanTextEmbeddings(BaseModel, Embeddings):
|
||||
return [result.get("embedding", []) for result in sorted_embeddings]
|
||||
else:
|
||||
# Log error or handle unsuccessful response appropriately
|
||||
print(
|
||||
print( # noqa: T201
|
||||
f"""Error: Received status code {response.status_code} from
|
||||
embedding API"""
|
||||
)
|
||||
return None
|
||||
except Exception as e:
|
||||
# Log the exception or handle it as needed
|
||||
print(f"Exception occurred while trying to get embeddings: {str(e)}")
|
||||
print(f"Exception occurred while trying to get embeddings: {str(e)}") # noqa: T201
|
||||
return None
|
||||
|
||||
def embed_documents(self, texts: List[str]) -> Optional[List[List[float]]]: # type: ignore[override]
|
||||
|
||||
@@ -74,7 +74,7 @@ class JavelinAIGatewayEmbeddings(Embeddings, BaseModel):
|
||||
if "embedding" in item:
|
||||
embeddings.append(item["embedding"])
|
||||
except ValueError as e:
|
||||
print("Failed to query route: " + str(e))
|
||||
print("Failed to query route: " + str(e)) # noqa: T201
|
||||
|
||||
return embeddings
|
||||
|
||||
@@ -92,7 +92,7 @@ class JavelinAIGatewayEmbeddings(Embeddings, BaseModel):
|
||||
if "embedding" in item:
|
||||
embeddings.append(item["embedding"])
|
||||
except ValueError as e:
|
||||
print("Failed to query route: " + str(e))
|
||||
print("Failed to query route: " + str(e)) # noqa: T201
|
||||
|
||||
return embeddings
|
||||
|
||||
|
||||
@@ -284,4 +284,4 @@ class AlephAlpha(LLM):
|
||||
if __name__ == "__main__":
|
||||
aa = AlephAlpha()
|
||||
|
||||
print(aa("How are you?"))
|
||||
print(aa("How are you?")) # noqa: T201
|
||||
|
||||
@@ -187,7 +187,7 @@ class Beam(LLM):
|
||||
do_sample=True, pad_token_id=tokenizer.eos_token_id)
|
||||
output = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
||||
|
||||
print(output)
|
||||
print(output) # noqa: T201
|
||||
return {{"text": output}}
|
||||
|
||||
"""
|
||||
|
||||
@@ -177,7 +177,7 @@ class DeepSparse(LLM):
|
||||
)
|
||||
for chunk in llm.stream("Tell me a joke",
|
||||
stop=["'","\n"]):
|
||||
print(chunk, end='', flush=True)
|
||||
print(chunk, end='', flush=True) # noqa: T201
|
||||
"""
|
||||
inference = self.pipeline(
|
||||
sequences=prompt, streaming=True, **self.generation_config
|
||||
@@ -215,7 +215,7 @@ class DeepSparse(LLM):
|
||||
)
|
||||
for chunk in llm.stream("Tell me a joke",
|
||||
stop=["'","\n"]):
|
||||
print(chunk, end='', flush=True)
|
||||
print(chunk, end='', flush=True) # noqa: T201
|
||||
"""
|
||||
inference = self.pipeline(
|
||||
sequences=prompt, streaming=True, **self.generation_config
|
||||
|
||||
@@ -33,7 +33,7 @@ class HuggingFaceTextGenInference(LLM):
|
||||
temperature=0.01,
|
||||
repetition_penalty=1.03,
|
||||
)
|
||||
print(llm("What is Deep Learning?"))
|
||||
print(llm("What is Deep Learning?")) # noqa: T201
|
||||
|
||||
# Streaming response example
|
||||
from langchain_community.callbacks import streaming_stdout
|
||||
@@ -50,7 +50,7 @@ class HuggingFaceTextGenInference(LLM):
|
||||
callbacks=callbacks,
|
||||
streaming=True
|
||||
)
|
||||
print(llm("What is Deep Learning?"))
|
||||
print(llm("What is Deep Learning?")) # noqa: T201
|
||||
|
||||
"""
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ from langchain_community.llms.utils import enforce_stop_tokens
|
||||
|
||||
def _display_prompt(prompt: str) -> None:
|
||||
"""Displays the given prompt to the user."""
|
||||
print(f"\n{prompt}")
|
||||
print(f"\n{prompt}") # noqa: T201
|
||||
|
||||
|
||||
def _collect_user_input(
|
||||
|
||||
@@ -333,7 +333,7 @@ class LlamaCpp(LLM):
|
||||
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
|
||||
stop=["'","\n"]):
|
||||
result = chunk["choices"][0]
|
||||
print(result["text"], end='', flush=True)
|
||||
print(result["text"], end='', flush=True) # noqa: T201
|
||||
|
||||
"""
|
||||
params = {**self._get_parameters(stop), **kwargs}
|
||||
|
||||
@@ -219,7 +219,7 @@ class TextGen(LLM):
|
||||
if response.status_code == 200:
|
||||
result = response.json()["results"][0]["text"]
|
||||
else:
|
||||
print(f"ERROR: response: {response}")
|
||||
print(f"ERROR: response: {response}") # noqa: T201
|
||||
result = ""
|
||||
|
||||
return result
|
||||
@@ -265,7 +265,7 @@ class TextGen(LLM):
|
||||
if response.status_code == 200:
|
||||
result = response.json()["results"][0]["text"]
|
||||
else:
|
||||
print(f"ERROR: response: {response}")
|
||||
print(f"ERROR: response: {response}") # noqa: T201
|
||||
result = ""
|
||||
|
||||
return result
|
||||
@@ -303,7 +303,7 @@ class TextGen(LLM):
|
||||
)
|
||||
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
|
||||
stop=["'","\n"]):
|
||||
print(chunk, end='', flush=True)
|
||||
print(chunk, end='', flush=True) # noqa: T201
|
||||
|
||||
"""
|
||||
try:
|
||||
@@ -376,7 +376,7 @@ class TextGen(LLM):
|
||||
)
|
||||
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
|
||||
stop=["'","\n"]):
|
||||
print(chunk, end='', flush=True)
|
||||
print(chunk, end='', flush=True) # noqa: T201
|
||||
|
||||
"""
|
||||
try:
|
||||
|
||||
@@ -166,24 +166,30 @@ class WatsonxLLM(BaseLLM):
|
||||
|
||||
credentials = {
|
||||
"url": values["url"].get_secret_value() if values["url"] else None,
|
||||
"apikey": values["apikey"].get_secret_value()
|
||||
if values["apikey"]
|
||||
else None,
|
||||
"token": values["token"].get_secret_value()
|
||||
if values["token"]
|
||||
else None,
|
||||
"password": values["password"].get_secret_value()
|
||||
if values["password"]
|
||||
else None,
|
||||
"username": values["username"].get_secret_value()
|
||||
if values["username"]
|
||||
else None,
|
||||
"instance_id": values["instance_id"].get_secret_value()
|
||||
if values["instance_id"]
|
||||
else None,
|
||||
"version": values["version"].get_secret_value()
|
||||
if values["version"]
|
||||
else None,
|
||||
"apikey": (
|
||||
values["apikey"].get_secret_value() if values["apikey"] else None
|
||||
),
|
||||
"token": (
|
||||
values["token"].get_secret_value() if values["token"] else None
|
||||
),
|
||||
"password": (
|
||||
values["password"].get_secret_value()
|
||||
if values["password"]
|
||||
else None
|
||||
),
|
||||
"username": (
|
||||
values["username"].get_secret_value()
|
||||
if values["username"]
|
||||
else None
|
||||
),
|
||||
"instance_id": (
|
||||
values["instance_id"].get_secret_value()
|
||||
if values["instance_id"]
|
||||
else None
|
||||
),
|
||||
"version": (
|
||||
values["version"].get_secret_value() if values["version"] else None
|
||||
),
|
||||
}
|
||||
credentials_without_none_value = {
|
||||
key: value for key, value in credentials.items() if value is not None
|
||||
@@ -384,7 +390,7 @@ class WatsonxLLM(BaseLLM):
|
||||
|
||||
response = watsonx_llm.stream("What is a molecule")
|
||||
for chunk in response:
|
||||
print(chunk, end='')
|
||||
print(chunk, end='') # noqa: T201
|
||||
"""
|
||||
params = self._get_chat_params(stop=stop)
|
||||
for stream_resp in self.watsonx_model.generate_text_stream(
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""Retriever wrapper for Google Vertex AI Search."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence
|
||||
@@ -90,9 +91,11 @@ class _BaseGoogleVertexAISearchRetriever(BaseModel):
|
||||
from google.api_core.client_options import ClientOptions
|
||||
|
||||
return ClientOptions(
|
||||
api_endpoint=f"{self.location_id}-discoveryengine.googleapis.com"
|
||||
if self.location_id != "global"
|
||||
else None
|
||||
api_endpoint=(
|
||||
f"{self.location_id}-discoveryengine.googleapis.com"
|
||||
if self.location_id != "global"
|
||||
else None
|
||||
)
|
||||
)
|
||||
|
||||
def _convert_structured_search_response(
|
||||
@@ -188,9 +191,9 @@ class _BaseGoogleVertexAISearchRetriever(BaseModel):
|
||||
)
|
||||
|
||||
if not documents:
|
||||
print(f"No {chunk_type} could be found.")
|
||||
print(f"No {chunk_type} could be found.") # noqa: T201
|
||||
if chunk_type == "extractive_answers":
|
||||
print(
|
||||
print( # noqa: T201
|
||||
"Make sure that your data store is using Advanced Website "
|
||||
"Indexing.\n"
|
||||
"https://cloud.google.com/generative-ai-app-builder/docs/about-advanced-features#advanced-website-indexing" # noqa: E501
|
||||
|
||||
@@ -32,7 +32,7 @@ class RedisStore(ByteStore):
|
||||
|
||||
# Iterate over keys
|
||||
for key in redis_store.yield_keys():
|
||||
print(key)
|
||||
print(key) # noqa: T201
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
|
||||
@@ -105,7 +105,7 @@ class AmadeusFlightSearch(AmadeusBaseTool):
|
||||
adults=1,
|
||||
)
|
||||
except ResponseError as error:
|
||||
print(error)
|
||||
print(error) # noqa: T201
|
||||
|
||||
# Generate output dictionary
|
||||
output = []
|
||||
|
||||
@@ -59,7 +59,7 @@ If you have any files outputted write them to "output/" relative to the executio
|
||||
path. Output can only be read from the directory, stdout, and stdin. \
|
||||
Do not use things like plot.show() as it will \
|
||||
not work instead write them out `output/` and a link to the file will be returned. \
|
||||
print() any output and results so you can capture the output."""
|
||||
print() any output and results so you can capture the output.""" # noqa: T201
|
||||
|
||||
|
||||
class FileInfo(BaseModel):
|
||||
@@ -125,12 +125,16 @@ class BearlyInterpreterTool:
|
||||
headers={"Authorization": self.api_key},
|
||||
).json()
|
||||
return {
|
||||
"stdout": base64.b64decode(resp["stdoutBasesixtyfour"]).decode()
|
||||
if resp["stdoutBasesixtyfour"]
|
||||
else "",
|
||||
"stderr": base64.b64decode(resp["stderrBasesixtyfour"]).decode()
|
||||
if resp["stderrBasesixtyfour"]
|
||||
else "",
|
||||
"stdout": (
|
||||
base64.b64decode(resp["stdoutBasesixtyfour"]).decode()
|
||||
if resp["stdoutBasesixtyfour"]
|
||||
else ""
|
||||
),
|
||||
"stderr": (
|
||||
base64.b64decode(resp["stderrBasesixtyfour"]).decode()
|
||||
if resp["stderrBasesixtyfour"]
|
||||
else ""
|
||||
),
|
||||
"fileLinks": resp["fileLinks"],
|
||||
"exitCode": resp["exitCode"],
|
||||
}
|
||||
|
||||
@@ -8,8 +8,8 @@ from langchain_core.tools import BaseTool
|
||||
|
||||
|
||||
def _print_func(text: str) -> None:
|
||||
print("\n")
|
||||
print(text)
|
||||
print("\n") # noqa: T201
|
||||
print(text) # noqa: T201
|
||||
|
||||
|
||||
class HumanInputRun(BaseTool):
|
||||
|
||||
@@ -52,6 +52,6 @@ Note: SessionId must be received from previous Browser window creation."""
|
||||
try:
|
||||
multion.close_session(sessionId)
|
||||
except Exception as e:
|
||||
print(f"{e}, retrying...")
|
||||
print(f"{e}, retrying...") # noqa: T201
|
||||
except Exception as e:
|
||||
raise Exception(f"An error occurred: {e}")
|
||||
|
||||
@@ -68,7 +68,7 @@ Note: sessionId must be received from previous Browser window creation."""
|
||||
self.sessionId = sessionId
|
||||
return content
|
||||
except Exception as e:
|
||||
print(f"{e}, retrying...")
|
||||
print(f"{e}, retrying...") # noqa: T201
|
||||
return {"error": f"{e}", "Response": "retrying..."}
|
||||
except Exception as e:
|
||||
raise Exception(f"An error occurred: {e}")
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""O365 tool utils."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
@@ -66,7 +67,7 @@ def authenticate() -> Account:
|
||||
"https://graph.microsoft.com/MailboxSettings.ReadWrite",
|
||||
]
|
||||
):
|
||||
print("Error: Could not authenticate")
|
||||
print("Error: Could not authenticate") # noqa: T201
|
||||
return None
|
||||
else:
|
||||
return account
|
||||
|
||||
@@ -84,7 +84,7 @@ class ShellTool(BaseTool):
|
||||
) -> str:
|
||||
"""Run commands and return final output."""
|
||||
|
||||
print(f"Executing command:\n {commands}")
|
||||
print(f"Executing command:\n {commands}") # noqa: T201
|
||||
|
||||
try:
|
||||
if self.ask_human_input:
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""Util that calls clickup."""
|
||||
|
||||
import json
|
||||
import warnings
|
||||
from dataclasses import asdict, dataclass, fields
|
||||
@@ -308,10 +309,10 @@ class ClickupAPIWrapper(BaseModel):
|
||||
data = response.json()
|
||||
|
||||
if "access_token" not in data:
|
||||
print(f"Error: {data}")
|
||||
print(f"Error: {data}") # noqa: T201
|
||||
if "ECODE" in data and data["ECODE"] == "OAUTH_014":
|
||||
url = ClickupAPIWrapper.get_access_code_url(oauth_client_id)
|
||||
print(
|
||||
print( # noqa: T201
|
||||
"You already used this code once. Generate a new one.",
|
||||
f"Our best guess for the url to get a new code is:\n{url}",
|
||||
)
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""Util that calls GitHub."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
@@ -297,7 +298,7 @@ class GitHubAPIWrapper(BaseModel):
|
||||
new_branch_name = f"{proposed_branch_name}_v{i}"
|
||||
else:
|
||||
# Handle any other exceptions
|
||||
print(f"Failed to create branch. Error: {e}")
|
||||
print(f"Failed to create branch. Error: {e}") # noqa: T201
|
||||
raise Exception(
|
||||
"Unable to create branch name from proposed_branch_name: "
|
||||
f"{proposed_branch_name}"
|
||||
@@ -427,7 +428,7 @@ class GitHubAPIWrapper(BaseModel):
|
||||
"download_url"
|
||||
]
|
||||
else:
|
||||
print(f"Failed to download file: {file.contents_url}, skipping")
|
||||
print(f"Failed to download file: {file.contents_url}, skipping") # noqa: T201
|
||||
continue
|
||||
|
||||
file_content_response = requests.get(download_url)
|
||||
@@ -435,7 +436,7 @@ class GitHubAPIWrapper(BaseModel):
|
||||
# Save the content as a UTF-8 string
|
||||
file_content = file_content_response.text
|
||||
else:
|
||||
print(
|
||||
print( # noqa: T201
|
||||
"Failed downloading file content "
|
||||
f"(Error {file_content_response.status_code}). Skipping"
|
||||
)
|
||||
@@ -457,7 +458,7 @@ class GitHubAPIWrapper(BaseModel):
|
||||
)
|
||||
total_tokens += file_tokens
|
||||
except Exception as e:
|
||||
print(f"Error when reading files from a PR on github. {e}")
|
||||
print(f"Error when reading files from a PR on github. {e}") # noqa: T201
|
||||
page += 1
|
||||
return pr_files
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""Util that calls Google Lens Search."""
|
||||
|
||||
from typing import Any, Dict, Optional, cast
|
||||
|
||||
import requests
|
||||
@@ -77,7 +78,7 @@ class GoogleLensAPIWrapper(BaseModel):
|
||||
"Reverse Image Search"
|
||||
+ f"Link: {responseValue['reverse_image_search']['link']}\n"
|
||||
)
|
||||
print(xs)
|
||||
print(xs) # noqa: T201
|
||||
|
||||
docs = [xs]
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""A common module for NVIDIA Riva Runnables."""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import pathlib
|
||||
@@ -73,7 +74,7 @@ class RivaAudioEncoding(str, Enum):
|
||||
with the following commands:
|
||||
```python
|
||||
import riva.client
|
||||
print(riva.client.AudioEncoding.keys())
|
||||
print(riva.client.AudioEncoding.keys()) # noqa: T201
|
||||
```
|
||||
"""
|
||||
|
||||
|
||||
@@ -144,7 +144,7 @@ class PubMedAPIWrapper(BaseModel):
|
||||
if e.code == 429 and retry < self.max_retry:
|
||||
# Too Many Requests errors
|
||||
# wait for an exponentially increasing amount of time
|
||||
print(
|
||||
print( # noqa: T201
|
||||
f"Too Many Requests, "
|
||||
f"waiting for {self.sleep_time:.2f} seconds..."
|
||||
)
|
||||
|
||||
@@ -225,7 +225,7 @@ class SearxSearchWrapper(BaseModel):
|
||||
|
||||
urllib3.disable_warnings()
|
||||
except ImportError as e:
|
||||
print(e)
|
||||
print(e) # noqa: T201
|
||||
|
||||
return v
|
||||
|
||||
@@ -246,7 +246,7 @@ class SearxSearchWrapper(BaseModel):
|
||||
|
||||
searx_host = get_from_dict_or_env(values, "searx_host", "SEARX_HOST")
|
||||
if not searx_host.startswith("http"):
|
||||
print(
|
||||
print( # noqa: T201
|
||||
f"Warning: missing the url scheme on host \
|
||||
! assuming secure https://{searx_host} "
|
||||
)
|
||||
|
||||
@@ -347,7 +347,7 @@ class AnalyticDB(VectorStore):
|
||||
conn.execute(chunks_table.delete().where(delete_condition))
|
||||
return True
|
||||
except Exception as e:
|
||||
print("Delete operation failed:", str(e))
|
||||
print("Delete operation failed:", str(e)) # noqa: T201
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
|
||||
@@ -431,7 +431,7 @@ class Jaguar(VectorStore):
|
||||
|
||||
def prt(self, msg: str) -> None:
|
||||
with open("/tmp/debugjaguar.log", "a") as file:
|
||||
print(f"msg={msg}", file=file, flush=True)
|
||||
print(f"msg={msg}", file=file, flush=True) # noqa: T201
|
||||
|
||||
def _parseMeta(self, nvmap: dict, filecol: str) -> Tuple[List[str], List[str], str]:
|
||||
filepath = ""
|
||||
|
||||
@@ -437,10 +437,10 @@ class Marqo(VectorStore):
|
||||
try:
|
||||
client.create_index(index_name, settings_dict=index_settings or {})
|
||||
if verbose:
|
||||
print(f"Created {index_name} successfully.")
|
||||
print(f"Created {index_name} successfully.") # noqa: T201
|
||||
except Exception:
|
||||
if verbose:
|
||||
print(f"Index {index_name} exists.")
|
||||
print(f"Index {index_name} exists.") # noqa: T201
|
||||
|
||||
instance: Marqo = cls(
|
||||
client,
|
||||
|
||||
@@ -207,9 +207,9 @@ class PGEmbedding(VectorStore):
|
||||
# Create the HNSW index
|
||||
session.execute(create_index_query)
|
||||
session.commit()
|
||||
print("HNSW extension and index created successfully.")
|
||||
print("HNSW extension and index created successfully.") # noqa: T201
|
||||
except Exception as e:
|
||||
print(f"Failed to create HNSW extension or index: {e}")
|
||||
print(f"Failed to create HNSW extension or index: {e}") # noqa: T201
|
||||
|
||||
def delete_collection(self) -> None:
|
||||
self.logger.debug("Trying to delete collection")
|
||||
|
||||
@@ -146,7 +146,7 @@ class SemaDB(VectorStore):
|
||||
headers=self.headers,
|
||||
)
|
||||
if response.status_code != 200:
|
||||
print("HERE--", batch)
|
||||
print("HERE--", batch) # noqa: T201
|
||||
raise ValueError(f"Error adding points: {response.text}")
|
||||
failed_ranges = response.json()["failedRanges"]
|
||||
if len(failed_ranges) > 0:
|
||||
|
||||
@@ -38,7 +38,7 @@ def debug_output(s: Any) -> None:
|
||||
s: The message to print
|
||||
"""
|
||||
if DEBUG:
|
||||
print(s)
|
||||
print(s) # noqa: T201
|
||||
|
||||
|
||||
def get_named_result(connection: Any, query: str) -> List[dict[str, Any]]:
|
||||
@@ -217,9 +217,11 @@ CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
|
||||
for n in transac:
|
||||
n = ",".join(
|
||||
[
|
||||
f"'{self.escape_str(str(_n))}'"
|
||||
if idx != embed_tuple_index
|
||||
else f"array<float>{str(_n)}"
|
||||
(
|
||||
f"'{self.escape_str(str(_n))}'"
|
||||
if idx != embed_tuple_index
|
||||
else f"array<float>{str(_n)}"
|
||||
)
|
||||
for (idx, _n) in enumerate(n)
|
||||
]
|
||||
)
|
||||
|
||||
@@ -308,7 +308,7 @@ class Vectara(VectorStore):
|
||||
self._delete_doc(doc_id)
|
||||
self._index_doc(doc)
|
||||
elif success_str == "E_NO_PERMISSIONS":
|
||||
print(
|
||||
print( # noqa: T201
|
||||
"""No permissions to add document to Vectara.
|
||||
Check your corpus ID, customer ID and API key"""
|
||||
)
|
||||
@@ -339,9 +339,11 @@ class Vectara(VectorStore):
|
||||
{
|
||||
"query": query,
|
||||
"start": 0,
|
||||
"numResults": config.mmr_config.mmr_k
|
||||
if config.mmr_config.is_enabled
|
||||
else config.k,
|
||||
"numResults": (
|
||||
config.mmr_config.mmr_k
|
||||
if config.mmr_config.is_enabled
|
||||
else config.k
|
||||
),
|
||||
"contextConfig": {
|
||||
"sentencesBefore": config.n_sentence_context,
|
||||
"sentencesAfter": config.n_sentence_context,
|
||||
|
||||
@@ -270,6 +270,7 @@ select = [
|
||||
"E", # pycodestyle
|
||||
"F", # pyflakes
|
||||
"I", # isort
|
||||
"T201", # print
|
||||
]
|
||||
|
||||
[tool.mypy]
|
||||
|
||||
@@ -15,8 +15,8 @@ if __name__ == "__main__":
|
||||
SourceFileLoader(module_name, file).load_module()
|
||||
except Exception:
|
||||
has_failure = True
|
||||
print(file)
|
||||
print(file) # noqa: T201
|
||||
traceback.print_exc()
|
||||
print()
|
||||
print() # noqa: T201
|
||||
|
||||
sys.exit(1 if has_failure else 0)
|
||||
|
||||
@@ -4,7 +4,7 @@ import sys
|
||||
|
||||
|
||||
def main() -> int:
|
||||
print("Hello World!")
|
||||
print("Hello World!") # noqa: T201
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""Integration tests for the langchain tracer module."""
|
||||
|
||||
import asyncio
|
||||
|
||||
from langchain_community.callbacks import get_openai_callback
|
||||
@@ -62,7 +63,7 @@ def test_openai_callback_agent() -> None:
|
||||
"Who is Olivia Wilde's boyfriend? "
|
||||
"What is his current age raised to the 0.23 power?"
|
||||
)
|
||||
print(f"Total Tokens: {cb.total_tokens}")
|
||||
print(f"Prompt Tokens: {cb.prompt_tokens}")
|
||||
print(f"Completion Tokens: {cb.completion_tokens}")
|
||||
print(f"Total Cost (USD): ${cb.total_cost}")
|
||||
print(f"Total Tokens: {cb.total_tokens}") # noqa: T201
|
||||
print(f"Prompt Tokens: {cb.prompt_tokens}") # noqa: T201
|
||||
print(f"Completion Tokens: {cb.completion_tokens}") # noqa: T201
|
||||
print(f"Total Cost (USD): ${cb.total_cost}") # noqa: T201
|
||||
|
||||
@@ -50,7 +50,7 @@ def test_chat_baichuan_with_kwargs() -> None:
|
||||
chat = ChatBaichuan()
|
||||
message = HumanMessage(content="百川192K API是什么时候上线的?")
|
||||
response = chat([message], temperature=0.88, top_p=0.7, with_search_enhance=True)
|
||||
print(response)
|
||||
print(response) # noqa: T201
|
||||
assert isinstance(response, AIMessage)
|
||||
assert isinstance(response.content, str)
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""Test GPTRouter API wrapper."""
|
||||
|
||||
from typing import List
|
||||
|
||||
import pytest
|
||||
@@ -29,7 +30,7 @@ def test_api_key_masked_when_passed_via_constructor(
|
||||
gpt_router_api_base="https://example.com",
|
||||
gpt_router_api_key="secret-api-key",
|
||||
)
|
||||
print(gpt_router.gpt_router_api_key, end="")
|
||||
print(gpt_router.gpt_router_api_key, end="") # noqa: T201
|
||||
captured = capsys.readouterr()
|
||||
|
||||
assert captured.out == "**********"
|
||||
|
||||
@@ -24,7 +24,7 @@ def test_jinachat_api_key_masked_when_passed_from_env(
|
||||
"""Test initialization with an API key provided via an env variable"""
|
||||
monkeypatch.setenv("JINACHAT_API_KEY", "secret-api-key")
|
||||
llm = JinaChat()
|
||||
print(llm.jinachat_api_key, end="")
|
||||
print(llm.jinachat_api_key, end="") # noqa: T201
|
||||
captured = capsys.readouterr()
|
||||
|
||||
assert captured.out == "**********"
|
||||
@@ -35,7 +35,7 @@ def test_jinachat_api_key_masked_when_passed_via_constructor(
|
||||
) -> None:
|
||||
"""Test initialization with an API key provided via the initializer"""
|
||||
llm = JinaChat(jinachat_api_key="secret-api-key")
|
||||
print(llm.jinachat_api_key, end="")
|
||||
print(llm.jinachat_api_key, end="") # noqa: T201
|
||||
captured = capsys.readouterr()
|
||||
|
||||
assert captured.out == "**********"
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""Evaluate ChatKonko Interface."""
|
||||
|
||||
from typing import Any, cast
|
||||
|
||||
import pytest
|
||||
@@ -21,11 +22,11 @@ def test_konko_key_masked_when_passed_from_env(
|
||||
|
||||
chat = ChatKonko()
|
||||
|
||||
print(chat.openai_api_key, end="")
|
||||
print(chat.openai_api_key, end="") # noqa: T201
|
||||
captured = capsys.readouterr()
|
||||
assert captured.out == "**********"
|
||||
|
||||
print(chat.konko_api_key, end="")
|
||||
print(chat.konko_api_key, end="") # noqa: T201
|
||||
captured = capsys.readouterr()
|
||||
assert captured.out == "**********"
|
||||
|
||||
@@ -36,11 +37,11 @@ def test_konko_key_masked_when_passed_via_constructor(
|
||||
"""Test initialization with an API key provided via the initializer"""
|
||||
chat = ChatKonko(openai_api_key="test-openai-key", konko_api_key="test-konko-key")
|
||||
|
||||
print(chat.konko_api_key, end="")
|
||||
print(chat.konko_api_key, end="") # noqa: T201
|
||||
captured = capsys.readouterr()
|
||||
assert captured.out == "**********"
|
||||
|
||||
print(chat.konko_secret_key, end="") # type: ignore[attr-defined]
|
||||
print(chat.konko_secret_key, end="") # type: ignore[attr-defined] # noqa: T201
|
||||
captured = capsys.readouterr()
|
||||
assert captured.out == "**********"
|
||||
|
||||
|
||||
@@ -46,7 +46,7 @@ def test_chat_wasm_service_streaming() -> None:
|
||||
|
||||
output = ""
|
||||
for chunk in chat.stream(messages):
|
||||
print(chunk.content, end="", flush=True)
|
||||
print(chunk.content, end="", flush=True) # noqa: T201
|
||||
output += chunk.content # type: ignore[operator]
|
||||
|
||||
assert "Paris" in output
|
||||
|
||||
@@ -318,11 +318,11 @@ def test_qianfan_key_masked_when_passed_from_env(
|
||||
monkeypatch.setenv("QIANFAN_SK", "test-secret-key")
|
||||
|
||||
chat = QianfanChatEndpoint()
|
||||
print(chat.qianfan_ak, end="")
|
||||
print(chat.qianfan_ak, end="") # noqa: T201
|
||||
captured = capsys.readouterr()
|
||||
assert captured.out == "**********"
|
||||
|
||||
print(chat.qianfan_sk, end="")
|
||||
print(chat.qianfan_sk, end="") # noqa: T201
|
||||
captured = capsys.readouterr()
|
||||
assert captured.out == "**********"
|
||||
|
||||
@@ -335,11 +335,11 @@ def test_qianfan_key_masked_when_passed_via_constructor(
|
||||
qianfan_ak="test-api-key",
|
||||
qianfan_sk="test-secret-key",
|
||||
)
|
||||
print(chat.qianfan_ak, end="")
|
||||
print(chat.qianfan_ak, end="") # noqa: T201
|
||||
captured = capsys.readouterr()
|
||||
assert captured.out == "**********"
|
||||
|
||||
print(chat.qianfan_sk, end="")
|
||||
print(chat.qianfan_sk, end="") # noqa: T201
|
||||
captured = capsys.readouterr()
|
||||
|
||||
assert captured.out == "**********"
|
||||
|
||||
@@ -22,7 +22,7 @@ def test_chat_spark_llm_with_domain() -> None:
|
||||
chat = ChatSparkLLM(spark_llm_domain="generalv3")
|
||||
message = HumanMessage(content="Hello")
|
||||
response = chat([message])
|
||||
print(response)
|
||||
print(response) # noqa: T201
|
||||
assert isinstance(response, AIMessage)
|
||||
assert isinstance(response.content, str)
|
||||
|
||||
@@ -31,6 +31,6 @@ def test_chat_spark_llm_with_temperature() -> None:
|
||||
chat = ChatSparkLLM(temperature=0.9, top_k=2)
|
||||
message = HumanMessage(content="Hello")
|
||||
response = chat([message])
|
||||
print(response)
|
||||
print(response) # noqa: T201
|
||||
assert isinstance(response, AIMessage)
|
||||
assert isinstance(response.content, str)
|
||||
|
||||
@@ -19,7 +19,7 @@ def test_api_key_masked_when_passed_via_constructor(
|
||||
capsys: CaptureFixture,
|
||||
) -> None:
|
||||
llm = ChatTongyi(dashscope_api_key="secret-api-key")
|
||||
print(llm.dashscope_api_key, end="")
|
||||
print(llm.dashscope_api_key, end="") # noqa: T201
|
||||
captured = capsys.readouterr()
|
||||
|
||||
assert captured.out == "**********"
|
||||
|
||||
@@ -7,6 +7,7 @@ pip install google-cloud-aiplatform>=1.35.0
|
||||
Your end-user credentials would be used to make the calls (make sure you've run
|
||||
`gcloud auth login` first).
|
||||
"""
|
||||
|
||||
from typing import Optional
|
||||
from unittest.mock import MagicMock, Mock, patch
|
||||
|
||||
@@ -288,7 +289,7 @@ def test_parse_examples_correct() -> None:
|
||||
def test_parse_examples_failes_wrong_sequence() -> None:
|
||||
with pytest.raises(ValueError) as exc_info:
|
||||
_ = _parse_examples([AIMessage(content="a")])
|
||||
print(str(exc_info.value))
|
||||
print(str(exc_info.value)) # noqa: T201
|
||||
assert (
|
||||
str(exc_info.value)
|
||||
== "Expect examples to have an even amount of messages, got 1."
|
||||
|
||||
@@ -29,7 +29,7 @@ def test_language_loader_for_python() -> None:
|
||||
assert (
|
||||
docs[0].page_content
|
||||
== """def main():
|
||||
print("Hello World!")
|
||||
print("Hello World!") # noqa: T201
|
||||
|
||||
return 0"""
|
||||
)
|
||||
@@ -67,7 +67,7 @@ def esprima_installed() -> bool:
|
||||
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f"esprima not installed, skipping test {e}")
|
||||
print(f"esprima not installed, skipping test {e}") # noqa: T201
|
||||
return False
|
||||
|
||||
|
||||
|
||||
@@ -19,8 +19,8 @@ def test_load_success() -> None:
|
||||
|
||||
docs = loader.load()
|
||||
assert len(docs) == 1
|
||||
print(docs[0].metadata)
|
||||
print(docs[0].page_content)
|
||||
print(docs[0].metadata) # noqa: T201
|
||||
print(docs[0].page_content) # noqa: T201
|
||||
assert_docs(docs)
|
||||
|
||||
|
||||
@@ -53,7 +53,7 @@ def test_load_returns_full_set_of_metadata() -> None:
|
||||
assert set(doc.metadata).issuperset(
|
||||
{"Published", "Title", "Authors", "Summary"}
|
||||
)
|
||||
print(doc.metadata)
|
||||
print(doc.metadata) # noqa: T201
|
||||
assert len(set(doc.metadata)) > 4
|
||||
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ def test_get_nfts_valid_contract() -> None:
|
||||
)
|
||||
result = BlockchainDocumentLoader(contract_address).load()
|
||||
|
||||
print("Tokens returned for valid contract: ", len(result))
|
||||
print("Tokens returned for valid contract: ", len(result)) # noqa: T201
|
||||
|
||||
assert len(result) == max_alchemy_tokens, (
|
||||
f"Wrong number of NFTs returned. "
|
||||
@@ -43,7 +43,7 @@ def test_get_nfts_with_pagination() -> None:
|
||||
startToken=startToken,
|
||||
).load()
|
||||
|
||||
print("Tokens returned for contract with offset: ", len(result))
|
||||
print("Tokens returned for contract with offset: ", len(result)) # noqa: T201
|
||||
|
||||
assert len(result) > 0, "No NFTs returned"
|
||||
|
||||
@@ -57,7 +57,7 @@ def test_get_nfts_polygon() -> None:
|
||||
contract_address, BlockchainType.POLYGON_MAINNET
|
||||
).load()
|
||||
|
||||
print("Tokens returned for contract on Polygon: ", len(result))
|
||||
print("Tokens returned for contract on Polygon: ", len(result)) # noqa: T201
|
||||
|
||||
assert len(result) > 0, "No NFTs returned"
|
||||
|
||||
@@ -94,7 +94,7 @@ def test_get_all() -> None:
|
||||
|
||||
end_time = time.time()
|
||||
|
||||
print(
|
||||
print( # noqa: T201
|
||||
f"Tokens returned for {contract_address} "
|
||||
f"contract: {len(result)} in {end_time - start_time} seconds"
|
||||
)
|
||||
@@ -121,4 +121,4 @@ def test_get_all_10sec_timeout() -> None:
|
||||
|
||||
end_time = time.time()
|
||||
|
||||
print("Execution took ", end_time - start_time, " seconds")
|
||||
print("Execution took ", end_time - start_time, " seconds") # noqa: T201
|
||||
|
||||
@@ -31,11 +31,11 @@ class TestCouchbaseLoader(unittest.TestCase):
|
||||
metadata_fields=self.valid_metadata_fields,
|
||||
)
|
||||
docs = loader.load()
|
||||
print(docs)
|
||||
print(docs) # noqa: T201
|
||||
|
||||
assert len(docs) > 0 # assuming the query returns at least one document
|
||||
for doc in docs:
|
||||
print(doc)
|
||||
print(doc) # noqa: T201
|
||||
assert (
|
||||
doc.page_content != ""
|
||||
) # assuming that every document has page_content
|
||||
|
||||
@@ -28,7 +28,7 @@ class TestGitbookLoader:
|
||||
loader = GitbookLoader(
|
||||
web_page, load_all_paths=load_all_paths, base_url=base_url
|
||||
)
|
||||
print(loader.__dict__)
|
||||
print(loader.__dict__) # noqa: T201
|
||||
assert (
|
||||
loader.base_url == (base_url or web_page)[:-1]
|
||||
if (base_url or web_page).endswith("/")
|
||||
@@ -52,5 +52,5 @@ class TestGitbookLoader:
|
||||
def test_load_multiple_pages(self, web_page: str) -> None:
|
||||
loader = GitbookLoader(web_page, load_all_paths=True)
|
||||
result = loader.load()
|
||||
print(len(result))
|
||||
print(len(result)) # noqa: T201
|
||||
assert len(result) > 10
|
||||
|
||||
@@ -145,14 +145,14 @@ def test_mathpix_loader() -> None:
|
||||
docs = loader.load()
|
||||
|
||||
assert len(docs) == 1
|
||||
print(docs[0].page_content)
|
||||
print(docs[0].page_content) # noqa: T201
|
||||
|
||||
file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
|
||||
loader = MathpixPDFLoader(str(file_path))
|
||||
|
||||
docs = loader.load()
|
||||
assert len(docs) == 1
|
||||
print(docs[0].page_content)
|
||||
print(docs[0].page_content) # noqa: T201
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@@ -230,7 +230,7 @@ def test_amazontextract_loader(
|
||||
else:
|
||||
loader = AmazonTextractPDFLoader(file_path, textract_features=features)
|
||||
docs = loader.load()
|
||||
print(docs)
|
||||
print(docs) # noqa: T201
|
||||
|
||||
assert len(docs) == docs_length
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""Integration test for PubMed API Wrapper."""
|
||||
|
||||
from typing import List
|
||||
|
||||
import pytest
|
||||
@@ -13,7 +14,7 @@ def test_load_success() -> None:
|
||||
"""Test that returns the correct answer"""
|
||||
api_client = PubMedLoader(query="chatgpt")
|
||||
docs = api_client.load()
|
||||
print(docs)
|
||||
print(docs) # noqa: T201
|
||||
assert len(docs) == api_client.load_max_docs == 3
|
||||
assert_docs(docs)
|
||||
|
||||
@@ -22,7 +23,7 @@ def test_load_success_load_max_docs() -> None:
|
||||
"""Test that returns the correct answer"""
|
||||
api_client = PubMedLoader(query="chatgpt", load_max_docs=2)
|
||||
docs = api_client.load()
|
||||
print(docs)
|
||||
print(docs) # noqa: T201
|
||||
assert len(docs) == api_client.load_max_docs == 2
|
||||
assert_docs(docs)
|
||||
|
||||
@@ -38,7 +39,7 @@ def test_load_no_content() -> None:
|
||||
"""Returns a Document without content."""
|
||||
api_client = PubMedLoader(query="37548971")
|
||||
docs = api_client.load()
|
||||
print(docs)
|
||||
print(docs) # noqa: T201
|
||||
assert len(docs) > 0
|
||||
assert docs[0].page_content == ""
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@ def test_telegram_channel_loader_parsing() -> None:
|
||||
docs = loader.load()
|
||||
|
||||
assert len(docs) == 1
|
||||
print(docs[0].page_content)
|
||||
print(docs[0].page_content) # noqa: T201
|
||||
assert docs[0].page_content == (
|
||||
"Hello, world!.\nLLMs are awesome! Langchain is great. Telegram is the best!."
|
||||
)
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""Test Xinference embeddings."""
|
||||
|
||||
import time
|
||||
from typing import AsyncGenerator, Tuple
|
||||
|
||||
@@ -17,7 +18,7 @@ async def setup() -> AsyncGenerator[Tuple[str, str], None]:
|
||||
pool = await create_worker_actor_pool(
|
||||
f"test://127.0.0.1:{xo.utils.get_next_port()}"
|
||||
)
|
||||
print(f"Pool running on localhost:{pool.external_address}")
|
||||
print(f"Pool running on localhost:{pool.external_address}") # noqa: T201
|
||||
|
||||
endpoint = await start_supervisor_components(
|
||||
pool.external_address, "127.0.0.1", xo.utils.get_next_port()
|
||||
|
||||
@@ -4,7 +4,7 @@ import sys
|
||||
|
||||
|
||||
def main() -> int:
|
||||
print("Hello World!")
|
||||
print("Hello World!") # noqa: T201
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
@@ -41,7 +41,7 @@ def test_api_key_masked_when_passed_via_constructor(
|
||||
arcee_api_url="https://localhost",
|
||||
arcee_api_version="version",
|
||||
)
|
||||
print(arcee_without_env_var.arcee_api_key, end="")
|
||||
print(arcee_without_env_var.arcee_api_key, end="") # noqa: T201
|
||||
captured = capsys.readouterr()
|
||||
|
||||
assert "**********" == captured.out
|
||||
@@ -64,7 +64,7 @@ def test_api_key_masked_when_passed_from_env(
|
||||
arcee_api_url="https://localhost",
|
||||
arcee_api_version="version",
|
||||
)
|
||||
print(arcee_with_env_var.arcee_api_key, end="")
|
||||
print(arcee_with_env_var.arcee_api_key, end="") # noqa: T201
|
||||
captured = capsys.readouterr()
|
||||
|
||||
assert "**********" == captured.out
|
||||
|
||||
@@ -7,5 +7,5 @@ def test_aviary_call() -> None:
|
||||
"""Test valid call to Anyscale."""
|
||||
llm = Aviary()
|
||||
output = llm("Say bar:")
|
||||
print(f"llm answer:\n{output}")
|
||||
print(f"llm answer:\n{output}") # noqa: T201
|
||||
assert isinstance(output, str)
|
||||
|
||||
@@ -19,7 +19,7 @@ def test_huggingface_endpoint_text_generation() -> None:
|
||||
endpoint_url="", task="text-generation", model_kwargs={"max_new_tokens": 10}
|
||||
)
|
||||
output = llm("Say foo:")
|
||||
print(output)
|
||||
print(output) # noqa: T201
|
||||
assert isinstance(output, str)
|
||||
|
||||
|
||||
|
||||
@@ -37,7 +37,7 @@ def test_nlpcloud_api_key(monkeypatch: MonkeyPatch, capsys: CaptureFixture) -> N
|
||||
|
||||
assert cast(SecretStr, llm.nlpcloud_api_key).get_secret_value() == "secret-api-key"
|
||||
|
||||
print(llm.nlpcloud_api_key, end="")
|
||||
print(llm.nlpcloud_api_key, end="") # noqa: T201
|
||||
captured = capsys.readouterr()
|
||||
|
||||
assert captured.out == "**********"
|
||||
|
||||
@@ -25,7 +25,7 @@ def test_octoai_endpoint_text_generation() -> None:
|
||||
)
|
||||
|
||||
output = llm("Which state is Los Angeles in?")
|
||||
print(output)
|
||||
print(output) # noqa: T201
|
||||
assert isinstance(output, str)
|
||||
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ def test_api_key_masked_when_passed_via_constructor(
|
||||
capsys: CaptureFixture,
|
||||
) -> None:
|
||||
llm = Petals(huggingface_api_key="secret-api-key")
|
||||
print(llm.huggingface_api_key, end="")
|
||||
print(llm.huggingface_api_key, end="") # noqa: T201
|
||||
captured = capsys.readouterr()
|
||||
|
||||
assert captured.out == "**********"
|
||||
|
||||
@@ -13,7 +13,7 @@ def test_api_key_masked_when_passed_via_constructor(
|
||||
capsys: CaptureFixture,
|
||||
) -> None:
|
||||
llm = Predibase(predibase_api_key="secret-api-key")
|
||||
print(llm.predibase_api_key, end="")
|
||||
print(llm.predibase_api_key, end="") # noqa: T201
|
||||
captured = capsys.readouterr()
|
||||
|
||||
assert captured.out == "**********"
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""Test Tongyi API wrapper."""
|
||||
|
||||
from langchain_core.outputs import LLMResult
|
||||
|
||||
from langchain_community.llms.tongyi import Tongyi
|
||||
@@ -23,6 +24,6 @@ def test_tongyi_generate_stream() -> None:
|
||||
"""Test valid call to tongyi."""
|
||||
llm = Tongyi(streaming=True)
|
||||
output = llm.generate(["who are you"])
|
||||
print(output)
|
||||
print(output) # noqa: T201
|
||||
assert isinstance(output, LLMResult)
|
||||
assert isinstance(output.generations, list)
|
||||
|
||||
@@ -28,7 +28,7 @@ def test_api_key_masked_when_passed_via_constructor(
|
||||
volc_engine_maas_ak="secret-volc-ak",
|
||||
volc_engine_maas_sk="secret-volc-sk",
|
||||
)
|
||||
print(llm.volc_engine_maas_ak, end="")
|
||||
print(llm.volc_engine_maas_ak, end="") # noqa: T201
|
||||
captured = capsys.readouterr()
|
||||
|
||||
assert captured.out == "**********"
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""Test Xinference wrapper."""
|
||||
|
||||
import time
|
||||
from typing import AsyncGenerator, Tuple
|
||||
|
||||
@@ -17,7 +18,7 @@ async def setup() -> AsyncGenerator[Tuple[str, str], None]:
|
||||
pool = await create_worker_actor_pool(
|
||||
f"test://127.0.0.1:{xo.utils.get_next_port()}"
|
||||
)
|
||||
print(f"Pool running on localhost:{pool.external_address}")
|
||||
print(f"Pool running on localhost:{pool.external_address}") # noqa: T201
|
||||
|
||||
endpoint = await start_supervisor_components(
|
||||
pool.external_address, "127.0.0.1", xo.utils.get_next_port()
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""Integration test for Arxiv API Wrapper."""
|
||||
|
||||
from typing import Any, List
|
||||
|
||||
import pytest
|
||||
@@ -136,7 +137,7 @@ def test_load_returns_full_set_of_metadata() -> None:
|
||||
assert set(doc.metadata).issuperset(
|
||||
{"Published", "Title", "Authors", "Summary"}
|
||||
)
|
||||
print(doc.metadata)
|
||||
print(doc.metadata) # noqa: T201
|
||||
assert len(set(doc.metadata)) > 4
|
||||
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ def ddg_installed() -> bool:
|
||||
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f"duckduckgo not installed, skipping test {e}")
|
||||
print(f"duckduckgo not installed, skipping test {e}") # noqa: T201
|
||||
return False
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ def test_ddg_search_tool() -> None:
|
||||
keywords = "Bella Ciao"
|
||||
tool = DuckDuckGoSearchRun()
|
||||
result = tool(keywords)
|
||||
print(result)
|
||||
print(result) # noqa: T201
|
||||
assert len(result.split()) > 20
|
||||
|
||||
|
||||
@@ -30,5 +30,5 @@ def test_ddg_search_news_tool() -> None:
|
||||
keywords = "Tesla"
|
||||
tool = DuckDuckGoSearchResults(source="news")
|
||||
result = tool(keywords)
|
||||
print(result)
|
||||
print(result) # noqa: T201
|
||||
assert len(result.split()) > 20
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.