Mirror of https://github.com/hwchase17/langchain.git (synced 2026-05-12 17:57:22 +00:00)
```python
"""python scripts/update_mypy_ruff.py"""

import glob
import re
import subprocess
import tomllib
from pathlib import Path

import toml

ROOT_DIR = Path(__file__).parents[1]

# Matches mypy output lines such as "foo/bar.py:12: error: ... [arg-type]".
MYPY_ERROR_RE = re.compile(r"^(.*):(\d+): error:.*\[(.*)\]")


def main():
    for path in glob.glob(str(ROOT_DIR / "libs/**/pyproject.toml"), recursive=True):
        print(path)
        with open(path, "rb") as f:
            pyproject = tomllib.load(f)

        # Bump the mypy and ruff pins; skip packages that don't declare them.
        try:
            pyproject["tool"]["poetry"]["group"]["typing"]["dependencies"]["mypy"] = (
                "^1.10"
            )
            pyproject["tool"]["poetry"]["group"]["lint"]["dependencies"]["ruff"] = (
                "^0.5"
            )
        except KeyError:
            continue
        with open(path, "w") as f:
            toml.dump(pyproject, f)

        cwd = "/".join(path.split("/")[:-1])

        # Re-lock, install the typing group, and collect mypy's error output.
        completed = subprocess.run(
            "poetry lock --no-update; poetry install --with typing; poetry run mypy . --no-color",
            cwd=cwd,
            shell=True,
            capture_output=True,
            text=True,
        )
        logs = completed.stdout.split("\n")

        # Group error codes by (file, line) so one ignore comment covers them all.
        to_ignore = {}
        for log_line in logs:
            match = MYPY_ERROR_RE.match(log_line)
            if match:
                error_path, line_no, error_type = match.groups()
                if (error_path, line_no) in to_ignore:
                    to_ignore[(error_path, line_no)].append(error_type)
                else:
                    to_ignore[(error_path, line_no)] = [error_type]
        print(len(to_ignore))

        # Append "# type: ignore[...]" to each offending line.
        for (error_path, line_no), error_types in to_ignore.items():
            all_errors = ", ".join(error_types)
            full_path = f"{cwd}/{error_path}"
            try:
                with open(full_path, "r") as f:
                    file_lines = f.readlines()
            except FileNotFoundError:
                continue
            file_lines[int(line_no) - 1] = (
                file_lines[int(line_no) - 1][:-1] + f" # type: ignore[{all_errors}]\n"
            )
            with open(full_path, "w") as f:
                f.write("".join(file_lines))

        # Reformat and fix import ordering with ruff.
        subprocess.run(
            "poetry run ruff format .; poetry run ruff --select I --fix .",
            cwd=cwd,
            shell=True,
            capture_output=True,
            text=True,
        )


if __name__ == "__main__":
    main()
```
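The error-parsing step above can be exercised on its own. The minimal sketch below feeds the same regex a few fabricated mypy output lines (the file paths and error codes are made up purely for illustration) and shows how errors reported for the same file and line end up grouped into a single `# type: ignore[...]` suffix.

```python
import re

# Same pattern the script uses to pick apart mypy's error lines.
MYPY_ERROR_RE = re.compile(r"^(.*):(\d+): error:.*\[(.*)\]")

# Fabricated sample output, for illustration only.
sample_logs = [
    "langchain/agents/agent.py:42: error: Incompatible return value type  [return-value]",
    "langchain/agents/agent.py:42: error: Argument 1 has incompatible type  [arg-type]",
    "langchain/chains/base.py:7: error: Name 'foo' is not defined  [name-defined]",
]

to_ignore: dict[tuple[str, str], list[str]] = {}
for log_line in sample_logs:
    match = MYPY_ERROR_RE.match(log_line)
    if match:
        error_path, line_no, error_type = match.groups()
        to_ignore.setdefault((error_path, line_no), []).append(error_type)

for (error_path, line_no), error_types in to_ignore.items():
    print(f"{error_path}:{line_no}  ->  # type: ignore[{', '.join(error_types)}]")
# langchain/agents/agent.py:42  ->  # type: ignore[return-value, arg-type]
# langchain/chains/base.py:7  ->  # type: ignore[name-defined]
```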
"""Test Vertex AI embeddings API wrapper."""
|
|
|
|
from langchain_community.embeddings import VertexAIEmbeddings
|
|
|
|
|
|
def test_split_by_punctuation() -> None:
|
|
parts = VertexAIEmbeddings._split_by_punctuation(
|
|
"Hello, my friend!\nHow are you?\nI have 2 news:\n\n\t- Good,\n\t- Bad."
|
|
)
|
|
assert parts == [
|
|
"Hello",
|
|
",",
|
|
" ",
|
|
"my",
|
|
" ",
|
|
"friend",
|
|
"!",
|
|
"\n",
|
|
"How",
|
|
" ",
|
|
"are",
|
|
" ",
|
|
"you",
|
|
"?",
|
|
"\n",
|
|
"I",
|
|
" ",
|
|
"have",
|
|
" ",
|
|
"2",
|
|
" ",
|
|
"news",
|
|
":",
|
|
"\n",
|
|
"\n",
|
|
"\t",
|
|
"-",
|
|
" ",
|
|
"Good",
|
|
",",
|
|
"\n",
|
|
"\t",
|
|
"-",
|
|
" ",
|
|
"Bad",
|
|
".",
|
|
]
|
|
|
|
|
|
def test_batching() -> None:
|
|
long_text = "foo " * 500 # 1000 words, 2000 tokens
|
|
long_texts = [long_text for _ in range(0, 250)]
|
|
documents251 = ["foo bar" for _ in range(0, 251)]
|
|
five_elem = VertexAIEmbeddings._prepare_batches(long_texts, 5)
|
|
default250_elem = VertexAIEmbeddings._prepare_batches(long_texts, 250)
|
|
batches251 = VertexAIEmbeddings._prepare_batches(documents251, 250)
|
|
assert len(five_elem) == 50 # 250/5 items
|
|
assert len(five_elem[0]) == 5 # 5 items per batch
|
|
assert len(default250_elem[0]) == 10 # Should not be more than 20K tokens
|
|
assert len(default250_elem) == 25
|
|
assert len(batches251[0]) == 250
|
|
assert len(batches251[1]) == 1
|
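The batching assertions above imply two limits on each batch: a maximum item count and an approximate per-batch token budget (the "20K tokens" comment). The sketch below is written from scratch to illustrate that kind of two-constraint batching; it is not the actual `_prepare_batches` implementation, and the "4 tokens per whitespace-separated word" heuristic is an assumption chosen only so the arithmetic lines up with the test's numbers.

```python
from typing import List


def prepare_batches_sketch(
    texts: List[str],
    batch_size: int,
    max_tokens_per_batch: int = 20_000,
) -> List[List[str]]:
    """Illustrative only: group texts by a count limit and a token budget."""
    batches: List[List[str]] = []
    current: List[str] = []
    current_tokens = 0
    for text in texts:
        # Crude token estimate (assumption for this sketch, not the real counter).
        tokens = 4 * len(text.split())
        # Start a new batch once either the count or the token budget would be exceeded.
        if current and (
            len(current) >= batch_size
            or current_tokens + tokens > max_tokens_per_batch
        ):
            batches.append(current)
            current = []
            current_tokens = 0
        current.append(text)
        current_tokens += tokens
    if current:
        batches.append(current)
    return batches


# With 250 texts of ~2000 tokens each and batch_size=250, the token budget caps
# batches at 10 items, giving 25 batches, matching the assertions above.
```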