mirror of https://github.com/hwchase17/langchain.git
synced 2025-07-10 15:06:18 +00:00
standard-tests: Add ruff rules PGH (#31869)
See https://docs.astral.sh/ruff/rules/#pygrep-hooks-pgh
This commit is contained in:
parent 53c75abba2
commit 1276bf3e1d
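The PGH (pygrep-hooks) rules chiefly forbid blanket suppression comments: PGH003 rejects a bare "# type: ignore" and PGH004 a bare "# noqa", requiring the specific error code instead. The hunks below follow that pattern, either naming the mypy error code being suppressed or dropping the ignore by fixing the typing (for example, parameterizing the store test suites as BaseStoreSyncTests[str] / BaseStoreAsyncTests[str]). A minimal, hypothetical before/after sketch, not taken from this diff:

# Illustrative sketch only (hypothetical helper, not part of this commit).
from typing import Optional


def first_or_none(values: list[int]) -> int:
    head: Optional[int] = values[0] if values else None
    # PGH003 would flag a bare suppression here:
    #     return head  # type: ignore
    # The compliant form names the exact mypy error code being silenced:
    return head  # type: ignore[return-value]

Where a blanket ignore was only papering over a missing type parameter, the commit removes it entirely rather than narrowing it.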
@@ -149,7 +149,7 @@ class BaseStoreSyncTests(BaseStandardTests, Generic[V]):
         assert sorted(kv_store.yield_keys(prefix="foo")) == ["foo"]


-class BaseStoreAsyncTests(BaseStandardTests):
+class BaseStoreAsyncTests(BaseStandardTests, Generic[V]):
     """Test suite for checking the key-value API of a BaseStore.

     This test suite verifies the basic key-value API of a BaseStore.
@@ -1346,7 +1346,7 @@ class ChatModelIntegrationTests(ChatModelTests):
         # Test stream
         full: Optional[BaseMessageChunk] = None
         for chunk in model_with_tools.stream(query):
-            full = chunk if full is None else full + chunk  # type: ignore
+            full = chunk if full is None else full + chunk  # type: ignore[assignment]
         assert isinstance(full, AIMessage)
         _validate_tool_call_message(full)

@@ -1407,7 +1407,7 @@ class ChatModelIntegrationTests(ChatModelTests):
         # Test astream
         full: Optional[BaseMessageChunk] = None
         async for chunk in model_with_tools.astream(query):
-            full = chunk if full is None else full + chunk  # type: ignore
+            full = chunk if full is None else full + chunk  # type: ignore[assignment]
         assert isinstance(full, AIMessage)
         _validate_tool_call_message(full)

@@ -1743,7 +1743,7 @@ class ChatModelIntegrationTests(ChatModelTests):

         full: Optional[BaseMessageChunk] = None
         for chunk in model_with_tools.stream(query):
-            full = chunk if full is None else full + chunk  # type: ignore
+            full = chunk if full is None else full + chunk  # type: ignore[assignment]
         assert isinstance(full, AIMessage)
         _validate_tool_call_message_no_args(full)

@@ -49,7 +49,7 @@ class DocumentIndexerTestSuite(ABC):

         # Ordering is not guaranteed, need to test carefully
         documents = index.get(ids)
-        sorted_documents = sorted(documents, key=lambda x: x.id)  # type: ignore
+        sorted_documents = sorted(documents, key=lambda x: x.id or "")

         if sorted_documents[0].page_content == "bar":
             assert sorted_documents[0] == Document(
@@ -196,7 +196,7 @@ class DocumentIndexerTestSuite(ABC):
         }
         retrieved_documents = index.get(["1", "2", "3", "4"])
         # The ordering is not guaranteed, so we use a set.
-        assert sorted(retrieved_documents, key=lambda x: x.id) == [  # type: ignore
+        assert sorted(retrieved_documents, key=lambda x: x.id or "") == [
             Document(page_content="foo", metadata={"id": 1}, id="1"),
             Document(page_content="bar", metadata={"id": 2}, id="2"),
         ]
@@ -239,7 +239,7 @@ class AsyncDocumentIndexTestSuite(ABC):

         # Ordering is not guaranteed, need to test carefully
         documents = await index.aget(ids)
-        sorted_documents = sorted(documents, key=lambda x: x.id)  # type: ignore
+        sorted_documents = sorted(documents, key=lambda x: x.id or "")

         if sorted_documents[0].page_content == "bar":
             assert sorted_documents[0] == Document(
@@ -388,7 +388,7 @@ class AsyncDocumentIndexTestSuite(ABC):
         }
         retrieved_documents = await index.aget(["1", "2", "3", "4"])
         # The ordering is not guaranteed, so we use a set.
-        assert sorted(retrieved_documents, key=lambda x: x.id) == [  # type: ignore
+        assert sorted(retrieved_documents, key=lambda x: x.id or "") == [
             Document(page_content="foo", metadata={"id": 1}, id="1"),
             Document(page_content="bar", metadata={"id": 2}, id="2"),
         ]
@@ -908,7 +908,7 @@ class ChatModelUnitTests(ChatModelTests):
         # Doing a mypy ignore here since some of the tools are from pydantic
         # BaseModel 2 which isn't typed properly yet. This will need to be fixed
         # so type checking does not become annoying to users.
-        tool_model = model.bind_tools(tools, tool_choice="any")  # type: ignore
+        tool_model = model.bind_tools(tools, tool_choice="any")  # type: ignore[arg-type]
         assert isinstance(tool_model, RunnableBinding)

     @pytest.mark.parametrize("schema", TEST_PYDANTIC_MODELS)
@@ -962,19 +962,19 @@ class ChatModelUnitTests(ChatModelTests):

         ls_params = model._get_ls_params()
         try:
-            ExpectedParams(**ls_params)  # type: ignore
+            ExpectedParams(**ls_params)  # type: ignore[arg-type]
         except ValidationErrorV1 as e:
             pytest.fail(f"Validation error: {e}")

         # Test optional params
         model = self.chat_model_class(
-            max_tokens=10,
-            stop=["test"],
-            **self.chat_model_params,  # type: ignore
+            max_tokens=10,  # type: ignore[call-arg]
+            stop=["test"],  # type: ignore[call-arg]
+            **self.chat_model_params,
         )
         ls_params = model._get_ls_params()
         try:
-            ExpectedParams(**ls_params)  # type: ignore
+            ExpectedParams(**ls_params)  # type: ignore[arg-type]
         except ValidationErrorV1 as e:
             pytest.fail(f"Validation error: {e}")

@@ -55,7 +55,7 @@ ignore_missing_imports = true
 target-version = "py39"

 [tool.ruff.lint]
-select = ["E", "F", "I", "T201", "UP",]
+select = ["E", "F", "I", "PGH", "T201", "UP",]
 pyupgrade.keep-runtime-typing = true

 [tool.coverage.run]
@@ -6,7 +6,7 @@ from langchain_tests.integration_tests import ToolsIntegrationTests
 from langchain_tests.unit_tests import ToolsUnitTests


-class ParrotMultiplyTool(BaseTool):  # type: ignore
+class ParrotMultiplyTool(BaseTool):
     name: str = "ParrotMultiplyTool"
     description: str = (
         "Multiply two numbers like a parrot. Parrots always add eighty for their matey."
@@ -16,7 +16,7 @@ class ParrotMultiplyTool(BaseTool):  # type: ignore
         return a * b + 80


-class ParrotMultiplyArtifactTool(BaseTool):  # type: ignore
+class ParrotMultiplyArtifactTool(BaseTool):
     name: str = "ParrotMultiplyArtifactTool"
     description: str = (
         "Multiply two numbers like a parrot. Parrots always add eighty for their matey."
@@ -9,7 +9,7 @@ from langchain_tests.integration_tests.base_store import (
 )


-class TestInMemoryStore(BaseStoreSyncTests):
+class TestInMemoryStore(BaseStoreSyncTests[str]):
     @pytest.fixture
     def three_values(self) -> tuple[str, str, str]:
         return "foo", "bar", "buzz"
@@ -19,9 +19,9 @@ class TestInMemoryStore(BaseStoreSyncTests):
         return InMemoryStore()


-class TestInMemoryStoreAsync(BaseStoreAsyncTests):
+class TestInMemoryStoreAsync(BaseStoreAsyncTests[str]):
     @pytest.fixture
-    def three_values(self) -> tuple[str, str, str]:  # type: ignore
+    def three_values(self) -> tuple[str, str, str]:
         return "foo", "bar", "buzz"

     @pytest.fixture