lint tests

This commit is contained in:
Eugene Yurtsev
2024-05-03 10:00:55 -04:00
parent 33dac2e00d
commit 32d02c2af2
42 changed files with 103 additions and 100 deletions

View File

@@ -8,12 +8,12 @@ from typing import Any
from urllib.error import HTTPError
import pytest
from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.ainetwork.toolkit import AINetworkToolkit
from langchain_community.chat_models import ChatOpenAI
from langchain_community.tools.ainetwork.utils import authenticate
from langchain.agents import AgentType, initialize_agent
class Match(Enum):
__test__ = False

View File

@@ -1,8 +1,9 @@
import pytest
from langchain_core.utils import get_from_env
from langchain_community.agent_toolkits import PowerBIToolkit, create_pbi_agent
from langchain_community.chat_models import ChatOpenAI
from langchain_community.utilities.powerbi import PowerBIDataset
from langchain_core.utils import get_from_env
def azure_installed() -> bool:

View File

@@ -15,13 +15,13 @@ import os
from typing import AsyncIterator, Iterator
import pytest
from langchain_community.cache import AstraDBCache, AstraDBSemanticCache
from langchain_community.utilities.astradb import SetupMode
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_core.caches import BaseCache
from langchain_core.language_models import LLM
from langchain_core.outputs import Generation, LLMResult
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_community.cache import AstraDBCache, AstraDBSemanticCache
from langchain_community.utilities.astradb import SetupMode
from tests.integration_tests.cache.fake_embeddings import FakeEmbeddings
from tests.unit_tests.llms.fake_llm import FakeLLM

View File

@@ -10,14 +10,14 @@ import os
import uuid
import pytest
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_core.outputs import Generation
from langchain_community.cache import AzureCosmosDBSemanticCache
from langchain_community.vectorstores.azure_cosmos_db import (
CosmosDBSimilarityType,
CosmosDBVectorSearchType,
)
from langchain_core.outputs import Generation
from langchain.globals import get_llm_cache, set_llm_cache
from tests.integration_tests.cache.fake_embeddings import (
FakeEmbeddings,
)

View File

@@ -5,11 +5,11 @@ import time
from typing import Any, Iterator, Tuple
import pytest
from langchain_community.cache import CassandraCache, CassandraSemanticCache
from langchain_community.utilities.cassandra import SetupMode
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_core.outputs import Generation, LLMResult
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_community.cache import CassandraCache, CassandraSemanticCache
from langchain_community.utilities.cassandra import SetupMode
from tests.integration_tests.cache.fake_embeddings import FakeEmbeddings
from tests.unit_tests.llms.fake_llm import FakeLLM

View File

@@ -2,10 +2,10 @@ import os
from typing import Any, Callable, Union
import pytest
from langchain_community.cache import GPTCache
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_core.outputs import Generation
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_community.cache import GPTCache
from tests.unit_tests.llms.fake_llm import FakeLLM
try:

View File

@@ -11,10 +11,10 @@ from datetime import timedelta
from typing import Iterator
import pytest
from langchain_community.cache import MomentoCache
from langchain.globals import set_llm_cache
from langchain_core.outputs import Generation, LLMResult
from langchain.globals import set_llm_cache
from langchain_community.cache import MomentoCache
from tests.unit_tests.llms.fake_llm import FakeLLM

View File

@@ -1,7 +1,7 @@
from langchain_community.cache import OpenSearchSemanticCache
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_core.outputs import Generation
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_community.cache import OpenSearchSemanticCache
from tests.integration_tests.cache.fake_embeddings import (
FakeEmbeddings,
)

View File

@@ -5,13 +5,13 @@ from contextlib import asynccontextmanager, contextmanager
from typing import AsyncGenerator, Generator, List, Optional, cast
import pytest
from langchain_community.cache import AsyncRedisCache, RedisCache, RedisSemanticCache
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_core.embeddings import Embeddings
from langchain_core.load.dump import dumps
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, Generation, LLMResult
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_community.cache import AsyncRedisCache, RedisCache, RedisSemanticCache
from tests.integration_tests.cache.fake_embeddings import (
ConsistentFakeEmbeddings,
FakeEmbeddings,

View File

@@ -1,11 +1,11 @@
"""Test Upstash Redis cache functionality."""
import uuid
import langchain
import pytest
from langchain_community.cache import UpstashRedisCache
from langchain_core.outputs import Generation, LLMResult
import langchain
from langchain_community.cache import UpstashRedisCache
from tests.unit_tests.llms.fake_chat_model import FakeChatModel
from tests.unit_tests.llms.fake_llm import FakeLLM

View File

@@ -1,9 +1,9 @@
"""Integration test for Dall-E image generator agent."""
from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.load_tools import load_tools
from langchain_community.llms import OpenAI
from langchain.agents import AgentType, initialize_agent
def test_call() -> None:
"""Test that the agent runs and returns output."""

View File

@@ -1,12 +1,12 @@
"""Test Graph Database Chain."""
import os
from langchain.chains.loading import load_chain
from langchain_community.chains.graph_qa.cypher import GraphCypherQAChain
from langchain_community.graphs import Neo4jGraph
from langchain_community.llms.openai import OpenAI
from langchain.chains.loading import load_chain
def test_connect_neo4j() -> None:
"""Test that Neo4j database is correctly instantiated and connected."""

View File

@@ -3,11 +3,11 @@ import pathlib
import re
from unittest.mock import MagicMock, Mock
from langchain.chains import LLMChain
from langchain_community.chains.graph_qa.sparql import GraphSparqlQAChain
from langchain_community.graphs import RdfGraph
from langchain.chains import LLMChain
"""
cd libs/langchain/tests/integration_tests/chains/docker-compose-ontotext-graphdb
./start.sh

View File

@@ -1,11 +1,11 @@
from unittest.mock import MagicMock, Mock
import pytest
from langchain.chains import LLMChain
from langchain_community.chains.graph_qa.ontotext_graphdb import OntotextGraphDBQAChain
from langchain_community.graphs import OntotextGraphDBGraph
from langchain.chains import LLMChain
"""
cd libs/langchain/tests/integration_tests/chains/docker-compose-ontotext-graphdb
./start.sh

View File

@@ -1,10 +1,10 @@
"""Integration test for self ask with search."""
from langchain.agents.react.base import ReActChain
from langchain_community.docstore import Wikipedia
from langchain_community.llms.openai import OpenAI
from langchain.agents.react.base import ReActChain
def test_react() -> None:
"""Test functionality on a prompt."""

View File

@@ -1,14 +1,14 @@
"""Test RetrievalQA functionality."""
from pathlib import Path
from langchain.chains import RetrievalQA
from langchain.chains.loading import load_chain
from langchain_text_splitters.character import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.llms import OpenAI
from langchain_community.vectorstores import FAISS
from langchain_text_splitters.character import CharacterTextSplitter
from langchain.chains import RetrievalQA
from langchain.chains.loading import load_chain
def test_retrieval_qa_saving_loading(tmp_path: Path) -> None:

View File

@@ -1,12 +1,12 @@
"""Test RetrievalQA functionality."""
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chains.loading import load_chain
from langchain_text_splitters.character import CharacterTextSplitter
from langchain_community.document_loaders import DirectoryLoader
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.llms import OpenAI
from langchain_community.vectorstores import FAISS
from langchain_text_splitters.character import CharacterTextSplitter
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chains.loading import load_chain
def test_retrieval_qa_with_sources_chain_saving_loading(tmp_path: str) -> None:

View File

@@ -1,9 +1,9 @@
"""Integration test for self ask with search."""
from langchain.agents.self_ask_with_search.base import SelfAskWithSearchChain
from langchain_community.llms.openai import OpenAI
from langchain_community.utilities.searchapi import SearchApiAPIWrapper
from langchain.agents.self_ask_with_search.base import SelfAskWithSearchChain
def test_self_ask_with_search() -> None:
"""Test functionality on a prompt."""

View File

@@ -2,13 +2,13 @@ import os
from typing import AsyncIterable, Iterable
import pytest
from langchain.memory import ConversationBufferMemory
from langchain_core.messages import AIMessage, HumanMessage
from langchain_community.chat_message_histories.astradb import (
AstraDBChatMessageHistory,
)
from langchain_community.utilities.astradb import SetupMode
from langchain_core.messages import AIMessage, HumanMessage
from langchain.memory import ConversationBufferMemory
def _has_env_vars() -> bool:

View File

@@ -2,12 +2,12 @@ import os
import time
from typing import Optional
from langchain.memory import ConversationBufferMemory
from langchain_core.messages import AIMessage, HumanMessage
from langchain_community.chat_message_histories.cassandra import (
CassandraChatMessageHistory,
)
from langchain_core.messages import AIMessage, HumanMessage
from langchain.memory import ConversationBufferMemory
def _chat_message_history(

View File

@@ -1,10 +1,10 @@
import json
import os
from langchain_community.chat_message_histories import CosmosDBChatMessageHistory
from langchain.memory import ConversationBufferMemory
from langchain_core.messages import message_to_dict
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_message_histories import CosmosDBChatMessageHistory
# Replace these with your Azure Cosmos DB endpoint and key
endpoint = os.environ.get("COSMOS_DB_ENDPOINT", "")

View File

@@ -4,10 +4,10 @@ import uuid
from typing import Generator, Union
import pytest
from langchain_community.chat_message_histories import ElasticsearchChatMessageHistory
from langchain.memory import ConversationBufferMemory
from langchain_core.messages import message_to_dict
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_message_histories import ElasticsearchChatMessageHistory
"""
cd tests/integration_tests/memory/docker-compose

View File

@@ -1,9 +1,9 @@
import json
from langchain_community.chat_message_histories import FirestoreChatMessageHistory
from langchain.memory import ConversationBufferMemory
from langchain_core.messages import message_to_dict
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_message_histories import FirestoreChatMessageHistory
def test_memory_with_message_store() -> None:

View File

@@ -10,10 +10,10 @@ from datetime import timedelta
from typing import Iterator
import pytest
from langchain_community.chat_message_histories import MomentoChatMessageHistory
from langchain.memory import ConversationBufferMemory
from langchain_core.messages import message_to_dict
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_message_histories import MomentoChatMessageHistory
def random_string() -> str:

View File

@@ -1,10 +1,10 @@
import json
import os
from langchain_community.chat_message_histories import MongoDBChatMessageHistory
from langchain.memory import ConversationBufferMemory
from langchain_core.messages import message_to_dict
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_message_histories import MongoDBChatMessageHistory
# Replace these with your mongodb connection string
connection_string = os.environ.get("MONGODB_CONNECTION_STRING", "")

View File

@@ -1,9 +1,9 @@
import json
from langchain_community.chat_message_histories import Neo4jChatMessageHistory
from langchain.memory import ConversationBufferMemory
from langchain_core.messages import message_to_dict
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_message_histories import Neo4jChatMessageHistory
def test_memory_with_message_store() -> None:

View File

@@ -1,9 +1,9 @@
import json
from langchain_community.chat_message_histories import RedisChatMessageHistory
from langchain.memory import ConversationBufferMemory
from langchain_core.messages import message_to_dict
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_message_histories import RedisChatMessageHistory
def test_memory_with_message_store() -> None:

View File

@@ -8,10 +8,10 @@ and ROCKSET_REGION environment variables set.
import json
import os
from langchain_community.chat_message_histories import RocksetChatMessageHistory
from langchain.memory import ConversationBufferMemory
from langchain_core.messages import message_to_dict
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_message_histories import RocksetChatMessageHistory
collection_name = "langchain_demo"
session_id = "MySession"

View File

@@ -1,9 +1,9 @@
import json
from langchain_community.chat_message_histories import SingleStoreDBChatMessageHistory
from langchain.memory import ConversationBufferMemory
from langchain_core.messages import message_to_dict
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_message_histories import SingleStoreDBChatMessageHistory
# Replace this with your SingleStoreDB connection string
TEST_SINGLESTOREDB_URL = "root:pass@localhost:3306/db"

View File

@@ -1,12 +1,12 @@
import json
import pytest
from langchain.memory import ConversationBufferMemory
from langchain_core.messages import message_to_dict
from langchain_community.chat_message_histories.upstash_redis import (
UpstashRedisChatMessageHistory,
)
from langchain_core.messages import message_to_dict
from langchain.memory import ConversationBufferMemory
URL = "<UPSTASH_REDIS_REST_URL>"
TOKEN = "<UPSTASH_REDIS_REST_TOKEN>"

View File

@@ -6,10 +6,10 @@ Before running this test, please create a Xata database.
import json
import os
from langchain_community.chat_message_histories import XataChatMessageHistory
from langchain.memory import ConversationBufferMemory
from langchain_core.messages import message_to_dict
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_message_histories import XataChatMessageHistory
class TestXata:

View File

@@ -1,11 +1,12 @@
"""Test functionality related to ngram overlap based selector."""
import pytest
from langchain_core.prompts import PromptTemplate
from langchain_community.example_selectors import (
NGramOverlapExampleSelector,
ngram_overlap_score,
)
from langchain_core.prompts import PromptTemplate
EXAMPLES = [
{"input": "See Spot run.", "output": "foo1"},

View File

@@ -1,13 +1,13 @@
"""Integration test for compression pipelines."""
from langchain_community.document_transformers import EmbeddingsRedundantFilter
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_core.documents import Document
from langchain_text_splitters.character import CharacterTextSplitter
from langchain.retrievers.document_compressors import (
DocumentCompressorPipeline,
EmbeddingsFilter,
)
from langchain_core.documents import Document
from langchain_text_splitters.character import CharacterTextSplitter
from langchain_community.document_transformers import EmbeddingsRedundantFilter
from langchain_community.embeddings import OpenAIEmbeddings
def test_document_compressor_pipeline() -> None:

View File

@@ -1,8 +1,8 @@
"""Integration test for LLMChainExtractor."""
from langchain_community.chat_models import ChatOpenAI
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain_core.documents import Document
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain_community.chat_models import ChatOpenAI
def test_llm_construction_with_kwargs() -> None:

View File

@@ -1,8 +1,8 @@
"""Integration test for llm-based relevant doc filtering."""
from langchain_community.chat_models import ChatOpenAI
from langchain.retrievers.document_compressors import LLMChainFilter
from langchain_core.documents import Document
from langchain.retrievers.document_compressors import LLMChainFilter
from langchain_community.chat_models import ChatOpenAI
def test_llm_chain_filter() -> None:

View File

@@ -1,12 +1,12 @@
"""Integration test for embedding-based relevant doc filtering."""
import numpy as np
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain_core.documents import Document
from langchain_community.document_transformers.embeddings_redundant_filter import (
_DocumentWithState,
)
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_core.documents import Document
from langchain.retrievers.document_compressors import EmbeddingsFilter
def test_embeddings_filter() -> None:

View File

@@ -1,9 +1,9 @@
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
def test_contextual_compression_retriever_get_relevant_docs() -> None:
"""Test get_relevant_docs."""

View File

@@ -1,8 +1,8 @@
from langchain.retrievers.merger_retriever import MergerRetriever
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.retrievers.merger_retriever import MergerRetriever
def test_merger_retriever_get_relevant_docs() -> None:
"""Test get_relevant_docs."""

View File

@@ -2,19 +2,19 @@ from typing import Iterator, List, Optional
from uuid import uuid4
import pytest
from langchain_community.chat_models import ChatOpenAI
from langchain_community.llms.openai import OpenAI
from langchain.chains.llm import LLMChain
from langchain.evaluation import EvaluatorType
from langchain.smith import RunEvalConfig, run_on_dataset
from langchain.smith.evaluation import InputFormatError
from langchain.smith.evaluation.runner_utils import arun_on_dataset
from langchain_core.messages import BaseMessage, HumanMessage
from langchain_core.prompts.chat import ChatPromptTemplate
from langsmith import Client as Client
from langsmith.evaluation import run_evaluator
from langsmith.schemas import DataType, Example, Run
from langchain.chains.llm import LLMChain
from langchain.evaluation import EvaluatorType
from langchain.smith import RunEvalConfig, run_on_dataset
from langchain.smith.evaluation import InputFormatError
from langchain.smith.evaluation.runner_utils import arun_on_dataset
from langchain_community.chat_models import ChatOpenAI
from langchain_community.llms.openai import OpenAI
def _check_all_feedback_passed(_project_name: str, client: Client) -> None:

View File

@@ -1,12 +1,13 @@
"""Integration test for embedding-based redundant doc filtering."""
from langchain_core.documents import Document
from langchain_community.document_transformers.embeddings_redundant_filter import (
EmbeddingsClusteringFilter,
EmbeddingsRedundantFilter,
_DocumentWithState,
)
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_core.documents import Document
def test_embeddings_redundant_filter() -> None:

View File

@@ -3,11 +3,12 @@ import json
from typing import Any
from unittest import mock
from langchain_core.documents import Document
from langchain_community.document_transformers.nuclia_text_transform import (
NucliaTextTransformer,
)
from langchain_community.tools.nuclia.tool import NucliaUnderstandingAPI
from langchain_core.documents import Document
def fakerun(**args: Any) -> Any:

View File

@@ -4,6 +4,7 @@ from typing import Dict, Generator, List, Union
import pytest
from _pytest.fixtures import FixtureRequest
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_core.caches import InMemoryCache
from langchain_core.language_models import FakeListChatModel, FakeListLLM
from langchain_core.language_models.chat_models import BaseChatModel
@@ -14,8 +15,6 @@ from langchain_core.outputs import ChatGeneration, Generation
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from langchain.globals import get_llm_cache, set_llm_cache
pytest.importorskip("langchain_community")
from langchain_community.cache import SQLAlchemyCache # noqa: E402