Use pytest asyncio auto mode (#13643)
This commit is contained in:
parent 611e1e0ca4
commit 8329f81072
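Setting pytest-asyncio to auto mode makes every collected `async def` test run on an event loop automatically, so the per-function `@pytest.mark.asyncio` markers, and the `import pytest` lines that existed only to apply them, can be deleted throughout the test suites, as the hunks below show. A minimal before/after sketch (assuming pytest-asyncio is installed; `fetch_greeting` is a placeholder coroutine, not part of this diff):

    # Before: pytest-asyncio's default "strict" mode requires an explicit
    # marker on every async test function.
    import pytest


    @pytest.mark.asyncio
    async def test_greeting() -> None:
        assert await fetch_greeting() == "hello"  # fetch_greeting: placeholder


    # After: with asyncio_mode = "auto" in the pytest config, the bare
    # coroutine function below is collected and awaited automatically.
    async def test_greeting_auto() -> None:
        assert await fetch_greeting() == "hello"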
@@ -92,3 +92,4 @@ markers = [
     "asyncio: mark tests as requiring asyncio",
     "compile: mark placeholder test used to compile integration tests without running them",
 ]
+asyncio_mode = "auto"
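The flag lands in the `[tool.pytest.ini_options]` table that the `markers = [` context belongs to. One hedged side note: in auto mode pytest-asyncio also wraps async fixtures declared with the plain `@pytest.fixture` decorator, so a test module needs no pytest-asyncio-specific imports at all. A small sketch with a hypothetical `connection` fixture:

    import pytest


    @pytest.fixture
    async def connection() -> str:
        # Hypothetical async fixture: under asyncio_mode = "auto",
        # pytest-asyncio awaits it without an @pytest_asyncio.fixture decorator.
        return "connected"


    async def test_connection(connection: str) -> None:
        assert connection == "connected"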
@@ -89,7 +89,6 @@ def test_generate_synthetic(synthetic_data_generator: SyntheticDataGenerator) ->
 
 
 @pytest.mark.requires("openai")
-@pytest.mark.asyncio
 async def test_agenerate_synthetic(
     synthetic_data_generator: SyntheticDataGenerator,
 ) -> None:
@@ -68,7 +68,6 @@ def test_configured_system_message(
     assert actual == expected
 
 
-@pytest.mark.asyncio
 async def test_configured_system_message_async(
     model_cfg_sys_msg: Llama2Chat,
 ) -> None:
@@ -423,6 +423,7 @@ markers = [
     "scheduled: mark tests to run in scheduled testing",
     "compile: mark placeholder test used to compile integration tests without running them"
 ]
+asyncio_mode = "auto"
 
 [tool.codespell]
 skip = '.git,*.pdf,*.svg,*.pdf,*.yaml,*.ipynb,poetry.lock,*.min.js,*.css,package-lock.json,example_data,_dist,examples'
@@ -1,7 +1,5 @@
 from typing import Any
 
-import pytest
-
 from langchain.adapters import openai as lcopenai
 
 
@@ -83,7 +81,6 @@ async def _test_module(**kwargs: Any) -> None:
     await _test_astream(stream=True, **kwargs)
 
 
-@pytest.mark.asyncio
 async def test_normal_call() -> None:
     await _test_module(
         messages=[{"role": "user", "content": "hi"}],
@@ -92,7 +89,6 @@ async def test_normal_call() -> None:
     )
 
 
-@pytest.mark.asyncio
 async def test_function_calling() -> None:
     await _test_module(
         messages=[{"role": "user", "content": "whats the weather in boston"}],
@@ -102,7 +98,6 @@ async def test_function_calling() -> None:
     )
 
 
-@pytest.mark.asyncio
 async def test_answer_with_function_calling() -> None:
     await _test_module(
         messages=[
@@ -2,7 +2,6 @@
 import asyncio
 import os
 
-import pytest
 from aiohttp import ClientSession
 from langchain_core.prompts import PromptTemplate
 
@@ -66,7 +65,6 @@ def test_tracing_session_env_var() -> None:
     del os.environ["LANGCHAIN_SESSION"]
 
 
-@pytest.mark.asyncio
 async def test_tracing_concurrent() -> None:
     os.environ["LANGCHAIN_TRACING"] = "true"
     aiosession = ClientSession()
@@ -80,7 +78,6 @@ async def test_tracing_concurrent() -> None:
     await aiosession.close()
 
 
-@pytest.mark.asyncio
 async def test_tracing_concurrent_bw_compat_environ() -> None:
     os.environ["LANGCHAIN_HANDLER"] = "langchain"
     if "LANGCHAIN_TRACING" in os.environ:
@@ -113,7 +110,6 @@ def test_tracing_context_manager() -> None:
     agent.run(questions[0])  # this should not be traced
 
 
-@pytest.mark.asyncio
 async def test_tracing_context_manager_async() -> None:
     llm = OpenAI(temperature=0)
     async_tools = load_tools(["llm-math", "serpapi"], llm=llm)
@@ -133,7 +129,6 @@ async def test_tracing_context_manager_async() -> None:
     await task
 
 
-@pytest.mark.asyncio
 async def test_tracing_v2_environment_variable() -> None:
     os.environ["LANGCHAIN_TRACING_V2"] = "true"
 
@@ -196,7 +191,6 @@ def test_tracing_v2_agent_with_metadata() -> None:
     chat_agent.run(questions[0], tags=["a-tag"], metadata={"a": "b", "c": "d"})
 
 
-@pytest.mark.asyncio
 async def test_tracing_v2_async_agent_with_metadata() -> None:
     os.environ["LANGCHAIN_TRACING_V2"] = "true"
     llm = OpenAI(temperature=0, metadata={"f": "g", "h": "i"})
@@ -256,7 +250,6 @@ def test_trace_as_group_with_env_set() -> None:
         group_manager.on_chain_end({"output": final_res})
 
 
-@pytest.mark.asyncio
 async def test_trace_as_group_async() -> None:
     llm = OpenAI(temperature=0.9)
     prompt = PromptTemplate(
@@ -1,14 +1,11 @@
 """Integration tests for the langchain tracer module."""
 import asyncio
 
-import pytest
-
 from langchain.agents import AgentType, initialize_agent, load_tools
 from langchain.callbacks import get_openai_callback
 from langchain.llms import OpenAI
 
 
-@pytest.mark.asyncio
 async def test_openai_callback() -> None:
     llm = OpenAI(temperature=0)
     with get_openai_callback() as cb:
@@ -2,7 +2,6 @@
 import asyncio
 import os
 
-import pytest
 from aiohttp import ClientSession
 
 from langchain.agents import AgentType, initialize_agent, load_tools
@@ -60,7 +59,6 @@ def test_tracing_session_env_var() -> None:
     agent.run(questions[0])
 
 
-@pytest.mark.asyncio
 async def test_tracing_concurrent() -> None:
     os.environ["LANGCHAIN_WANDB_TRACING"] = "true"
     aiosession = ClientSession()
@@ -95,7 +93,6 @@ def test_tracing_context_manager() -> None:
     agent.run(questions[0])  # this should not be traced
 
 
-@pytest.mark.asyncio
 async def test_tracing_context_manager_async() -> None:
     llm = OpenAI(temperature=0)
     async_tools = load_tools(
@@ -66,7 +66,6 @@ def test_anthropic_streaming_callback() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_anthropic_async_streaming_callback() -> None:
     """Test that streaming correctly invokes on_llm_new_token callback."""
     callback_handler = FakeCallbackHandler()
@@ -122,7 +122,6 @@ def test_chat_openai_streaming_generation_info() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_async_chat_openai() -> None:
     """Test async generation."""
     chat = _get_llm(max_tokens=10, n=2)
@@ -139,7 +138,6 @@ async def test_async_chat_openai() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_async_chat_openai_streaming() -> None:
     """Test that streaming correctly invokes on_llm_new_token callback."""
     callback_handler = FakeCallbackHandler()
@@ -173,7 +171,6 @@ def test_openai_streaming(llm: AzureChatOpenAI) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_openai_astream(llm: AzureChatOpenAI) -> None:
     """Test streaming tokens from OpenAI."""
     async for token in llm.astream("I'm Pickle Rick"):
@@ -181,7 +178,6 @@ async def test_openai_astream(llm: AzureChatOpenAI) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_openai_abatch(llm: AzureChatOpenAI) -> None:
     """Test streaming tokens from AzureChatOpenAI."""
 
@@ -191,7 +187,6 @@ async def test_openai_abatch(llm: AzureChatOpenAI) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_openai_abatch_tags(llm: AzureChatOpenAI) -> None:
     """Test batch tokens from AzureChatOpenAI."""
 
@@ -212,7 +207,6 @@ def test_openai_batch(llm: AzureChatOpenAI) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_openai_ainvoke(llm: AzureChatOpenAI) -> None:
     """Test invoke tokens from AzureChatOpenAI."""
 
@@ -92,7 +92,6 @@ def test_bedrock_streaming(chat: BedrockChat) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_bedrock_astream(chat: BedrockChat) -> None:
     """Test streaming tokens from OpenAI."""
 
@@ -101,7 +100,6 @@ async def test_bedrock_astream(chat: BedrockChat) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_bedrock_abatch(chat: BedrockChat) -> None:
     """Test streaming tokens from BedrockChat."""
     result = await chat.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
@@ -110,7 +108,6 @@ async def test_bedrock_abatch(chat: BedrockChat) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_bedrock_abatch_tags(chat: BedrockChat) -> None:
     """Test batch tokens from BedrockChat."""
     result = await chat.abatch(
@@ -129,7 +126,6 @@ def test_bedrock_batch(chat: BedrockChat) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_bedrock_ainvoke(chat: BedrockChat) -> None:
     """Test invoke tokens from BedrockChat."""
     result = await chat.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
@@ -90,7 +90,6 @@ def test_fireworks_invoke(chat: ChatFireworks) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_fireworks_ainvoke(chat: ChatFireworks) -> None:
     """Tests chat completion with invoke"""
     result = await chat.ainvoke("How is the weather in New York today?", stop=[","])
@@ -119,7 +118,6 @@ def test_fireworks_batch(chat: ChatFireworks) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_fireworks_abatch(chat: ChatFireworks) -> None:
     """Test batch tokens from ChatFireworks."""
     result = await chat.abatch(
@@ -159,7 +157,6 @@ def test_fireworks_streaming_stop_words(chat: ChatFireworks) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_chat_fireworks_agenerate() -> None:
     """Test ChatFireworks wrapper with generate."""
     chat = ChatFireworks(model_kwargs={"n": 2})
@@ -176,7 +173,6 @@ async def test_chat_fireworks_agenerate() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_fireworks_astream(chat: ChatFireworks) -> None:
     """Test streaming tokens from Fireworks."""
 
@@ -4,7 +4,6 @@ Note: This test must be run with the GOOGLE_API_KEY environment variable set to
 valid API key.
 """
 
-import pytest
 from langchain_core.schema import (
     ChatGeneration,
     ChatResult,
@@ -63,7 +62,6 @@ def test_chat_google_palm_multiple_completions() -> None:
             assert isinstance(generation.message.content, str)
 
 
-@pytest.mark.asyncio
 async def test_async_chat_google_palm() -> None:
     """Test async generation."""
     chat = ChatGooglePalm(n=2, temperature=1.0)
@@ -66,7 +66,6 @@ def test_jinachat_streaming() -> None:
     assert isinstance(response, BaseMessage)
 
 
-@pytest.mark.asyncio
 async def test_async_jinachat() -> None:
     """Test async generation."""
     chat = JinaChat(max_tokens=102)
@@ -82,7 +81,6 @@ async def test_async_jinachat() -> None:
             assert generation.text == generation.message.content
 
 
-@pytest.mark.asyncio
 async def test_async_jinachat_streaming() -> None:
     """Test that streaming correctly invokes on_llm_new_token callback."""
     callback_handler = FakeCallbackHandler()
@@ -169,7 +169,6 @@ def test_chat_openai_invalid_streaming_params() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_async_chat_openai() -> None:
     """Test async generation."""
     chat = ChatOpenAI(max_tokens=10, n=2)
@@ -188,7 +187,6 @@ async def test_async_chat_openai() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_async_chat_openai_streaming() -> None:
     """Test that streaming correctly invokes on_llm_new_token callback."""
     callback_handler = FakeCallbackHandler()
@@ -214,7 +212,6 @@ async def test_async_chat_openai_streaming() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_async_chat_openai_streaming_with_function() -> None:
     """Test ChatOpenAI wrapper with multiple completions."""
 
@@ -316,7 +313,6 @@ async def test_async_chat_openai_streaming_with_function() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_async_chat_openai_bind_functions() -> None:
     """Test ChatOpenAI wrapper with multiple completions."""
 
@@ -389,7 +385,6 @@ def test_openai_streaming() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_openai_astream() -> None:
     """Test streaming tokens from OpenAI."""
     llm = ChatOpenAI(max_tokens=10)
@@ -399,7 +394,6 @@ async def test_openai_astream() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_openai_abatch() -> None:
     """Test streaming tokens from ChatOpenAI."""
     llm = ChatOpenAI(max_tokens=10)
@@ -410,7 +404,6 @@ async def test_openai_abatch() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_openai_abatch_tags() -> None:
     """Test batch tokens from ChatOpenAI."""
     llm = ChatOpenAI(max_tokens=10)
@@ -433,7 +426,6 @@ def test_openai_batch() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_openai_ainvoke() -> None:
     """Test invoke tokens from ChatOpenAI."""
     llm = ChatOpenAI(max_tokens=10)
@@ -87,7 +87,6 @@ def test_promptlayer_chat_openai_invalid_streaming_params() -> None:
         )
 
 
-@pytest.mark.asyncio
 async def test_async_promptlayer_chat_openai() -> None:
     """Test async generation."""
     chat = PromptLayerChatOpenAI(max_tokens=10, n=2)
@@ -103,7 +102,6 @@ async def test_async_promptlayer_chat_openai() -> None:
             assert generation.text == generation.message.content
 
 
-@pytest.mark.asyncio
 async def test_async_promptlayer_chat_openai_streaming() -> None:
     """Test that streaming correctly invokes on_llm_new_token callback."""
     callback_handler = FakeCallbackHandler()
@@ -51,7 +51,6 @@ def test_candidates() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_vertexai_agenerate() -> None:
     model = ChatVertexAI(temperature=0)
     message = HumanMessage(content="Hello")
@@ -1,9 +1,6 @@
-import pytest as pytest
-
 from langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader
 
 
-@pytest.mark.asyncio
 def test_async_recursive_url_loader() -> None:
     url = "https://docs.python.org/3.9/"
     loader = RecursiveUrlLoader(
@@ -19,7 +16,6 @@ def test_async_recursive_url_loader() -> None:
     assert docs[0].page_content == "placeholder"
 
 
-@pytest.mark.asyncio
 def test_async_recursive_url_loader_deterministic() -> None:
     url = "https://docs.python.org/3.9/"
     loader = RecursiveUrlLoader(
@@ -43,7 +39,6 @@ def test_sync_recursive_url_loader() -> None:
     assert docs[0].page_content == "placeholder"
 
 
-@pytest.mark.asyncio
 def test_sync_async_equivalent() -> None:
     url = "https://docs.python.org/3.9/"
     loader = RecursiveUrlLoader(url, use_async=False, max_depth=2)
@@ -1,8 +1,6 @@
 """Tests for the Playwright URL loader"""
 from typing import TYPE_CHECKING
 
-import pytest
-
 from langchain.document_loaders import PlaywrightURLLoader
 from langchain.document_loaders.url_playwright import PlaywrightEvaluator
 
@@ -43,7+41,6 @@ def test_playwright_url_loader() -> None:
     assert len(docs) > 0
 
 
-@pytest.mark.asyncio
 async def test_playwright_async_url_loader() -> None:
     """Test Playwright async URL loader."""
     urls = [
@@ -76,7 +73,6 @@ def test_playwright_url_loader_with_custom_evaluator() -> None:
     assert docs[0].page_content == "test"
 
 
-@pytest.mark.asyncio
 async def test_playwright_async_url_loader_with_custom_evaluator() -> None:
     """Test Playwright async URL loader with a custom evaluator."""
     urls = ["https://www.youtube.com/watch?v=dQw4w9WgXcQ"]
@@ -49,7 +49,6 @@ def test_azure_openai_embedding_documents_chunk_size() -> None:
     assert all([len(out) == 1536 for out in output])
 
 
-@pytest.mark.asyncio
 async def test_azure_openai_embedding_documents_async_multiple() -> None:
     """Test openai embeddings."""
     documents = ["foo bar", "bar foo", "foo"]
@@ -70,7 +69,6 @@ def test_azure_openai_embedding_query() -> None:
     assert len(output) == 1536
 
 
-@pytest.mark.asyncio
 async def test_azure_openai_embedding_async_query() -> None:
     """Test openai embeddings."""
     document = "foo bar"
@@ -38,7 +38,6 @@ def test_fastembed_embedding_query(model_name: str, max_length: int) -> None:
     assert len(output) == 384
 
 
-@pytest.mark.asyncio
 @pytest.mark.parametrize(
     "model_name", ["sentence-transformers/all-MiniLM-L6-v2", "BAAI/bge-small-en-v1.5"]
 )
@@ -61,7 +60,6 @@ async def test_fastembed_async_embedding_documents(
     assert len(output[0]) == 384
 
 
-@pytest.mark.asyncio
 @pytest.mark.parametrize(
     "model_name", ["sentence-transformers/all-MiniLM-L6-v2", "BAAI/bge-small-en-v1.5"]
 )
@@ -29,7 +29,6 @@ def test_openai_embedding_documents_multiple() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_openai_embedding_documents_async_multiple() -> None:
     """Test openai embeddings."""
     documents = ["foo bar", "bar foo", "foo"]
@@ -52,7 +51,6 @@ def test_openai_embedding_query() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_openai_embedding_async_query() -> None:
     """Test openai embeddings."""
     document = "foo bar"
@@ -52,7 +52,6 @@ def test_anthropic_streaming_callback() -> None:
     assert callback_handler.llm_streams > 1
 
 
-@pytest.mark.asyncio
 async def test_anthropic_async_generate() -> None:
     """Test async generate."""
     llm = Anthropic()
@@ -60,7 +59,6 @@ async def test_anthropic_async_generate() -> None:
     assert isinstance(output, LLMResult)
 
 
-@pytest.mark.asyncio
 async def test_anthropic_async_streaming_callback() -> None:
     """Test that streaming correctly invokes on_llm_new_token callback."""
     callback_handler = FakeCallbackHandler()
@@ -57,7 +57,6 @@ def test_openai_streaming(llm: AzureOpenAI) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_openai_astream(llm: AzureOpenAI) -> None:
     """Test streaming tokens from AzureOpenAI."""
     async for token in llm.astream("I'm Pickle Rick"):
@@ -65,7 +64,6 @@ async def test_openai_astream(llm: AzureOpenAI) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_openai_abatch(llm: AzureOpenAI) -> None:
     """Test streaming tokens from AzureOpenAI."""
     result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
@@ -73,7 +71,6 @@ async def test_openai_abatch(llm: AzureOpenAI) -> None:
         assert isinstance(token, str)
 
 
-@pytest.mark.asyncio
 async def test_openai_abatch_tags(llm: AzureOpenAI) -> None:
     """Test streaming tokens from AzureOpenAI."""
     result = await llm.abatch(
@@ -92,7 +89,6 @@ def test_openai_batch(llm: AzureOpenAI) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_openai_ainvoke(llm: AzureOpenAI) -> None:
     """Test streaming tokens from AzureOpenAI."""
     result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
@@ -157,7 +153,6 @@ def test_openai_streaming_callback() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_openai_async_generate() -> None:
     """Test async generation."""
     llm = _get_llm(max_tokens=10)
@@ -165,7 +160,6 @@ async def test_openai_async_generate() -> None:
     assert isinstance(output, LLMResult)
 
 
-@pytest.mark.asyncio
 async def test_openai_async_streaming_callback() -> None:
     """Test that streaming correctly invokes on_llm_new_token callback."""
     callback_handler = FakeCallbackHandler()
@@ -1,5 +1,4 @@
 """Test C Transformers wrapper."""
-import pytest
 
 from langchain.llms import CTransformers
 from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
@@ -22,7 +21,6 @@ def test_ctransformers_call() -> None:
     assert 0 < callback_handler.llm_streams <= config["max_new_tokens"]
 
 
-@pytest.mark.asyncio
 async def test_ctransformers_async_inference() -> None:
     config = {"max_new_tokens": 5}
     callback_handler = FakeCallbackHandler()
@@ -1,6 +1,4 @@
 """Test DeepInfra API wrapper."""
-import pytest
-
 from langchain.llms.deepinfra import DeepInfra
 
 
@@ -11,7 +9,6 @@ def test_deepinfra_call() -> None:
     assert isinstance(output, str)
 
 
-@pytest.mark.asyncio
 async def test_deepinfra_acall() -> None:
     llm = DeepInfra(model_id="meta-llama/Llama-2-7b-chat-hf")
     output = await llm.apredict("What is 2 + 2?")
@@ -27,7 +24,6 @@ def test_deepinfra_stream() -> None:
     assert num_chunks > 0
 
 
-@pytest.mark.asyncio
 async def test_deepinfra_astream() -> None:
     llm = DeepInfra(model_id="meta-llama/Llama-2-7b-chat-hf")
     num_chunks = 0
@@ -61,7 +61,6 @@ def test_fireworks_invoke(llm: Fireworks) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_fireworks_ainvoke(llm: Fireworks) -> None:
     """Tests completion with invoke"""
     output = await llm.ainvoke("How is the weather in New York today?", stop=[","])
@@ -89,7 +88,6 @@ def test_fireworks_batch(llm: Fireworks) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_fireworks_abatch(llm: Fireworks) -> None:
     """Tests completion with invoke"""
     output = await llm.abatch(
@@ -142,7 +140,6 @@ def test_fireworks_streaming_stop_words(llm: Fireworks) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_fireworks_streaming_async(llm: Fireworks) -> None:
     """Test stream completion."""
 
@@ -156,7 +153,6 @@ async def test_fireworks_streaming_async(llm: Fireworks) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_fireworks_async_agenerate(llm: Fireworks) -> None:
     """Test async."""
     output = await llm.agenerate(["What is the best city to live in California?"])
@@ -164,7 +160,6 @@ async def test_fireworks_async_agenerate(llm: Fireworks) -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_fireworks_multiple_prompts_async_agenerate(llm: Fireworks) -> None:
     output = await llm.agenerate(
         ["How is the weather in New York today?", "I'm pickle rick"]
@@ -100,7 +100,6 @@ def test_openai_streaming() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_openai_astream() -> None:
     """Test streaming tokens from OpenAI."""
     llm = OpenAI(max_tokens=10)
@@ -110,7 +109,6 @@ async def test_openai_astream() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_openai_abatch() -> None:
     """Test streaming tokens from OpenAI."""
     llm = OpenAI(max_tokens=10)
@@ -120,7 +118,6 @@ async def test_openai_abatch() -> None:
         assert isinstance(token, str)
 
 
-@pytest.mark.asyncio
 async def test_openai_abatch_tags() -> None:
     """Test streaming tokens from OpenAI."""
     llm = OpenAI(max_tokens=10)
@@ -143,7 +140,6 @@ def test_openai_batch() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_openai_ainvoke() -> None:
     """Test streaming tokens from OpenAI."""
     llm = OpenAI(max_tokens=10)
@@ -213,7 +209,6 @@ def test_openai_streaming_callback() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_openai_async_generate() -> None:
     """Test async generation."""
     llm = OpenAI(max_tokens=10)
@@ -221,7 +216,6 @@ async def test_openai_async_generate() -> None:
     assert isinstance(output, LLMResult)
 
 
-@pytest.mark.asyncio
 async def test_openai_async_streaming_callback() -> None:
     """Test that streaming correctly invokes on_llm_new_token callback."""
     callback_handler = FakeCallbackHandler()
@@ -1,7 +1,6 @@
 """Test Baidu Qianfan LLM Endpoint."""
 from typing import Generator
 
-import pytest
 from langchain_core.schema import LLMResult
 
 from langchain.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint
@@ -29,7 +28,6 @@ def test_generate_stream() -> None:
     assert isinstance(output, Generator)
 
 
-@pytest.mark.asyncio
 async def test_qianfan_aio() -> None:
     llm = QianfanLLMEndpoint(streaming=True)
 
@@ -24,7 +24,6 @@ def test_together_call() -> None:
     assert isinstance(output, str)
 
 
-@pytest.mark.asyncio
 async def test_together_acall() -> None:
     """Test simple call to together."""
     llm = Together(
@@ -49,7 +49,6 @@ def test_vertex_generate_code() -> None:
 
 
 @pytest.mark.scheduled
-@pytest.mark.asyncio
 async def test_vertex_agenerate() -> None:
     llm = VertexAI(temperature=0)
     output = await llm.agenerate(["Please say foo:"])
@@ -63,7 +62,6 @@ def test_vertex_stream() -> None:
     assert isinstance(outputs[0], str)
 
 
-@pytest.mark.asyncio
 async def test_vertex_consistency() -> None:
     llm = VertexAI(temperature=0)
     output = llm.generate(["Please say foo:"])
@@ -103,7 +101,6 @@ def test_model_garden_generate() -> None:
     assert len(output.generations) == 2
 
 
-@pytest.mark.asyncio
 async def test_model_garden_agenerate() -> None:
     endpoint_id = os.environ["ENDPOINT_ID"]
     project = os.environ["PROJECT"]
@@ -1,5 +1,4 @@
 """Test Azure Cognitive Search wrapper."""
-import pytest
 from langchain_core.schema import Document
 
 from langchain.retrievers.azure_cognitive_search import AzureCognitiveSearchRetriever
@@ -18,7 +17,6 @@ def test_azure_cognitive_search_get_relevant_documents() -> None:
     assert len(documents) <= 1
 
 
-@pytest.mark.asyncio
 async def test_azure_cognitive_search_aget_relevant_documents() -> None:
     """Test valid async call to Azure Cognitive Search."""
     retriever = AzureCognitiveSearchRetriever()
@@ -83,7 +83,6 @@ def test_zep_retriever_get_relevant_documents(
 
 
 @pytest.mark.requires("zep_python")
-@pytest.mark.asyncio
 async def test_zep_retriever_aget_relevant_documents(
     zep_retriever: ZepRetriever, search_results: List[MemorySearchResult]
 ) -> None:
|
@ -460,7 +460,6 @@ def test_chain_on_kv_singleio_dataset(
|
|||||||
_check_all_feedback_passed(eval_project_name, client)
|
_check_all_feedback_passed(eval_project_name, client)
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_runnable_on_kv_singleio_dataset(
|
async def test_runnable_on_kv_singleio_dataset(
|
||||||
kv_singleio_dataset_name: str, eval_project_name: str, client: Client
|
kv_singleio_dataset_name: str, eval_project_name: str, client: Client
|
||||||
) -> None:
|
) -> None:
|
||||||
@ -480,7 +479,6 @@ async def test_runnable_on_kv_singleio_dataset(
|
|||||||
_check_all_feedback_passed(eval_project_name, client)
|
_check_all_feedback_passed(eval_project_name, client)
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_arb_func_on_kv_singleio_dataset(
|
async def test_arb_func_on_kv_singleio_dataset(
|
||||||
kv_singleio_dataset_name: str, eval_project_name: str, client: Client
|
kv_singleio_dataset_name: str, eval_project_name: str, client: Client
|
||||||
) -> None:
|
) -> None:
|
||||||
|
@@ -3,7 +3,6 @@ import json
 from typing import Any
 from unittest import mock
 
-import pytest
 from langchain_core.schema.document import Document
 
 from langchain.document_transformers.nuclia_text_transform import NucliaTextTransformer
@@ -33,7 +32,6 @@ def fakerun(**args: Any) -> Any:
     return run
 
 
-@pytest.mark.asyncio
 async def test_nuclia_loader() -> None:
     with mock.patch(
         "langchain.tools.nuclia.tool.NucliaUnderstandingAPI._arun", new_callable=fakerun
@@ -88,7 +88,6 @@ def test_nuclia_tool() -> None:
     assert json.loads(data)["uuid"] == "fake_uuid"
 
 
-@pytest.mark.asyncio
 @pytest.mark.requires("nucliadb_protos")
 async def test_async_call() -> None:
     with mock.patch(
@@ -1,6 +1,4 @@
 """Integration test for Dataforseo API Wrapper."""
-import pytest
-
 from langchain.utilities.dataforseo_api_search import DataForSeoAPIWrapper
 
 
@@ -44,14 +42,12 @@ def test_events_call() -> None:
     )
 
 
-@pytest.mark.asyncio
 async def test_async_call() -> None:
     search = DataForSeoAPIWrapper()
     output = await search.arun("pi value")
     assert "3.14159" in output
 
 
-@pytest.mark.asyncio
 async def test_async_results() -> None:
     search = DataForSeoAPIWrapper(json_result_types=["answer_box"])
     output = await search.aresults("New York timezone")
@@ -1,6 +1,4 @@
 """Integration test for Serper.dev's Google Search API Wrapper."""
-import pytest
-
 from langchain.utilities.google_serper import GoogleSerperAPIWrapper
 
 
@@ -25,7 +23,6 @@ async def test_results() -> None:
     assert "Barack Hussein Obama II" in output["answerBox"]["answer"]
 
 
-@pytest.mark.asyncio
 async def test_async_call() -> None:
     """Test that call gives the correct answer."""
     search = GoogleSerperAPIWrapper()
@@ -33,7 +30,6 @@ async def test_async_call() -> None:
     assert "Barack Hussein Obama II" in output
 
 
-@pytest.mark.asyncio
 async def test_async_results() -> None:
     """Test that call gives the correct answer."""
     search = GoogleSerperAPIWrapper()
@@ -1,6 +1,4 @@
 """Integration tests for SearchApi"""
-import pytest
-
 from langchain.utilities.searchapi import SearchApiAPIWrapper
 
 
@@ -48,7 +46,6 @@ def test_jobs_call() -> None:
     assert "years of experience" in output
 
 
-@pytest.mark.asyncio
 async def test_async_call() -> None:
     """Test that call gives the correct answer."""
     search = SearchApiAPIWrapper()
@@ -56,7 +53,6 @@ async def test_async_call() -> None:
     assert "Barack Hussein Obama II" in output
 
 
-@pytest.mark.asyncio
 async def test_async_results() -> None:
     """Test that call gives the correct answer."""
     search = SearchApiAPIWrapper()
@@ -12,7 +12,6 @@ from tests.integration_tests.vectorstores.qdrant.async_api.fixtures import ( #
 )
 
 
-@pytest.mark.asyncio
 @pytest.mark.parametrize("batch_size", [1, 64])
 @pytest.mark.parametrize("qdrant_location", qdrant_locations())
 async def test_qdrant_aadd_texts_returns_all_ids(
@@ -31,7 +30,6 @@ async def test_qdrant_aadd_texts_returns_all_ids(
     assert 3 == len(set(ids))
 
 
-@pytest.mark.asyncio
 @pytest.mark.parametrize("vector_name", [None, "my-vector"])
 @pytest.mark.parametrize("qdrant_location", qdrant_locations())
 async def test_qdrant_aadd_texts_stores_duplicated_texts(
@@ -60,7 +58,6 @@ async def test_qdrant_aadd_texts_stores_duplicated_texts(
     assert 2 == client.count(collection_name).count
 
 
-@pytest.mark.asyncio
 @pytest.mark.parametrize("batch_size", [1, 64])
 @pytest.mark.parametrize("qdrant_location", qdrant_locations())
 async def test_qdrant_aadd_texts_stores_ids(
@@ -93,7 +90,6 @@ async def test_qdrant_aadd_texts_stores_ids(
     assert set(ids) == set(stored_ids)
 
 
-@pytest.mark.asyncio
 @pytest.mark.parametrize("vector_name", ["custom-vector"])
 @pytest.mark.parametrize("qdrant_location", qdrant_locations())
 async def test_qdrant_aadd_texts_stores_embeddings_as_named_vectors(
@@ -15,7 +15,6 @@ from tests.integration_tests.vectorstores.qdrant.async_api.fixtures import (
 from tests.integration_tests.vectorstores.qdrant.common import qdrant_is_not_running


-@pytest.mark.asyncio
 @pytest.mark.parametrize("qdrant_location", qdrant_locations())
 async def test_qdrant_from_texts_stores_duplicated_texts(qdrant_location: str) -> None:
     """Test end to end Qdrant.afrom_texts stores duplicated texts separately."""
@@ -32,7 +31,6 @@ async def test_qdrant_from_texts_stores_duplicated_texts(qdrant_location: str) -
     assert 2 == client.count(collection_name).count


-@pytest.mark.asyncio
 @pytest.mark.parametrize("batch_size", [1, 64])
 @pytest.mark.parametrize("vector_name", [None, "my-vector"])
 @pytest.mark.parametrize("qdrant_location", qdrant_locations())
@@ -61,7 +59,6 @@ async def test_qdrant_from_texts_stores_ids(
     assert set(ids) == set(stored_ids)


-@pytest.mark.asyncio
 @pytest.mark.parametrize("vector_name", ["custom-vector"])
 @pytest.mark.parametrize("qdrant_location", qdrant_locations())
 async def test_qdrant_from_texts_stores_embeddings_as_named_vectors(
@@ -87,7 +84,6 @@ async def test_qdrant_from_texts_stores_embeddings_as_named_vectors(
     )


-@pytest.mark.asyncio
 @pytest.mark.parametrize("vector_name", [None, "custom-vector"])
 @pytest.mark.skipif(qdrant_is_not_running(), reason="Qdrant is not running")
 async def test_qdrant_from_texts_reuses_same_collection(
@@ -115,7 +111,6 @@ async def test_qdrant_from_texts_reuses_same_collection(
     assert 7 == client.count(collection_name).count


-@pytest.mark.asyncio
 @pytest.mark.parametrize("vector_name", [None, "custom-vector"])
 @pytest.mark.skipif(qdrant_is_not_running(), reason="Qdrant is not running")
 async def test_qdrant_from_texts_raises_error_on_different_dimensionality(
@@ -141,7 +136,6 @@ async def test_qdrant_from_texts_raises_error_on_different_dimensionality(
     )


-@pytest.mark.asyncio
 @pytest.mark.parametrize(
     ["first_vector_name", "second_vector_name"],
     [
@@ -174,7 +168,6 @@ async def test_qdrant_from_texts_raises_error_on_different_vector_name(
     )


-@pytest.mark.asyncio
 @pytest.mark.skipif(qdrant_is_not_running(), reason="Qdrant is not running")
 async def test_qdrant_from_texts_raises_error_on_different_distance() -> None:
     """Test if Qdrant.afrom_texts raises an exception if distance does not match"""
@@ -196,7 +189,6 @@ async def test_qdrant_from_texts_raises_error_on_different_distance() -> None:
     )


-@pytest.mark.asyncio
 @pytest.mark.parametrize("vector_name", [None, "custom-vector"])
 @pytest.mark.skipif(qdrant_is_not_running(), reason="Qdrant is not running")
 async def test_qdrant_from_texts_recreates_collection_on_force_recreate(
@@ -230,7 +222,6 @@ async def test_qdrant_from_texts_recreates_collection_on_force_recreate(
     assert 5 == vector_params.size  # type: ignore[union-attr]


-@pytest.mark.asyncio
 @pytest.mark.parametrize("batch_size", [1, 64])
 @pytest.mark.parametrize("content_payload_key", [Qdrant.CONTENT_KEY, "foo"])
 @pytest.mark.parametrize("metadata_payload_key", [Qdrant.METADATA_KEY, "bar"])
@@ -12,7 +12,6 @@ from tests.integration_tests.vectorstores.qdrant.async_api.fixtures import (
 )


-@pytest.mark.asyncio
 @pytest.mark.parametrize("batch_size", [1, 64])
 @pytest.mark.parametrize("content_payload_key", [Qdrant.CONTENT_KEY, "test_content"])
 @pytest.mark.parametrize("metadata_payload_key", [Qdrant.METADATA_KEY, "test_metadata"])
@@ -13,7 +13,6 @@ from tests.integration_tests.vectorstores.qdrant.async_api.fixtures import (
 )


-@pytest.mark.asyncio
 @pytest.mark.parametrize("batch_size", [1, 64])
 @pytest.mark.parametrize("content_payload_key", [Qdrant.CONTENT_KEY, "foo"])
 @pytest.mark.parametrize("metadata_payload_key", [Qdrant.METADATA_KEY, "bar"])
@@ -41,7 +40,6 @@ async def test_qdrant_similarity_search(
     assert output == [Document(page_content="foo")]


-@pytest.mark.asyncio
 @pytest.mark.parametrize("batch_size", [1, 64])
 @pytest.mark.parametrize("content_payload_key", [Qdrant.CONTENT_KEY, "foo"])
 @pytest.mark.parametrize("metadata_payload_key", [Qdrant.METADATA_KEY, "bar"])
@@ -70,7 +68,6 @@ async def test_qdrant_similarity_search_by_vector(
     assert output == [Document(page_content="foo")]


-@pytest.mark.asyncio
 @pytest.mark.parametrize("batch_size", [1, 64])
 @pytest.mark.parametrize("content_payload_key", [Qdrant.CONTENT_KEY, "foo"])
 @pytest.mark.parametrize("metadata_payload_key", [Qdrant.METADATA_KEY, "bar"])
@@ -102,7 +99,6 @@ async def test_qdrant_similarity_search_with_score_by_vector(
     assert score >= 0


-@pytest.mark.asyncio
 @pytest.mark.parametrize("batch_size", [1, 64])
 @pytest.mark.parametrize("vector_name", [None, "my-vector"])
 @pytest.mark.parametrize("qdrant_location", qdrant_locations())
@@ -135,7 +131,6 @@ async def test_qdrant_similarity_search_filters(
     ]


-@pytest.mark.asyncio
 @pytest.mark.parametrize("vector_name", [None, "my-vector"])
 @pytest.mark.parametrize("qdrant_location", qdrant_locations())
 async def test_qdrant_similarity_search_with_relevance_score_no_threshold(
@@ -164,7 +159,6 @@ async def test_qdrant_similarity_search_with_relevance_score_no_threshold(
         assert round(output[i][1], 2) <= 1


-@pytest.mark.asyncio
 @pytest.mark.parametrize("vector_name", [None, "my-vector"])
 @pytest.mark.parametrize("qdrant_location", qdrant_locations())
 async def test_qdrant_similarity_search_with_relevance_score_with_threshold(
@@ -194,7 +188,6 @@ async def test_qdrant_similarity_search_with_relevance_score_with_threshold(
     assert all([score >= score_threshold for _, score in output])


-@pytest.mark.asyncio
 @pytest.mark.parametrize("vector_name", [None, "my-vector"])
 @pytest.mark.parametrize("qdrant_location", qdrant_locations())
 async def test_similarity_search_with_relevance_score_with_threshold_and_filter(
@@ -230,7 +223,6 @@ async def test_similarity_search_with_relevance_score_with_threshold_and_filter(
     assert all([score >= score_threshold for _, score in output])


-@pytest.mark.asyncio
 @pytest.mark.parametrize("vector_name", [None, "my-vector"])
 @pytest.mark.parametrize("qdrant_location", qdrant_locations())
 async def test_qdrant_similarity_search_filters_with_qdrant_filters(
@@ -278,7 +270,6 @@ async def test_qdrant_similarity_search_filters_with_qdrant_filters(
     ]


-@pytest.mark.asyncio
 @pytest.mark.parametrize("batch_size", [1, 64])
 @pytest.mark.parametrize("content_payload_key", [Qdrant.CONTENT_KEY, "foo"])
 @pytest.mark.parametrize("metadata_payload_key", [Qdrant.METADATA_KEY, "bar"])
@@ -23,7 +23,6 @@ def test_chroma() -> None:
     assert output == [Document(page_content="foo")]


-@pytest.mark.asyncio
 async def test_chroma_async() -> None:
     """Test end to end construction and search."""
     texts = ["foo", "bar", "baz"]
@@ -1,6 +1,4 @@
 """Test ClickHouse functionality."""
-import pytest
-
 from langchain.docstore.document import Document
 from langchain.vectorstores import Clickhouse, ClickhouseSettings
 from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
@@ -17,7 +15,6 @@ def test_clickhouse() -> None:
     docsearch.drop()


-@pytest.mark.asyncio
 async def test_clickhouse_async() -> None:
     """Test end to end construction and search."""
     texts = ["foo", "bar", "baz"]
@@ -157,7 +157,6 @@ class TestElasticsearch:
         output = docsearch.similarity_search("foo", k=1, custom_query=assert_query)
         assert output == [Document(page_content="foo")]

-    @pytest.mark.asyncio
     async def test_similarity_search_without_metadat_async(
         self, elasticsearch_connection: dict, index_name: str
     ) -> None:
@@ -1,6 +1,4 @@
 """Test MyScale functionality."""
-import pytest
-
 from langchain.docstore.document import Document
 from langchain.vectorstores import MyScale, MyScaleSettings
 from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
@@ -17,7 +15,6 @@ def test_myscale() -> None:
     docsearch.drop()


-@pytest.mark.asyncio
 async def test_myscale_async() -> None:
     """Test end to end construction and search."""
     texts = ["foo", "bar", "baz"]
@@ -3,8 +3,6 @@ import os
 from datetime import datetime, timedelta
 from typing import List

-import pytest
-
 from langchain.docstore.document import Document
 from langchain.vectorstores.timescalevector import TimescaleVector
 from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
@@ -64,7 +62,6 @@ def test_timescalevector_from_documents() -> None:
     assert output == [Document(page_content="foo", metadata={"a": "b"})]


-@pytest.mark.asyncio
 async def test_timescalevector_afrom_documents() -> None:
     """Test end to end construction and search."""
     texts = ["foo", "bar", "baz"]
@@ -96,7 +93,6 @@ def test_timescalevector_embeddings() -> None:
     assert output == [Document(page_content="foo")]


-@pytest.mark.asyncio
 async def test_timescalevector_aembeddings() -> None:
     """Test end to end construction with embeddings and search."""
     texts = ["foo", "bar", "baz"]
@@ -145,7 +141,6 @@ def test_timescalevector_with_metadatas_with_scores() -> None:
     assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]


-@pytest.mark.asyncio
 async def test_timescalevector_awith_metadatas_with_scores() -> None:
     """Test end to end construction and search."""
     texts = ["foo", "bar", "baz"]
@@ -254,7 +249,6 @@ def test_timescalevector_relevance_score() -> None:
     ]


-@pytest.mark.asyncio
 async def test_timescalevector_relevance_score_async() -> None:
     """Test to make sure the relevance score is scaled to 0-1."""
     texts = ["foo", "bar", "baz"]
@@ -191,7 +191,6 @@ def test_add_documents(


 @pytest.mark.requires("zep_python")
-@pytest.mark.asyncio
 async def test_asearch_similarity(
     zep_vectorstore: ZepVectorStore,
 ) -> None:
@@ -205,7 +204,6 @@ async def test_asearch_similarity(


 @pytest.mark.requires("zep_python")
-@pytest.mark.asyncio
 async def test_asearch_mmr(
     zep_vectorstore: ZepVectorStore,
 ) -> None:
@@ -57,7 +57,6 @@ def test_agent_iterator_stopped_early() -> None:
     )


-@pytest.mark.asyncio
 async def test_agent_async_iterator_stopped_early() -> None:
     """
     Test react chain async iterator when max iterations or
@@ -151,7 +150,6 @@ def test_agent_iterator_with_callbacks() -> None:
     )


-@pytest.mark.asyncio
 async def test_agent_async_iterator_with_callbacks() -> None:
     """Test react chain async iterator with callbacks by setting verbose globally."""
     handler1 = FakeCallbackHandler()
@@ -281,7 +279,6 @@ def test_agent_iterator_output_structure() -> None:
         assert False, "Unexpected output structure"


-@pytest.mark.asyncio
 async def test_agent_async_iterator_output_structure() -> None:
     """Test the async output structure of AgentExecutorIterator."""
     agent = _get_agent()
@@ -111,7 +111,6 @@ def test_callback_manager_with_async() -> None:
     _test_callback_manager(manager, handler1, handler2, handler3, handler4)


-@pytest.mark.asyncio
 async def test_callback_manager_with_async_with_running_loop() -> None:
     """Test the CallbackManager."""
     handler1 = FakeCallbackHandler()
@@ -187,7 +186,6 @@ def test_ignore_retriever() -> None:
     assert handler2.errors == 1


-@pytest.mark.asyncio
 async def test_async_callback_manager() -> None:
     """Test the AsyncCallbackManager."""
     handler1 = FakeAsyncCallbackHandler()
@@ -196,7 +194,6 @@ async def test_async_callback_manager() -> None:
     await _test_callback_manager_async(manager, handler1, handler2)


-@pytest.mark.asyncio
 async def test_async_callback_manager_sync_handler() -> None:
     """Test the AsyncCallbackManager."""
     handler1 = FakeCallbackHandler()
@@ -180,7 +180,6 @@ def test_simple_sequential_functionality() -> None:
     assert output == expected_output


-@pytest.mark.asyncio
 @pytest.mark.parametrize("isAsync", [False, True])
 async def test_simple_sequential_functionality_with_callbacks(isAsync: bool) -> None:
     """Test simple sequential functionality."""
@@ -30,7 +30,6 @@ def expected_documents() -> List[Document]:


 @pytest.mark.requires("motor")
-@pytest.mark.asyncio
 async def test_load_mocked(expected_documents: List[Document]) -> None:
     mock_async_load = AsyncMock()
     mock_async_load.return_value = expected_documents
@@ -17,7 +17,6 @@ def test_zero_distance(distance: StringDistance) -> None:
     assert result["score"] == 0


-@pytest.mark.asyncio
 @pytest.mark.requires("rapidfuzz")
 @pytest.mark.parametrize("distance", list(StringDistance))
 async def test_zero_distance_async(distance: StringDistance) -> None:
@@ -43,7 +42,6 @@ def test_zero_distance_pairwise(
     assert result["score"] == 0


-@pytest.mark.asyncio
 @pytest.mark.requires("rapidfuzz")
 @pytest.mark.parametrize("distance", list(StringDistance))
 async def test_zero_distance_pairwise_async(distance: StringDistance) -> None:
@@ -77,7 +75,6 @@ def test_non_zero_distance(distance: StringDistance, normalize_score: bool) -> N
     assert result["score"] < 1.0


-@pytest.mark.asyncio
 @pytest.mark.requires("rapidfuzz")
 @pytest.mark.parametrize("distance", valid_distances)
 async def test_non_zero_distance_async(distance: StringDistance) -> None:
@@ -104,7 +101,6 @@ def test_non_zero_distance_pairwise(distance: StringDistance) -> None:
     assert 0 < result["score"] < 1.0


-@pytest.mark.asyncio
 @pytest.mark.requires("rapidfuzz")
 @pytest.mark.parametrize("distance", valid_distances)
 async def test_non_zero_distance_pairwise_async(distance: StringDistance) -> None:
@@ -208,7 +208,6 @@ def test_indexing_same_content(
     }


-@pytest.mark.asyncio
 @pytest.mark.requires("aiosqlite")
 async def test_aindexing_same_content(
     arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
@@ -321,7 +320,6 @@ def test_index_simple_delete_full(
     }


-@pytest.mark.asyncio
 @pytest.mark.requires("aiosqlite")
 async def test_aindex_simple_delete_full(
     arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
@@ -442,7 +440,6 @@ def test_incremental_fails_with_bad_source_ids(
     )


-@pytest.mark.asyncio
 @pytest.mark.requires("aiosqlite")
 async def test_aincremental_fails_with_bad_source_ids(
     arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
@@ -566,7 +563,6 @@ def test_no_delete(
     }


-@pytest.mark.asyncio
 @pytest.mark.requires("aiosqlite")
 async def test_ano_delete(
     arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
@@ -753,7 +749,6 @@ def test_incremental_delete(
     }


-@pytest.mark.asyncio
 @pytest.mark.requires("aiosqlite")
 async def test_aincremental_delete(
     arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
@@ -873,7 +868,6 @@ def test_indexing_with_no_docs(
     }


-@pytest.mark.asyncio
 @pytest.mark.requires("aiosqlite")
 async def test_aindexing_with_no_docs(
     arecord_manager: SQLRecordManager, vector_store: VectorStore
@@ -915,7 +909,6 @@ def test_deduplication(
     }


-@pytest.mark.asyncio
 @pytest.mark.requires("aiosqlite")
 async def test_adeduplication(
     arecord_manager: SQLRecordManager, vector_store: VectorStore
@@ -978,7 +971,6 @@ def test_cleanup_with_different_batchsize(
     }


-@pytest.mark.asyncio
 @pytest.mark.requires("aiosqlite")
 async def test_async_cleanup_with_different_batchsize(
     arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
@@ -1061,7 +1053,6 @@ async def _to_async_iter(it: Iterable[Any]) -> AsyncIterator[Any]:
         yield i


-@pytest.mark.asyncio
 async def test_abatch() -> None:
     """Test the abatch function."""
     batches = _abatch(5, _to_async_iter(range(12)))
@@ -44,7 +44,6 @@ def test_update(manager: SQLRecordManager) -> None:
     assert read_keys == ["key1", "key2", "key3"]


-@pytest.mark.asyncio
 @pytest.mark.requires("aiosqlite")
 async def test_aupdate(amanager: SQLRecordManager) -> None:
     """Test updating records in the database."""
@@ -150,7 +149,6 @@ def test_update_timestamp(manager: SQLRecordManager) -> None:
     ]


-@pytest.mark.asyncio
 @pytest.mark.requires("aiosqlite")
 async def test_aupdate_timestamp(amanager: SQLRecordManager) -> None:
     """Test updating records in the database."""
@@ -274,7 +272,6 @@ def test_update_with_group_ids(manager: SQLRecordManager) -> None:
     assert read_keys == ["key1", "key2", "key3"]


-@pytest.mark.asyncio
 @pytest.mark.requires("aiosqlite")
 async def test_aupdate_with_group_ids(amanager: SQLRecordManager) -> None:
     """Test updating records in the database."""
@@ -304,7 +301,6 @@ def test_exists(manager: SQLRecordManager) -> None:
     assert exists == [True, False]


-@pytest.mark.asyncio
 @pytest.mark.requires("aiosqlite")
 async def test_aexists(amanager: SQLRecordManager) -> None:
     """Test checking if keys exist in the database."""
@@ -408,7 +404,6 @@ def test_list_keys(manager: SQLRecordManager) -> None:
     ) == ["key4"]


-@pytest.mark.asyncio
 @pytest.mark.requires("aiosqlite")
 async def test_alist_keys(amanager: SQLRecordManager) -> None:
     """Test listing keys based on the provided date range."""
@@ -527,7 +522,6 @@ def test_namespace_is_used(manager: SQLRecordManager) -> None:
     ]


-@pytest.mark.asyncio
 @pytest.mark.requires("aiosqlite")
 async def test_anamespace_is_used(amanager: SQLRecordManager) -> None:
     """Verify that namespace is taken into account for all operations."""
@@ -571,7 +565,6 @@ def test_delete_keys(manager: SQLRecordManager) -> None:
     assert remaining_keys == ["key3"]


-@pytest.mark.asyncio
 @pytest.mark.requires("aiosqlite")
 async def test_adelete_keys(amanager: SQLRecordManager) -> None:
     """Test deleting keys from the database."""
@@ -6,7 +6,6 @@ try:
 except ImportError:
     from sqlalchemy.ext.declarative import declarative_base

-import pytest
 from langchain_core.schema import Generation, LLMResult

 from langchain.cache import InMemoryCache, SQLAlchemyCache
@@ -86,7 +85,6 @@ def test_batch() -> None:
     assert output == ["foo"] * 3


-@pytest.mark.asyncio
 async def test_abatch() -> None:
     llm = FakeLLM()
     output = await llm.abatch(["foo", "bar", "foo"])
@@ -115,7 +115,6 @@ def test_openai_retries(mock_completion: dict) -> None:
     _openai_v1_installed(), reason="Retries only handled by LangChain for openai<1"
 )
 @pytest.mark.requires("openai")
-@pytest.mark.asyncio
 async def test_openai_async_retries(mock_completion: dict) -> None:
     llm = OpenAI()
     mock_client = MagicMock()
@@ -494,7 +494,6 @@ def test_partial_functions_json_output_parser_diff() -> None:
     assert list(chain.stream(None)) == EXPECTED_STREAMED_JSON_DIFF


-@pytest.mark.asyncio
 async def test_partial_text_json_output_parser_async() -> None:
     async def input_iter(_: Any) -> AsyncIterator[str]:
         for token in STREAMED_TOKENS:
@@ -505,7 +504,6 @@ async def test_partial_text_json_output_parser_async() -> None:
     assert [p async for p in chain.astream(None)] == EXPECTED_STREAMED_JSON


-@pytest.mark.asyncio
 async def test_partial_functions_json_output_parser_async() -> None:
     async def input_iter(_: Any) -> AsyncIterator[AIMessageChunk]:
         for token in STREAMED_TOKENS:
@@ -518,7 +516,6 @@ async def test_partial_functions_json_output_parser_async() -> None:
     assert [p async for p in chain.astream(None)] == EXPECTED_STREAMED_JSON


-@pytest.mark.asyncio
 async def test_partial_text_json_output_parser_diff_async() -> None:
     async def input_iter(_: Any) -> AsyncIterator[str]:
         for token in STREAMED_TOKENS:
@@ -529,7 +526,6 @@ async def test_partial_text_json_output_parser_diff_async() -> None:
     assert [p async for p in chain.astream(None)] == EXPECTED_STREAMED_JSON_DIFF


-@pytest.mark.asyncio
 async def test_partial_functions_json_output_parser_diff_async() -> None:
     async def input_iter(_: Any) -> AsyncIterator[AIMessageChunk]:
         for token in STREAMED_TOKENS:
@@ -130,7 +130,6 @@ def test__get_relevant_documents(fake_self_query_retriever: SelfQueryRetriever)
     assert relevant_documents[0].metadata["foo"] == "bar"


-@pytest.mark.asyncio
 async def test__aget_relevant_documents(
     fake_self_query_retriever: SelfQueryRetriever,
 ) -> None:
@@ -59,7 +59,6 @@ def test_fake_retriever_v1_upgrade(fake_retriever_v1: BaseRetriever) -> None:
     assert callbacks.retriever_errors == 0


-@pytest.mark.asyncio
 async def test_fake_retriever_v1_upgrade_async(
     fake_retriever_v1: BaseRetriever,
 ) -> None:
@@ -122,7 +121,6 @@ def test_fake_retriever_v1_with_kwargs_upgrade(
     assert callbacks.retriever_errors == 0


-@pytest.mark.asyncio
 async def test_fake_retriever_v1_with_kwargs_upgrade_async(
     fake_retriever_v1_with_kwargs: BaseRetriever,
 ) -> None:
@@ -202,7 +200,6 @@ def test_fake_retriever_v2(
     assert callbacks.retriever_errors == 1


-@pytest.mark.asyncio
 async def test_fake_retriever_v2_async(
     fake_retriever_v2: BaseRetriever, fake_erroring_retriever_v2: BaseRetriever
 ) -> None:
@@ -239,7 +239,6 @@ def test_run_chat_model_all_formats(inputs: Dict[str, Any]) -> None:
     _run_llm(llm, inputs, mock.MagicMock())


-@pytest.mark.asyncio
 @freeze_time("2023-01-01")
 async def test_arun_on_dataset(monkeypatch: pytest.MonkeyPatch) -> None:
     dataset = Dataset(
@@ -1,8 +1,6 @@
 import warnings
 from typing import List

-import pytest
-
 from langchain.tools.shell.tool import ShellInput, ShellTool

 # Test data
@@ -55,7 +53,6 @@ def test_shell_tool_run() -> None:
     assert result.strip() == "hello"


-@pytest.mark.asyncio
 async def test_shell_tool_arun() -> None:
     placeholder = PlaceholderProcess(output="hello")
     shell_tool = ShellTool(process=placeholder)
@@ -133,7 +133,6 @@ def test_create_action_payload_with_params() -> None:
     assert payload["test"] == "test"


-@pytest.mark.asyncio
 async def test_apreview(mocker) -> None:  # type: ignore[no-untyped-def]
     """Test that the action payload with params is being created correctly."""
     tool = ZapierNLARunAction(
@@ -162,7 +161,6 @@ async def test_apreview(mocker) -> None:  # type: ignore[no-untyped-def]
     )


-@pytest.mark.asyncio
 async def test_arun(mocker) -> None:  # type: ignore[no-untyped-def]
     """Test that the action payload with params is being created correctly."""
     tool = ZapierNLARunAction(
@@ -187,7 +185,6 @@ async def test_arun(mocker) -> None:  # type: ignore[no-untyped-def]
     )


-@pytest.mark.asyncio
 async def test_alist(mocker) -> None:  # type: ignore[no-untyped-def]
     """Test that the action payload with params is being created correctly."""
     tool = ZapierNLARunAction(
@@ -31,7 +31,6 @@ def test_faiss() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_faiss_afrom_texts() -> None:
     """Test end to end construction and search."""
     texts = ["foo", "bar", "baz"]
@@ -69,7 +68,6 @@ def test_faiss_vector_sim() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_faiss_async_vector_sim() -> None:
     """Test vector similarity."""
     texts = ["foo", "bar", "baz"]
@@ -108,7 +106,6 @@ def test_faiss_vector_sim_with_score_threshold() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_faiss_vector_async_sim_with_score_threshold() -> None:
     """Test vector similarity."""
     texts = ["foo", "bar", "baz"]
@@ -150,7 +147,6 @@ def test_similarity_search_with_score_by_vector() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_similarity_async_search_with_score_by_vector() -> None:
     """Test vector similarity with score by vector."""
     texts = ["foo", "bar", "baz"]
@@ -196,7 +192,6 @@ def test_similarity_search_with_score_by_vector_with_score_threshold() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_sim_asearch_with_score_by_vector_with_score_threshold() -> None:
     """Test vector similarity with score by vector."""
     texts = ["foo", "bar", "baz"]
@@ -237,7 +232,6 @@ def test_faiss_mmr() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_faiss_async_mmr() -> None:
     texts = ["foo", "foo", "fou", "foy"]
     docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings())
@@ -268,7 +262,6 @@ def test_faiss_mmr_with_metadatas() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_faiss_async_mmr_with_metadatas() -> None:
     texts = ["foo", "foo", "fou", "foy"]
     metadatas = [{"page": i} for i in range(len(texts))]
@@ -298,7 +291,6 @@ def test_faiss_mmr_with_metadatas_and_filter() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_faiss_async_mmr_with_metadatas_and_filter() -> None:
     texts = ["foo", "foo", "fou", "foy"]
     metadatas = [{"page": i} for i in range(len(texts))]
@@ -328,7 +320,6 @@ def test_faiss_mmr_with_metadatas_and_list_filter() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_faiss_async_mmr_with_metadatas_and_list_filter() -> None:
     texts = ["foo", "foo", "fou", "foy"]
     metadatas = [{"page": i} if i <= 3 else {"page": 3} for i in range(len(texts))]
@@ -368,7 +359,6 @@ def test_faiss_with_metadatas() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_faiss_async_with_metadatas() -> None:
     """Test end to end construction and search."""
     texts = ["foo", "bar", "baz"]
@@ -416,7 +406,6 @@ def test_faiss_with_metadatas_and_filter() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_faiss_async_with_metadatas_and_filter() -> None:
     texts = ["foo", "bar", "baz"]
     metadatas = [{"page": i} for i in range(len(texts))]
@@ -469,7 +458,6 @@ def test_faiss_with_metadatas_and_list_filter() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_faiss_async_with_metadatas_and_list_filter() -> None:
     texts = ["foo", "bar", "baz", "foo", "qux"]
     metadatas = [{"page": i} if i <= 3 else {"page": 3} for i in range(len(texts))]
@@ -510,7 +498,6 @@ def test_faiss_search_not_found() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_faiss_async_search_not_found() -> None:
     """Test what happens when document is not found."""
     texts = ["foo", "bar", "baz"]
@@ -534,7 +521,6 @@ def test_faiss_add_texts() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_faiss_async_add_texts() -> None:
     """Test end to end adding of texts."""
     # Create initial doc store.
@@ -555,7 +541,6 @@ def test_faiss_add_texts_not_supported() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_faiss_async_add_texts_not_supported() -> None:
     """Test adding of texts to a docstore that doesn't support it."""
     docsearch = FAISS(FakeEmbeddings(), None, FakeDocstore(), {})
@@ -576,7 +561,6 @@ def test_faiss_local_save_load() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_faiss_async_local_save_load() -> None:
     """Test end to end serialization."""
     texts = ["foo", "bar", "baz"]
@@ -604,7 +588,6 @@ def test_faiss_similarity_search_with_relevance_scores() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_faiss_async_similarity_search_with_relevance_scores() -> None:
     """Test the similarity search with normalized similarities."""
     texts = ["foo", "bar", "baz"]
@@ -638,7 +621,6 @@ def test_faiss_similarity_search_with_relevance_scores_with_threshold() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_faiss_asimilarity_search_with_relevance_scores_with_threshold() -> None:
     """Test the similarity search with normalized similarities with score threshold."""
     texts = ["foo", "bar", "baz"]
@@ -668,7 +650,6 @@ def test_faiss_invalid_normalize_fn() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_faiss_async_invalid_normalize_fn() -> None:
     """Test the similarity search with normalized similarities."""
     texts = ["foo", "bar", "baz"]
@@ -689,7 +670,6 @@ def test_missing_normalize_score_fn() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_async_missing_normalize_score_fn() -> None:
     """Test doesn't perform similarity search without a valid distance strategy."""
     texts = ["foo", "bar", "baz"]
@@ -713,7 +693,6 @@ def test_delete() -> None:


 @pytest.mark.requires("faiss")
-@pytest.mark.asyncio
 async def test_async_delete() -> None:
     """Test the similarity search with normalized similarities."""
     ids = ["a", "b", "c"]
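The pattern is the same in every hunk above: once pytest-asyncio's auto mode is enabled via `asyncio_mode = "auto"` under `[tool.pytest.ini_options]`, every collected `async def` test is wrapped in an event loop automatically, so the per-test `@pytest.mark.asyncio` markers (and the `import pytest` lines that existed only for them) become redundant. A minimal sketch of what auto mode permits — the file name and test below are hypothetical illustrations, not part of this commit:

# tests/unit_tests/test_async_example.py (hypothetical)
import asyncio


async def test_sleep_returns_none() -> None:
    # No @pytest.mark.asyncio needed: under auto mode, pytest-asyncio
    # runs this coroutine on an event loop at test time.
    assert await asyncio.sleep(0) is None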