@@ -0,0 +1,83 @@
"""**Callback handlers** allow listening to events in LangChain.

**Class hierarchy:**

.. code-block::

    BaseCallbackHandler --> <name>CallbackHandler  # Example: AimCallbackHandler
"""

from langchain_core.callbacks import (
    StdOutCallbackHandler,
    StreamingStdOutCallbackHandler,
)
from langchain_core.tracers.langchain import LangChainTracer

from langchain_community.callbacks.aim_callback import AimCallbackHandler
from langchain_community.callbacks.argilla_callback import ArgillaCallbackHandler
from langchain_community.callbacks.arize_callback import ArizeCallbackHandler
from langchain_community.callbacks.arthur_callback import ArthurCallbackHandler
from langchain_community.callbacks.clearml_callback import ClearMLCallbackHandler
from langchain_community.callbacks.comet_ml_callback import CometCallbackHandler
from langchain_community.callbacks.context_callback import ContextCallbackHandler
from langchain_community.callbacks.file import FileCallbackHandler
from langchain_community.callbacks.flyte_callback import FlyteCallbackHandler
from langchain_community.callbacks.human import HumanApprovalCallbackHandler
from langchain_community.callbacks.infino_callback import InfinoCallbackHandler
from langchain_community.callbacks.labelstudio_callback import (
    LabelStudioCallbackHandler,
)
from langchain_community.callbacks.llmonitor_callback import LLMonitorCallbackHandler
from langchain_community.callbacks.manager import (
    get_openai_callback,
    wandb_tracing_enabled,
)
from langchain_community.callbacks.mlflow_callback import MlflowCallbackHandler
from langchain_community.callbacks.openai_info import OpenAICallbackHandler
from langchain_community.callbacks.promptlayer_callback import (
    PromptLayerCallbackHandler,
)
from langchain_community.callbacks.sagemaker_callback import SageMakerCallbackHandler
from langchain_community.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
from langchain_community.callbacks.streaming_stdout_final_only import (
    FinalStreamingStdOutCallbackHandler,
)
from langchain_community.callbacks.streamlit import (
    LLMThoughtLabeler,
    StreamlitCallbackHandler,
)
from langchain_community.callbacks.trubrics_callback import TrubricsCallbackHandler
from langchain_community.callbacks.wandb_callback import WandbCallbackHandler
from langchain_community.callbacks.whylabs_callback import WhyLabsCallbackHandler

__all__ = [
    "AimCallbackHandler",
    "ArgillaCallbackHandler",
    "ArizeCallbackHandler",
    "PromptLayerCallbackHandler",
    "ArthurCallbackHandler",
    "ClearMLCallbackHandler",
    "CometCallbackHandler",
    "ContextCallbackHandler",
    "FileCallbackHandler",
    "HumanApprovalCallbackHandler",
    "InfinoCallbackHandler",
    "MlflowCallbackHandler",
    "LLMonitorCallbackHandler",
    "OpenAICallbackHandler",
    "StdOutCallbackHandler",
    "AsyncIteratorCallbackHandler",
    "StreamingStdOutCallbackHandler",
    "FinalStreamingStdOutCallbackHandler",
    "LLMThoughtLabeler",
    "LangChainTracer",
    "StreamlitCallbackHandler",
    "WandbCallbackHandler",
    "WhyLabsCallbackHandler",
    "get_openai_callback",
    "wandb_tracing_enabled",
    "FlyteCallbackHandler",
    "SageMakerCallbackHandler",
    "LabelStudioCallbackHandler",
    "TrubricsCallbackHandler",
]
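The `BaseCallbackHandler --> <name>CallbackHandler` hierarchy in the docstring above is the extension point behind every integration this module imports. A minimal sketch of a custom handler, assuming only the documented `on_llm_start`/`on_llm_end` hooks of `BaseCallbackHandler` (the `TimingCallbackHandler` name is hypothetical, not part of this commit):

```python
import time

from langchain_core.callbacks import BaseCallbackHandler


class TimingCallbackHandler(BaseCallbackHandler):
    """Toy handler that reports how long each LLM call takes."""

    def __init__(self) -> None:
        self._start = 0.0

    def on_llm_start(self, serialized, prompts, **kwargs):
        # Called once per LLM invocation, before the request is sent
        self._start = time.monotonic()
        print(f"LLM starting with {len(prompts)} prompt(s)")

    def on_llm_end(self, response, **kwargs):
        # Called with the LLMResult once the call finishes
        print(f"LLM finished in {time.monotonic() - self._start:.2f}s")
```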
@@ -0,0 +1,69 @@
from __future__ import annotations

import logging
from contextlib import contextmanager
from contextvars import ContextVar
from typing import (
    Generator,
    Optional,
)

from langchain_core.tracers.context import register_configure_hook

from langchain_community.callbacks.openai_info import OpenAICallbackHandler
from langchain_community.callbacks.tracers.wandb import WandbTracer

logger = logging.getLogger(__name__)

openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar(
    "openai_callback", default=None
)
wandb_tracing_callback_var: ContextVar[Optional[WandbTracer]] = ContextVar(  # noqa: E501
    "tracing_wandb_callback", default=None
)

register_configure_hook(openai_callback_var, True)
register_configure_hook(
    wandb_tracing_callback_var, True, WandbTracer, "LANGCHAIN_WANDB_TRACING"
)


@contextmanager
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
    """Get the OpenAI callback handler in a context manager,
    which conveniently exposes token and cost information.

    Returns:
        OpenAICallbackHandler: The OpenAI callback handler.

    Example:
        >>> with get_openai_callback() as cb:
        ...     # Use the OpenAI callback handler
    """
    cb = OpenAICallbackHandler()
    openai_callback_var.set(cb)
    yield cb
    openai_callback_var.set(None)


@contextmanager
def wandb_tracing_enabled(
    session_name: str = "default",
) -> Generator[None, None, None]:
    """Get the WandbTracer in a context manager.

    Args:
        session_name (str, optional): The name of the session.
            Defaults to "default".

    Returns:
        None

    Example:
        >>> with wandb_tracing_enabled() as session:
        ...     # Use the WandbTracer session
    """
    cb = WandbTracer()
    wandb_tracing_callback_var.set(cb)
    yield None
    wandb_tracing_callback_var.set(None)
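Putting the two context managers together — a sketch, assuming an OpenAI key is configured (`ChatOpenAI` comes from the `langchain_openai` package created later in this commit; `total_tokens` and `total_cost` are attributes of `OpenAICallbackHandler`):

```python
from langchain_community.callbacks.manager import (
    get_openai_callback,
    wandb_tracing_enabled,
)
from langchain_openai import ChatOpenAI

llm = ChatOpenAI()

with get_openai_callback() as cb:
    llm.invoke("Tell me a joke")
# Usage accumulated while the context was active
print(cb.total_tokens, cb.total_cost)

with wandb_tracing_enabled():
    llm.invoke("Tell me another joke")  # this call is traced to Weights & Biases
```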
@@ -0,0 +1,20 @@
"""Tracers that record execution of LangChain runs."""

from langchain_core.tracers.langchain import LangChainTracer
from langchain_core.tracers.langchain_v1 import LangChainTracerV1
from langchain_core.tracers.stdout import (
    ConsoleCallbackHandler,
    FunctionCallbackHandler,
)

from langchain_community.callbacks.tracers.logging import LoggingCallbackHandler
from langchain_community.callbacks.tracers.wandb import WandbTracer

__all__ = [
    "ConsoleCallbackHandler",
    "FunctionCallbackHandler",
    "LoggingCallbackHandler",
    "LangChainTracer",
    "LangChainTracerV1",
    "WandbTracer",
]
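Any of these tracers can be attached as an ordinary callback. A minimal, key-free sketch using `ConsoleCallbackHandler` to pretty-print a run to stdout (the `RunnableLambda` chain is illustrative, not from this commit):

```python
from langchain_core.runnables import RunnableLambda
from langchain_core.tracers.stdout import ConsoleCallbackHandler

# A trivial runnable stands in for any real chain
chain = RunnableLambda(lambda x: x["question"].upper())

result = chain.invoke(
    {"question": "hi"},
    config={"callbacks": [ConsoleCallbackHandler()]},
)
```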
@@ -0,0 +1,49 @@
"""
**Utility functions** for LangChain.

These functions do not depend on any other LangChain module.
"""

from langchain_core.utils.env import get_from_dict_or_env, get_from_env
from langchain_core.utils.formatting import StrictFormatter, formatter
from langchain_core.utils.input import (
    get_bolded_text,
    get_color_mapping,
    get_colored_text,
    print_text,
)
from langchain_core.utils.loading import try_load_from_hub
from langchain_core.utils.strings import comma_list, stringify_dict, stringify_value
from langchain_core.utils.utils import (
    build_extra_kwargs,
    check_package_version,
    convert_to_secret_str,
    get_pydantic_field_names,
    guard_import,
    mock_now,
    raise_for_status_with_text,
    xor_args,
)

__all__ = [
    "StrictFormatter",
    "check_package_version",
    "convert_to_secret_str",
    "formatter",
    "get_bolded_text",
    "get_color_mapping",
    "get_colored_text",
    "get_pydantic_field_names",
    "guard_import",
    "mock_now",
    "print_text",
    "raise_for_status_with_text",
    "xor_args",
    "try_load_from_hub",
    "build_extra_kwargs",
    "get_from_env",
    "get_from_dict_or_env",
    "stringify_dict",
    "comma_list",
    "stringify_value",
]
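A short, runnable sketch of two of the re-exported helpers (the `MY_API_KEY` variable name is hypothetical, set here only so the demo runs):

```python
import os

from langchain_core.utils import comma_list, get_from_dict_or_env

print(comma_list(["apples", "oranges"]))  # -> "apples, oranges"

os.environ["MY_API_KEY"] = "sk-placeholder"
# Reads "api_key" from the dict first, then falls back to the MY_API_KEY env var
key = get_from_dict_or_env({}, "api_key", "MY_API_KEY")
print(key)
```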
@@ -0,0 +1,83 @@
"""**Callback handlers** allow listening to events in LangChain.

**Class hierarchy:**

.. code-block::

    BaseCallbackHandler --> <name>CallbackHandler  # Example: AimCallbackHandler
"""

from langchain_core.callbacks import StdOutCallbackHandler, StreamingStdOutCallbackHandler
from langchain_core.tracers.langchain import LangChainTracer
from langchain_core.tracers.context import (
    collect_runs,
    tracing_enabled,
    tracing_v2_enabled,
)

from langchain_community.callbacks.aim_callback import AimCallbackHandler
from langchain_community.callbacks.argilla_callback import ArgillaCallbackHandler
from langchain_community.callbacks.arize_callback import ArizeCallbackHandler
from langchain_community.callbacks.arthur_callback import ArthurCallbackHandler
from langchain_community.callbacks.clearml_callback import ClearMLCallbackHandler
from langchain_community.callbacks.comet_ml_callback import CometCallbackHandler
from langchain_community.callbacks.context_callback import ContextCallbackHandler
from langchain_community.callbacks.file import FileCallbackHandler
from langchain_community.callbacks.flyte_callback import FlyteCallbackHandler
from langchain_community.callbacks.human import HumanApprovalCallbackHandler
from langchain_community.callbacks.infino_callback import InfinoCallbackHandler
from langchain_community.callbacks.labelstudio_callback import LabelStudioCallbackHandler
from langchain_community.callbacks.llmonitor_callback import LLMonitorCallbackHandler
from langchain_community.callbacks.mlflow_callback import MlflowCallbackHandler
from langchain_community.callbacks.openai_info import OpenAICallbackHandler
from langchain_community.callbacks.promptlayer_callback import PromptLayerCallbackHandler
from langchain_community.callbacks.sagemaker_callback import SageMakerCallbackHandler
from langchain_community.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
from langchain_community.callbacks.streaming_stdout_final_only import (
    FinalStreamingStdOutCallbackHandler,
)
from langchain_community.callbacks.streamlit import LLMThoughtLabeler, StreamlitCallbackHandler
from langchain_community.callbacks.trubrics_callback import TrubricsCallbackHandler
from langchain_community.callbacks.wandb_callback import WandbCallbackHandler
from langchain_community.callbacks.whylabs_callback import WhyLabsCallbackHandler

from langchain_community.callbacks.manager import (
    get_openai_callback,
    wandb_tracing_enabled,
)


__all__ = [
    "AimCallbackHandler",
    "ArgillaCallbackHandler",
    "ArizeCallbackHandler",
    "PromptLayerCallbackHandler",
    "ArthurCallbackHandler",
    "ClearMLCallbackHandler",
    "CometCallbackHandler",
    "ContextCallbackHandler",
    "FileCallbackHandler",
    "HumanApprovalCallbackHandler",
    "InfinoCallbackHandler",
    "MlflowCallbackHandler",
    "LLMonitorCallbackHandler",
    "OpenAICallbackHandler",
    "StdOutCallbackHandler",
    "AsyncIteratorCallbackHandler",
    "StreamingStdOutCallbackHandler",
    "FinalStreamingStdOutCallbackHandler",
    "LLMThoughtLabeler",
    "LangChainTracer",
    "StreamlitCallbackHandler",
    "WandbCallbackHandler",
    "WhyLabsCallbackHandler",
    "get_openai_callback",
    "tracing_enabled",
    "tracing_v2_enabled",
    "collect_runs",
    "wandb_tracing_enabled",
    "FlyteCallbackHandler",
    "SageMakerCallbackHandler",
    "LabelStudioCallbackHandler",
    "TrubricsCallbackHandler",
]
@@ -0,0 +1,68 @@
from __future__ import annotations

from langchain_core.callbacks.manager import (
    AsyncCallbackManager,
    AsyncCallbackManagerForChainGroup,
    AsyncCallbackManagerForChainRun,
    AsyncCallbackManagerForLLMRun,
    AsyncCallbackManagerForRetrieverRun,
    AsyncCallbackManagerForToolRun,
    AsyncParentRunManager,
    AsyncRunManager,
    BaseRunManager,
    CallbackManager,
    CallbackManagerForChainGroup,
    CallbackManagerForChainRun,
    CallbackManagerForLLMRun,
    CallbackManagerForRetrieverRun,
    CallbackManagerForToolRun,
    Callbacks,
    ParentRunManager,
    RunManager,
    ahandle_event,
    atrace_as_chain_group,
    handle_event,
    trace_as_chain_group,
)
from langchain_core.tracers.context import (
    collect_runs,
    tracing_enabled,
    tracing_v2_enabled,
)
from langchain_core.utils.env import env_var_is_set

from langchain_community.callbacks.manager import (
    get_openai_callback,
    wandb_tracing_enabled,
)


__all__ = [
    "BaseRunManager",
    "RunManager",
    "ParentRunManager",
    "AsyncRunManager",
    "AsyncParentRunManager",
    "CallbackManagerForLLMRun",
    "AsyncCallbackManagerForLLMRun",
    "CallbackManagerForChainRun",
    "AsyncCallbackManagerForChainRun",
    "CallbackManagerForToolRun",
    "AsyncCallbackManagerForToolRun",
    "CallbackManagerForRetrieverRun",
    "AsyncCallbackManagerForRetrieverRun",
    "CallbackManager",
    "CallbackManagerForChainGroup",
    "AsyncCallbackManager",
    "AsyncCallbackManagerForChainGroup",
    "tracing_enabled",
    "tracing_v2_enabled",
    "collect_runs",
    "atrace_as_chain_group",
    "trace_as_chain_group",
    "handle_event",
    "ahandle_event",
    "Callbacks",
    "env_var_is_set",
    "get_openai_callback",
    "wandb_tracing_enabled",
]
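Among the re-exports, `trace_as_chain_group` groups several calls under one traced parent run. A key-free sketch under assumed usage (the `RunnableLambda` chain and the group name are illustrative):

```python
from langchain.callbacks.manager import trace_as_chain_group
from langchain_core.runnables import RunnableLambda

chain = RunnableLambda(lambda x: x * 2)

# Both invocations are recorded under a single "math" parent run
with trace_as_chain_group("math") as manager:
    chain.invoke(3, config={"callbacks": manager})
    chain.invoke(4, config={"callbacks": manager})
```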
@@ -0,0 +1,20 @@
from langchain_openai.chat_models import AzureChatOpenAI, ChatOpenAI, _import_tiktoken
from langchain_openai.embeddings import AzureOpenAIEmbeddings, OpenAIEmbeddings
from langchain_openai.functions import (
    convert_pydantic_to_openai_function,
    convert_pydantic_to_openai_tool,
)
from langchain_openai.llms import AzureOpenAI, BaseOpenAI, OpenAI

__all__ = [
    "_import_tiktoken",
    "OpenAI",
    "AzureOpenAI",
    "ChatOpenAI",
    "AzureChatOpenAI",
    "OpenAIEmbeddings",
    "AzureOpenAIEmbeddings",
    "convert_pydantic_to_openai_function",
    "convert_pydantic_to_openai_tool",
    "BaseOpenAI",
]
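A sketch of what the new partner package exposes once installed — assumes `OPENAI_API_KEY` is set in the environment, and the model name is illustrative:

```python
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

chat = ChatOpenAI(model="gpt-3.5-turbo")
print(chat.invoke("Say hello").content)

embeddings = OpenAIEmbeddings()
vector = embeddings.embed_query("Say hello")
print(len(vector))  # dimensionality of the returned embedding
```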
@@ -0,0 +1,17 @@
from langchain_openai.chat_models.azure import AzureChatOpenAI
from langchain_openai.chat_models.base import (
    ChatOpenAI,
    _convert_delta_to_message_chunk,
    _create_retry_decorator,
    _import_tiktoken,
    acompletion_with_retry,
)

__all__ = [
    "_create_retry_decorator",
    "acompletion_with_retry",
    "_convert_delta_to_message_chunk",
    "_import_tiktoken",
    "ChatOpenAI",
    "AzureChatOpenAI",
]
@@ -0,0 +1,21 @@
from langchain_openai.embeddings.azure import AzureOpenAIEmbeddings
from langchain_openai.embeddings.base import (
    OpenAIEmbeddings,
    _async_retry_decorator,
    _check_response,
    _create_retry_decorator,
    _is_openai_v1,
    async_embed_with_retry,
    embed_with_retry,
)

__all__ = [
    "_create_retry_decorator",
    "_async_retry_decorator",
    "_check_response",
    "embed_with_retry",
    "async_embed_with_retry",
    "_is_openai_v1",
    "OpenAIEmbeddings",
    "AzureOpenAIEmbeddings",
]
@@ -0,0 +1,27 @@
from langchain_openai.llms.base import (
    AzureOpenAI,
    BaseOpenAI,
    OpenAI,
    OpenAIChat,
    _create_retry_decorator,
    _stream_response_to_generation_chunk,
    _streaming_response_template,
    _update_response,
    acompletion_with_retry,
    completion_with_retry,
    update_token_usage,
)

__all__ = [
    "update_token_usage",
    "_stream_response_to_generation_chunk",
    "_update_response",
    "_streaming_response_template",
    "_create_retry_decorator",
    "completion_with_retry",
    "acompletion_with_retry",
    "OpenAIChat",
    "OpenAI",
    "AzureOpenAI",
    "BaseOpenAI",
]
@@ -221,7 +221,8 @@ git checkout master -- langchain/tests/unit_tests/document_loaders/blob_loaders/
git checkout master -- langchain/tests/unit_tests/document_loaders/parsers/test_public_api.py
git checkout master -- langchain/tests/unit_tests/vectorstores/test_public_api.py
git checkout master -- langchain/tests/unit_tests/schema
touch langchain/tests/unit_tests/{llms,chat_models,tools,callbacks,runnables}/__init__.py
touch langchain/tests/unit_tests/{llms,chat_models,tools,callbacks,runnables,document_loaders,docstore,document_transformers,embeddings,graphs,storage,utilities,vectorstores}/__init__.py
touch langchain/tests/unit_tests/document_loaders/{blob_loaders,parsers}/__init__.py

cp core/Makefile community
cp core/Makefile partners/openai
@@ -230,65 +231,11 @@ sed -i '' 's/libs\/core/libs\/partners\/openai/g' partners/openai/Makefile
cp -r core/scripts community
cp -r core/scripts partners/openai

printf 'from langchain_openai.llms import BaseOpenAI, OpenAI, AzureOpenAI\nfrom langchain_openai.chat_models import _import_tiktoken, ChatOpenAI, AzureChatOpenAI\nfrom langchain_openai.embeddings import OpenAIEmbeddings, AzureOpenAIEmbeddings\nfrom langchain_openai.functions import convert_pydantic_to_openai_function, convert_pydantic_to_openai_tool\n\n__all__ = ["_import_tiktoken", "OpenAI", "AzureOpenAI", "ChatOpenAI", "AzureChatOpenAI", "OpenAIEmbeddings", "AzureOpenAIEmbeddings", "convert_pydantic_to_openai_function", "convert_pydantic_to_openai_tool", "BaseOpenAI"]' >> partners/openai/langchain_openai/__init__.py
printf 'from langchain_openai.llms.base import update_token_usage, _stream_response_to_generation_chunk, _update_response, _streaming_response_template, _create_retry_decorator, completion_with_retry, acompletion_with_retry, BaseOpenAI, OpenAIChat, OpenAI, AzureOpenAI\n\n__all__ = ["update_token_usage", "_stream_response_to_generation_chunk", "_update_response", "_streaming_response_template", "_create_retry_decorator", "completion_with_retry", "acompletion_with_retry", "OpenAIChat", "OpenAI", "AzureOpenAI", "BaseOpenAI"]' >> partners/openai/langchain_openai/llms/__init__.py
printf 'from langchain_openai.chat_models.base import _create_retry_decorator, acompletion_with_retry, _convert_delta_to_message_chunk, _import_tiktoken, ChatOpenAI\nfrom langchain_openai.chat_models.azure import AzureChatOpenAI\n\n__all__ = ["_create_retry_decorator", "acompletion_with_retry", "_convert_delta_to_message_chunk", "_import_tiktoken", "ChatOpenAI", "AzureChatOpenAI"]' >> partners/openai/langchain_openai/chat_models/__init__.py
printf 'from langchain_openai.embeddings.base import _create_retry_decorator, _async_retry_decorator, _check_response, embed_with_retry, async_embed_with_retry, _is_openai_v1, OpenAIEmbeddings\nfrom langchain_openai.embeddings.azure import AzureOpenAIEmbeddings\n\n\n__all__ = ["_create_retry_decorator", "_async_retry_decorator", "_check_response", "embed_with_retry", "async_embed_with_retry", "_is_openai_v1", "OpenAIEmbeddings", "AzureOpenAIEmbeddings"]' >> partners/openai/langchain_openai/embeddings/__init__.py


sed -i '' 's/from\ langchain_openai.chat_models\ /from\ langchain_openai.chat_models.base\ /g' partners/openai/langchain_openai/chat_models/azure.py
sed -i '' 's/from\ langchain_openai.embeddings\ /from\ langchain_openai.embeddings.base\ /g' partners/openai/langchain_openai/embeddings/azure.py

echo '"""
**Utility functions** for LangChain.

These functions do not depend on any other LangChain module.
"""

from langchain_core.utils.formatting import StrictFormatter, formatter
from langchain_core.utils.input import (
    get_bolded_text,
    get_color_mapping,
    get_colored_text,
    print_text,
)
from langchain_core.utils.loading import try_load_from_hub
from langchain_core.utils.utils import (
    build_extra_kwargs,
    check_package_version,
    convert_to_secret_str,
    get_pydantic_field_names,
    guard_import,
    mock_now,
    raise_for_status_with_text,
    xor_args,
)
from langchain_core.utils.env import get_from_env, get_from_dict_or_env
from langchain_core.utils.strings import stringify_dict, comma_list, stringify_value

__all__ = [
    "StrictFormatter",
    "check_package_version",
    "convert_to_secret_str",
    "formatter",
    "get_bolded_text",
    "get_color_mapping",
    "get_colored_text",
    "get_pydantic_field_names",
    "guard_import",
    "mock_now",
    "print_text",
    "raise_for_status_with_text",
    "xor_args",
    "try_load_from_hub",
    "build_extra_kwargs",
    "get_from_env",
    "get_from_dict_or_env",
    "stringify_dict",
    "comma_list",
    "stringify_value",
]
' > core/langchain_core/utils/__init__.py
cp -r ../.scripts/community_split/libs/* .

cd core
make format
@@ -9,7 +9,7 @@ SCRIPT_DIR="$(cd "$(dirname "$0")"; pwd)"
cd "${SCRIPT_DIR}"

mkdir -p ../_dist
rsync -ruv --exclude node_modules . ../_dist
rsync -ruv --exclude node_modules --exclude api_reference --exclude .venv --exclude .docusaurus . ../_dist
cd ../_dist
poetry run python scripts/model_feat_table.py
cp ../cookbook/README.md src/pages/cookbook.mdx
@@ -209,7 +209,10 @@
   "id": "637f994a-5134-402a-bcf0-4de3911eaf49",
   "metadata": {},
   "source": [
    ":::tip [LangSmith trace](https://smith.langchain.com/public/60909eae-f4f1-43eb-9f96-354f5176f66f/r)\n",
    ":::tip\n",
    "\n",
    "[LangSmith trace](https://smith.langchain.com/public/60909eae-f4f1-43eb-9f96-354f5176f66f/r)\n",
    "\n",
    ":::"
   ]
  },
@@ -374,7 +377,10 @@
   "id": "5a7e498b-dc68-4267-a35c-90ceffa91c46",
   "metadata": {},
   "source": [
    ":::tip [LangSmith trace](https://smith.langchain.com/public/3b27d47f-e4df-4afb-81b1-0f88b80ca97e/r)\n",
    ":::tip\n",
    "\n",
    "[LangSmith trace](https://smith.langchain.com/public/3b27d47f-e4df-4afb-81b1-0f88b80ca97e/r)\n",
    "\n",
    ":::"
   ]
  }

@@ -251,7 +251,10 @@
   "id": "da3d1feb-b4bb-4624-961c-7db2e1180df7",
   "metadata": {},
   "source": [
    ":::tip [Langsmith trace](https://smith.langchain.com/public/863a003b-7ca8-4b24-be9e-d63ec13c106e/r)\n",
    ":::tip\n",
    "\n",
    "[Langsmith trace](https://smith.langchain.com/public/863a003b-7ca8-4b24-be9e-d63ec13c106e/r)\n",
    "\n",
    ":::"
   ]
  },
@@ -334,7 +337,10 @@
   "id": "b898d1b1-11e6-4d30-a8dd-cc5e45533611",
   "metadata": {},
   "source": [
    ":::tip [LangSmith trace](https://smith.langchain.com/public/f6c3e1d1-a49d-4955-a9fa-c6519df74fa7/r)\n",
    ":::tip\n",
    "\n",
    "[LangSmith trace](https://smith.langchain.com/public/f6c3e1d1-a49d-4955-a9fa-c6519df74fa7/r)\n",
    "\n",
    ":::"
   ]
  },
@@ -7,7 +7,7 @@
 "source": [
  "# OpenAI Adapter(Old)\n",
  "\n",
  "**Please ensure OpenAI library is less than 1.0.0; otherwise, refer to the newer doc [OpenAI Adapter](./openai.ipynb).**\n",
  "**Please ensure OpenAI library is less than 1.0.0; otherwise, refer to the newer doc [OpenAI Adapter](./openai).**\n",
  "\n",
  "A lot of people get started with OpenAI but want to explore other models. LangChain's integrations with many model providers make this easy to do. While LangChain has its own message and model APIs, we've also made it as easy as possible to explore other models by exposing an adapter that adapts LangChain models to the OpenAI API.\n",
  "\n",

@@ -7,7 +7,7 @@
 "source": [
  "# OpenAI Adapter\n",
  "\n",
  "**Please ensure OpenAI library is version 1.0.0 or higher; otherwise, refer to the older doc [OpenAI Adapter(Old)](./openai-old.ipynb).**\n",
  "**Please ensure OpenAI library is version 1.0.0 or higher; otherwise, refer to the older doc [OpenAI Adapter(Old)](./openai-old).**\n",
  "\n",
  "A lot of people get started with OpenAI but want to explore other models. LangChain's integrations with many model providers make this easy to do. While LangChain has its own message and model APIs, we've also made it as easy as possible to explore other models by exposing an adapter that adapts LangChain models to the OpenAI API.\n",
  "\n",
docs/docs/integrations/llms/cloudflare_workersai.ipynb (new file, 127 lines)
@@ -0,0 +1,127 @@
{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Cloudflare Workers AI\n",
    "\n",
    "[Cloudflare AI documentation](https://developers.cloudflare.com/workers-ai/models/text-generation/) lists all generative text models available.\n",
    "\n",
    "Both Cloudflare account ID and API token are required. Find how to obtain them from [this document](https://developers.cloudflare.com/workers-ai/get-started/rest-api/)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.chains import LLMChain\n",
    "from langchain.llms.cloudflare_workersai import CloudflareWorkersAI\n",
    "from langchain.prompts import PromptTemplate\n",
    "\n",
    "template = \"\"\"Human: {question}\n",
    "\n",
    "AI Assistant: \"\"\"\n",
    "\n",
    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Provide authentication credentials before running the LLM."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import getpass\n",
    "\n",
    "my_account_id = getpass.getpass(\"Enter your Cloudflare account ID:\\n\\n\")\n",
    "my_api_token = getpass.getpass(\"Enter your Cloudflare API token:\\n\\n\")\n",
    "llm = CloudflareWorkersAI(account_id=my_account_id, api_token=my_api_token)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"AI Assistant: Ah, a fascinating question! The answer to why roses are red is a bit complex, but I'll do my best to explain it in a simple and polite manner.\\nRoses are red due to the presence of a pigment called anthocyanin. Anthocyanin is a type of flavonoid, a class of plant compounds that are responsible for the red, purple, and blue colors found in many fruits and vegetables.\\nNow, you might be wondering why roses specifically have this pigment. The answer lies in the evolutionary history of roses. You see, roses have been around for millions of years, and their red color has likely played a crucial role in attracting pollinators like bees and butterflies. These pollinators are drawn to the bright colors of roses, which helps the plants reproduce and spread their seeds.\\nSo, to summarize, the reason roses are red is because of the anthocyanin pigment, which is a result of millions of years of evolutionary pressures shaping the plant's coloration to attract pollinators. I hope that helps clarify things for\""
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
    "\n",
    "question = \"Why are roses red?\"\n",
    "llm_chain.run(question)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Ah | , | a | most | excellent | question | , | my | dear | human | ! | * | ad | just | s | glass | es | * | The | sky | appears | blue | due | to | a | phenomen | on | known | as | Ray | le | igh | scatter | ing | . | When | sun | light | enters | Earth | ' | s | atmosphere | , | it | enc | oun | ters | tiny | mole | cules | of | g | ases | such | as | nit | ro | gen | and | o | xygen | . | These | mole | cules | scatter | the | light | in | all | directions | , | but | they | scatter | shorter | ( | blue | ) | w | avel | ength | s | more | than | longer | ( | red | ) | w | avel | ength | s | . | This | is | known | as | Ray | le | igh | scatter | ing | . | \n",
      " | As | a | result | , | the | blue | light | is | dispers | ed | throughout | the | atmosphere | , | giving | the | sky | its | characteristic | blue | h | ue | . | The | blue | light | appears | more | prominent | during | sun | r | ise | and | sun | set | due | to | the | scatter | ing | of | light | by | the | Earth | ' | s | atmosphere | at | these | times | . | \n",
      " | I | hope | this | explanation | has | been | helpful | , | my | dear | human | ! | Is | there | anything | else | you | would | like | to | know | ? | * | sm | iles | * | * | | "
     ]
    }
   ],
   "source": [
    "# Using streaming\n",
    "for chunk in llm.stream(\"Why is sky blue?\"):\n",
    "    print(chunk, end=\" | \", flush=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
@@ -24,7 +24,7 @@ supported functions:
- `delete_doc_by_texts`


For a more detailed walkthrough of the Alibaba Cloud OpenSearch wrapper, see [this notebook](../modules/indexes/vectorstores/examples/alibabacloud_opensearch.ipynb)
For a more detailed walkthrough of the Alibaba Cloud OpenSearch wrapper, see [this notebook](/docs/integrations/vectorstores/alibabacloud_opensearch)

If you encounter any problems during use, please feel free to contact [xingshaomin.xsm@alibaba-inc.com](xingshaomin.xsm@alibaba-inc.com), and we will do our best to provide you with assistance and support.
@@ -16,7 +16,7 @@ The DataForSEO utility wraps the API. To import this utility, use:
from langchain.utilities.dataforseo_api_search import DataForSeoAPIWrapper
```

For a detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/dataforseo.ipynb).
For a detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/dataforseo).

### Tool
@@ -17,4 +17,4 @@ embeddings = JinaEmbeddings(jina_api_key='jina_**', model_name='jina-embeddings-

You can check the list of available models from [here](https://jina.ai/embeddings/)

For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/jina.ipynb)
For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/jina)
@@ -17,6 +17,24 @@ Install the Python SDK:
pip install redis
```

To run Redis locally, you can use Docker:

```bash
docker run --name langchain-redis -d -p 6379:6379 redis redis-server --save 60 1 --loglevel warning
```

To stop the container:

```bash
docker stop langchain-redis
```

And to start it again:

```bash
docker start langchain-redis
```

## Wrappers

All wrappers need a Redis URL connection string to connect to the database. They support either a standalone Redis server
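A hedged sketch of what such a connection string looks like in practice — the vector store wrapper, `FakeEmbeddings`, and the sample texts are illustrative choices, not from this page, and the URL matches the local Docker container started above:

```python
from langchain.embeddings import FakeEmbeddings
from langchain.vectorstores.redis import Redis

# FakeEmbeddings keeps the sketch runnable without an embeddings API key
rds = Redis.from_texts(
    ["foo", "bar"],
    FakeEmbeddings(size=8),
    redis_url="redis://localhost:6379",
)
print(rds.similarity_search("foo", k=1))
```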
@@ -8,7 +8,7 @@ Vearch Python SDK enables vearch to use locally. Vearch python sdk can be instal

# Vectorstore

Vearch also can used as vectorstore. Most detalis in [this notebook](docs/modules/indexes/vectorstores/examples/vearch.ipynb)
Vearch can also be used as a vectorstore. More details are in [this notebook](/docs/integrations/vectorstores/vearch)

```python
from langchain.vectorstores import Vearch
docs/docs/integrations/stores/file_system.ipynb (new file, 100 lines)
@@ -0,0 +1,100 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: Local Filesystem\n",
    "sidebar_position: 3\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# LocalFileStore\n",
    "\n",
    "The `LocalFileStore` is a persistent implementation of `ByteStore` that stores everything in a folder of your choosing."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[b'v1', b'v2']\n"
     ]
    }
   ],
   "source": [
    "from pathlib import Path\n",
    "\n",
    "from langchain.storage import LocalFileStore\n",
    "\n",
    "root_path = Path.cwd() / \"data\"  # can also be a path set by a string\n",
    "store = LocalFileStore(root_path)\n",
    "\n",
    "store.mset([(\"k1\", b\"v1\"), (\"k2\", b\"v2\")])\n",
    "print(store.mget([\"k1\", \"k2\"]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now let's see which files exist in our `data` folder:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "k1 k2\n"
     ]
    }
   ],
   "source": [
    "!ls {root_path}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
docs/docs/integrations/stores/in_memory.ipynb (new file, 73 lines)
@@ -0,0 +1,73 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: In Memory\n",
    "sidebar_position: 2\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# InMemoryByteStore\n",
    "\n",
    "The `InMemoryByteStore` is a non-persistent implementation of `ByteStore` that stores everything in a Python dictionary."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[b'v1', b'v2']\n"
     ]
    }
   ],
   "source": [
    "from langchain.storage import InMemoryByteStore\n",
    "\n",
    "store = InMemoryByteStore()\n",
    "\n",
    "store.mset([(\"k1\", b\"v1\"), (\"k2\", b\"v2\")])\n",
    "print(store.mget([\"k1\", \"k2\"]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
docs/docs/integrations/stores/index.mdx (new file, 29 lines)
@@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_class_name: hidden
---

# Stores

In many different applications, having some sort of key-value storage is helpful.
In this section, we will look at a few different ways to store key-value pairs
using implementations of the `ByteStore` interface.

## Features (natively supported)

All `ByteStore`s support the following functions, which are used for modifying
**m**ultiple key-value pairs at once:

- `mget(key: Sequence[str]) -> List[Optional[bytes]]`: get the contents of multiple keys, returning `None` if the key does not exist
- `mset(key_value_pairs: Sequence[Tuple[str, bytes]]) -> None`: set the contents of multiple keys
- `mdelete(key: Sequence[str]) -> None`: delete multiple keys
- `yield_keys(prefix: Optional[str] = None) -> Iterator[str]`: yield all keys in the store, optionally filtering by a prefix

## How to pick one

`ByteStore`s are designed to be interchangeable. By default, most dependent integrations
use the `InMemoryByteStore`, which is a simple in-memory key-value store.

However, if you start having other requirements, like massive scalability or persistence,
you can swap out the `ByteStore` implementation with one of the other ones documented
in this section.
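A quick demonstration of the four-function interface listed above, using the in-memory implementation (the notebooks that follow show `mset`/`mget`; `mdelete` and `yield_keys` behave as documented):

```python
from langchain.storage import InMemoryByteStore

store = InMemoryByteStore()
store.mset([("k1", b"v1"), ("k2", b"v2")])
print(store.mget(["k1", "k3"]))   # [b'v1', None] -- a missing key comes back as None
store.mdelete(["k1"])
print(list(store.yield_keys()))   # ['k2']
```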
docs/docs/integrations/stores/redis.ipynb (new file, 83 lines)
@@ -0,0 +1,83 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: Redis\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# RedisStore\n",
    "\n",
    "The `RedisStore` is an implementation of `ByteStore` that stores everything in your Redis instance.\n",
    "\n",
    "To configure Redis, follow our [Redis guide](/docs/integrations/providers/redis)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!pip install redis"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[b'v1', b'v2']\n"
     ]
    }
   ],
   "source": [
    "from langchain.storage import RedisStore\n",
    "\n",
    "store = RedisStore(redis_url=\"redis://localhost:6379\")\n",
    "\n",
    "store.mset([(\"k1\", b\"v1\"), (\"k2\", b\"v2\")])\n",
    "print(store.mget([\"k1\", \"k2\"]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
docs/docs/integrations/stores/upstash_redis.ipynb (new file, 90 lines)
@@ -0,0 +1,90 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: Upstash Redis\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# UpstashRedisByteStore\n",
    "\n",
    "The `UpstashRedisStore` is an implementation of `ByteStore` that stores everything in your Upstash-hosted Redis instance.\n",
    "\n",
    "To use the base `RedisStore` instead, see [this guide](./redis)\n",
    "\n",
    "To configure Upstash Redis, follow our [Upstash guide](/docs/integrations/providers/upstash)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!pip install upstash-redis"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[b'v1', b'v2']\n"
     ]
    }
   ],
   "source": [
    "from langchain.storage import UpstashRedisByteStore\n",
    "from upstash_redis import Redis\n",
    "\n",
    "URL = \"<UPSTASH_REDIS_REST_URL>\"\n",
    "TOKEN = \"<UPSTASH_REDIS_REST_TOKEN>\"\n",
    "\n",
    "redis_client = Redis(url=URL, token=TOKEN)\n",
    "store = UpstashRedisByteStore(client=redis_client, ttl=None, namespace=\"test-ns\")\n",
    "\n",
    "store.mset([(\"k1\", b\"v1\"), (\"k2\", b\"v2\")])\n",
    "print(store.mget([\"k1\", \"k2\"]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
@@ -153,7 +153,10 @@
   "id": "db6b9cbf-dd54-4346-be6c-842e08756ccc",
   "metadata": {},
   "source": [
    ":::tip [LangSmith trace](https://smith.langchain.com/public/6750972b-0849-4beb-a8bb-353d424ffade/r)\n",
    ":::tip\n",
    "\n",
    "[LangSmith trace](https://smith.langchain.com/public/6750972b-0849-4beb-a8bb-353d424ffade/r)\n",
    "\n",
    ":::"
   ]
  },
@@ -61,7 +61,7 @@
    "        description=\"The answer to the question, which is based ONLY on the provided context.\"\n",
    "    )\n",
    "    score: float = Field(\n",
    "        decsription=\"A 0.0-1.0 relevance score, where 1.0 indicates the provided context answers the question completely and 0.0 indicates the provided context does not answer the question at all.\"\n",
    "        description=\"A 0.0-1.0 relevance score, where 1.0 indicates the provided context answers the question completely and 0.0 indicates the provided context does not answer the question at all.\"\n",
    "    )\n",
    "\n",
    "\n",
@@ -66,7 +66,11 @@
   "source": [
    "## [Legacy] LLMChain\n",
    "\n",
    ":::note This is a legacy class, using LCEL as shown above is preffered.\n",
    ":::note\n",
    "\n",
    "This is a legacy class, using LCEL as shown above is preferred.\n",
    "\n",
    ":::\n",
    "\n",
    "An `LLMChain` is a simple chain that adds some functionality around language models. It is used widely throughout LangChain, including in other chains and agents.\n",
    "\n",
@@ -130,7 +130,11 @@
   "source": [
    "## [Legacy] SequentialChain\n",
    "\n",
    ":::note This is a legacy class, using LCEL as shown above is preffered.\n",
    ":::note\n",
    "\n",
    "This is a legacy class, using LCEL as shown above is preferred.\n",
    "\n",
    ":::\n",
    "\n",
    "Sequential chains allow you to connect multiple chains and compose them into pipelines that execute some specific scenario. There are two types of sequential chains:\n",
    "\n",
@@ -88,7 +88,11 @@
   "source": [
    "## [Legacy] TransformationChain\n",
    "\n",
    ":::note This is a legacy class, using LCEL as shown above is preffered.\n",
    ":::note\n",
    "\n",
    "This is a legacy class, using LCEL as shown above is preferred.\n",
    "\n",
    ":::\n",
    "\n",
    "This notebook showcases using a generic transformation chain."
   ]
@@ -17,7 +17,9 @@
   "metadata": {},
   "source": [
    ":::info\n",
    "\n",
    "Head to [Integrations](/docs/integrations/retrievers/) for documentation on built-in retriever integrations with 3rd-party tools.\n",
    "\n",
    ":::\n",
    "\n",
    "A retriever is an interface that returns documents given an unstructured query. It is more general than a vector store.\n",
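A minimal sketch of the interface described above — the vector store, `FakeEmbeddings`, and the sample text are illustrative, and `faiss-cpu` is assumed to be installed:

```python
from langchain.embeddings import FakeEmbeddings
from langchain.vectorstores import FAISS

# FakeEmbeddings keeps the sketch runnable without an API key
vectorstore = FAISS.from_texts(
    ["LangChain retrievers return documents for a query"],
    FakeEmbeddings(size=8),
)
retriever = vectorstore.as_retriever()
docs = retriever.get_relevant_documents("What do retrievers return?")
print(docs[0].page_content)
```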
@@ -8,7 +8,9 @@
   "# Self-querying\n",
   "\n",
   ":::info\n",
   "\n",
   "Head to [Integrations](/docs/integrations/retrievers/self_query) for documentation on vector stores with built-in support for self-querying.\n",
   "\n",
   ":::\n",
   "\n",
   "A self-querying retriever is one that, as the name suggests, has the ability to query itself. Specifically, given any natural language query, the retriever uses a query-constructing LLM chain to write a structured query and then applies that structured query to its underlying VectorStore. This allows the retriever to not only use the user-input query for semantic similarity comparison with the contents of stored documents but to also extract filters from the user query on the metadata of stored documents and to execute those filters.\n",
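A hedged sketch of wiring up the mechanism described above — `vectorstore` is assumed to be an existing, self-query-capable vector store, the metadata schema is illustrative, and the `lark` package is typically required for query construction:

```python
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.chat_models import ChatOpenAI
from langchain.retrievers.self_query.base import SelfQueryRetriever

metadata_field_info = [
    AttributeInfo(name="year", description="Year the movie was released", type="integer"),
]
retriever = SelfQueryRetriever.from_llm(
    ChatOpenAI(temperature=0),
    vectorstore,                 # assumed pre-existing vector store
    "Brief summary of a movie",  # description of the document contents
    metadata_field_info,
)
docs = retriever.get_relevant_documents("movies released after 2000")
```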
@@ -1,11 +1,21 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "id": "8baf0f21",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: Caching\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bf4061ce",
   "metadata": {},
   "source": [
    "# Caching\n",
    "# CacheBackedEmbeddings\n",
    "\n",
    "Embeddings can be stored or temporarily cached to avoid needing to recompute them.\n",
    "\n",
@@ -15,7 +25,7 @@
    "The main supported way to initialize a `CacheBackedEmbeddings` is `from_bytes_store`. This takes in the following parameters:\n",
    "\n",
    "- underlying_embedder: The embedder to use for embedding.\n",
    "- document_embedding_cache: The cache to use for storing document embeddings.\n",
    "- document_embedding_cache: Any [`ByteStore`](/docs/integrations/stores/) for caching document embeddings.\n",
    "- namespace: (optional, defaults to `\"\"`) The namespace to use for document cache. This namespace is used to avoid collisions with other caches. For example, set it to the name of the embedding model used.\n",
    "\n",
    "**Attention**: Be sure to set the `namespace` parameter to avoid collisions of the same text embedded using different embeddings models."
@@ -23,20 +33,14 @@
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "execution_count": 1,
   "id": "a463c3c2-749b-40d1-a433-84f68a1cd1c7",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "from langchain.embeddings import CacheBackedEmbeddings, OpenAIEmbeddings\n",
    "from langchain.storage import (\n",
    "    InMemoryStore,\n",
    "    LocalFileStore,\n",
    "    RedisStore,\n",
    "    UpstashRedisStore,\n",
    ")"
    "from langchain.embeddings import CacheBackedEmbeddings"
   ]
  },
  {
@@ -44,7 +48,7 @@
   "id": "9ddf07dd-3e72-41de-99d4-78e9521e272f",
   "metadata": {},
   "source": [
    "## Using with a vector store\n",
    "## Using with a Vector Store\n",
    "\n",
    "First, let's see an example that uses the local file system for storing embeddings and uses FAISS vector store for retrieval."
   ]
@@ -52,36 +56,32 @@
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "50183825",
   "metadata": {},
   "outputs": [],
   "source": [
    "!pip install openai faiss-cpu"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "9e4314d8-88ef-4f52-81ae-0be771168bb6",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.document_loaders import TextLoader\n",
    "from langchain.embeddings.openai import OpenAIEmbeddings\n",
    "from langchain.text_splitter import CharacterTextSplitter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3e751f26-9b5b-4c10-843a-d784b5ea8538",
   "metadata": {},
   "outputs": [],
   "source": [
    "underlying_embeddings = OpenAIEmbeddings()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "30743664-38f5-425d-8216-772b64e7f348",
   "metadata": {},
   "outputs": [],
   "source": [
    "fs = LocalFileStore(\"./cache/\")\n",
    "from langchain.storage import LocalFileStore\n",
    "from langchain.text_splitter import CharacterTextSplitter\n",
    "from langchain.vectorstores import FAISS\n",
    "\n",
    "underlying_embeddings = OpenAIEmbeddings()\n",
    "\n",
    "store = LocalFileStore(\"./cache/\")\n",
    "\n",
    "cached_embedder = CacheBackedEmbeddings.from_bytes_store(\n",
    "    underlying_embeddings, fs, namespace=underlying_embeddings.model\n",
    "    underlying_embeddings, store, namespace=underlying_embeddings.model\n",
    ")"
   ]
  },
@@ -95,7 +95,7 @@
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "execution_count": 4,
   "id": "f9ad627f-ced2-4277-b336-2434f22f2c8a",
   "metadata": {},
   "outputs": [
@@ -105,13 +105,13 @@
      "[]"
     ]
    },
    "execution_count": 9,
    "execution_count": 4,
    "metadata": {},
    "output_type": "execute_result"
   }
  ],
  "source": [
   "list(fs.yield_keys())"
   "list(store.yield_keys())"
  ]
 },
 {
@@ -124,12 +124,12 @@
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "execution_count": 5,
   "id": "cf958ac2-e60e-4668-b32c-8bb2d78b3c61",
   "metadata": {},
   "outputs": [],
   "source": [
    "raw_documents = TextLoader(\"../state_of_the_union.txt\").load()\n",
    "raw_documents = TextLoader(\"../../state_of_the_union.txt\").load()\n",
    "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
    "documents = text_splitter.split_documents(raw_documents)"
   ]
@@ -144,7 +144,7 @@
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "execution_count": 6,
   "id": "3a1d7bb8-3b72-4bb5-9013-cf7729caca61",
   "metadata": {},
   "outputs": [
@@ -152,8 +152,8 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 608 ms, sys: 58.9 ms, total: 667 ms\n",
      "Wall time: 1.3 s\n"
      "CPU times: user 218 ms, sys: 29.7 ms, total: 248 ms\n",
      "Wall time: 1.02 s\n"
     ]
    }
   ],
@@ -172,7 +172,7 @@
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "execution_count": 7,
   "id": "714cb2e2-77ba-41a8-bb83-84e75342af2d",
   "metadata": {},
   "outputs": [
@@ -180,8 +180,8 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 33.6 ms, sys: 3.96 ms, total: 37.6 ms\n",
      "Wall time: 36.8 ms\n"
      "CPU times: user 15.7 ms, sys: 2.22 ms, total: 18 ms\n",
      "Wall time: 17.2 ms\n"
     ]
    }
   ],
@@ -200,458 +200,55 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 8,
|
||||
"id": "f2ca32dd-3712-4093-942b-4122f3dc8a8e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['text-embedding-ada-002614d7cf6-46f1-52fa-9d3a-740c39e7a20e',\n",
|
||||
" 'text-embedding-ada-0020fc1ede2-407a-5e14-8f8f-5642214263f5',\n",
|
||||
"['text-embedding-ada-00217a6727d-8916-54eb-b196-ec9c9d6ca472',\n",
|
||||
" 'text-embedding-ada-0025fc0d904-bd80-52da-95c9-441015bfb438',\n",
|
||||
" 'text-embedding-ada-002e4ad20ef-dfaa-5916-9459-f90c6d8e8159',\n",
|
||||
" 'text-embedding-ada-002a5ef11e4-0474-5725-8d80-81c91943b37f',\n",
|
||||
" 'text-embedding-ada-00281426526-23fe-58be-9e84-6c7c72c8ca9a']"
|
||||
" 'text-embedding-ada-002ed199159-c1cd-5597-9757-f80498e8f17b',\n",
|
||||
" 'text-embedding-ada-0021297d37a-2bc1-5e19-bf13-6c950f075062']"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"list(fs.yield_keys())[:5]"
|
||||
"list(store.yield_keys())[:5]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "564c9801-29f0-4452-aeac-527382e2c0e8",
|
||||
"id": "c1a7fafd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## In Memory\n",
|
||||
"# Swapping the `ByteStore`\n",
|
||||
"\n",
|
||||
"This section shows how to set up an in memory cache for embeddings. This type of cache is primarily \n",
|
||||
"useful for unit tests or prototyping. Do **not** use this cache if you need to actually store the embeddings."
|
||||
"In order to use a different `ByteStore`, just use it when creating your `CacheBackedEmbeddings`. Below, we create an equivalent cached embeddings object, except using the non-persistent `InMemoryByteStore` instead:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "13bd1c5b-b7ba-4394-957c-7d5b5a841972",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"execution_count": 9,
|
||||
"id": "336a0538",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"store = InMemoryStore()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9d99885f-99e1-498c-904d-6db539ac9466",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"underlying_embeddings = OpenAIEmbeddings()\n",
|
||||
"embedder = CacheBackedEmbeddings.from_bytes_store(\n",
|
||||
"from langchain.embeddings import CacheBackedEmbeddings\n",
|
||||
"from langchain.storage import InMemoryByteStore\n",
|
||||
"\n",
|
||||
"store = InMemoryByteStore()\n",
|
||||
"\n",
|
||||
"cached_embedder = CacheBackedEmbeddings.from_bytes_store(\n",
|
||||
" underlying_embeddings, store, namespace=underlying_embeddings.model\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "682eb5d4-0b7a-4dac-b8fb-3de4ca6e421c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 10.9 ms, sys: 916 µs, total: 11.8 ms\n",
|
||||
"Wall time: 159 ms\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"embeddings = embedder.embed_documents([\"hello\", \"goodbye\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "95233026-147f-49d1-bd87-e1e8b88ebdbc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The second time we try to embed the embedding time is only 2 ms because the embeddings are looked up in the cache."
|
||||
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f819c3ff-a212-4d06-a5f7-5eb1435c1feb",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 1.67 ms, sys: 342 µs, total: 2.01 ms\n",
"Wall time: 2.01 ms\n"
]
}
],
"source": [
"%%time\n",
"embeddings_from_cache = embedder.embed_documents([\"hello\", \"goodbye\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ec38fb72-90a9-4687-a483-c62c87d1f4dd",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"embeddings == embeddings_from_cache"
]
},
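Pulling the cells above together, here is a minimal, self-contained sketch of the swap pattern the notebook demonstrates. The FakeEmbeddings class is a hypothetical stand-in for OpenAIEmbeddings so the snippet runs without an API key; the rest mirrors the notebook:

from langchain.embeddings import CacheBackedEmbeddings
from langchain.storage import InMemoryByteStore


class FakeEmbeddings:
    """Hypothetical stand-in for OpenAIEmbeddings (no API key needed)."""

    model = "fake-embedding-model"

    def embed_documents(self, texts):
        return [[float(len(text))] for text in texts]

    def embed_query(self, text):
        return [float(len(text))]


underlying_embeddings = FakeEmbeddings()
store = InMemoryByteStore()
cached_embedder = CacheBackedEmbeddings.from_bytes_store(
    underlying_embeddings, store, namespace=underlying_embeddings.model
)
first = cached_embedder.embed_documents(["hello", "goodbye"])   # computed, then cached
second = cached_embedder.embed_documents(["hello", "goodbye"])  # served from the cache
assert first == second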
{
"cell_type": "markdown",
"id": "f6cbe100-8587-4830-b207-fb8b524a9854",
"metadata": {},
"source": [
"## File system\n",
"\n",
"This section covers how to use a file system store."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a0070271-0809-4528-97e0-2a88216846f3",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"fs = LocalFileStore(\"./test_cache/\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0b20e9fe-f57f-4d7c-9f81-105c5f8726f4",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"embedder2 = CacheBackedEmbeddings.from_bytes_store(\n",
" underlying_embeddings, fs, namespace=underlying_embeddings.model\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "630515fd-bf5c-4d9c-a404-9705308f3a2c",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 6.89 ms, sys: 4.89 ms, total: 11.8 ms\n",
"Wall time: 184 ms\n"
]
}
],
"source": [
"%%time\n",
"embeddings = embedder2.embed_documents([\"hello\", \"goodbye\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "30e6bb87-42c9-4d08-88ac-0d22c9c449a1",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 0 ns, sys: 3.24 ms, total: 3.24 ms\n",
"Wall time: 2.84 ms\n"
]
}
],
"source": [
"%%time\n",
"embeddings = embedder2.embed_documents([\"hello\", \"goodbye\"])"
]
},
{
"cell_type": "markdown",
"id": "12ed5a45-8352-4e0f-8583-5537397f53c0",
"metadata": {},
"source": [
"Here are the embeddings that have been persisted to the directory `./test_cache`. \n",
"\n",
"Notice that the embedder takes a namespace parameter."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "658e2914-05e9-44a3-a8fe-3fe17ca84039",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['text-embedding-ada-002e885db5b-c0bd-5fbc-88b1-4d1da6020aa5',\n",
" 'text-embedding-ada-0026ba52e44-59c9-5cc9-a084-284061b13c80']"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"list(fs.yield_keys())"
]
},
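The LocalFileStore used above is a plain key/value byte store, so it can be exercised directly as well. A small sketch (the path is illustrative):

from langchain.storage import LocalFileStore

fs = LocalFileStore("./test_cache/")
fs.mset([("alpha", b"1"), ("beta", b"2")])  # persist raw bytes under ./test_cache/
print(fs.mget(["alpha", "missing"]))        # [b'1', None]
print(sorted(fs.yield_keys()))              # ['alpha', 'beta'] plus any cached embeddings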
{
"cell_type": "markdown",
"id": "904c1d47",
"metadata": {},
"source": [
"## Upstash Redis Store"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d0f9f212",
"metadata": {},
"outputs": [],
"source": [
"from langchain.storage.upstash_redis import UpstashRedisStore"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "45bf62e4",
"metadata": {},
"outputs": [],
"source": [
"from upstash_redis import Redis\n",
"\n",
"URL = \"<UPSTASH_REDIS_REST_URL>\"\n",
"TOKEN = \"<UPSTASH_REDIS_REST_TOKEN>\"\n",
"\n",
"redis_client = Redis(url=URL, token=TOKEN)\n",
"store = UpstashRedisStore(client=redis_client, ttl=None, namespace=\"test-ns\")\n",
"\n",
"underlying_embeddings = OpenAIEmbeddings()\n",
"embedder = CacheBackedEmbeddings.from_bytes_store(\n",
" underlying_embeddings, store, namespace=underlying_embeddings.model\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3eac3504",
"metadata": {},
"outputs": [],
"source": [
"%%time\n",
"embeddings = embedder.embed_documents([\"welcome\", \"goodbye\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "085dcd30",
"metadata": {},
"outputs": [],
"source": [
"%%time\n",
"embeddings = embedder.embed_documents([\"welcome\", \"goodbye\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3570e83f",
"metadata": {},
"outputs": [],
"source": [
"list(store.yield_keys())"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d7dc8e51",
"metadata": {},
"outputs": [],
"source": [
"list(store.client.scan(0))"
]
},
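This commit also adds an UpstashRedisByteStore (see the storage changes later in this diff). A hedged sketch of the byte-oriented variant, assuming the same constructor shape as the UpstashRedisStore above; the URL and token are placeholders:

from upstash_redis import Redis
from langchain.storage import UpstashRedisByteStore

redis_client = Redis(url="<UPSTASH_REDIS_REST_URL>", token="<UPSTASH_REDIS_REST_TOKEN>")
store = UpstashRedisByteStore(client=redis_client, ttl=None, namespace="test-ns")
store.mset([("greeting", b"hello")])           # values are raw bytes, not str
assert store.mget(["greeting"]) == [b"hello"]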
{
"cell_type": "markdown",
"id": "cd5f5a96-6ffa-429d-aa82-00b3f6532871",
"metadata": {},
"source": [
"## Redis Store\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4879c134-141f-48a0-acfe-7d6f30253af0",
"metadata": {},
"outputs": [],
"source": [
"from langchain.storage import RedisStore"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b2bb9a0-6549-4487-8532-29ab4ab7336f",
"metadata": {},
"outputs": [],
"source": [
"# For cache isolation can use a separate DB\n",
|
||||
"# Or additional namepace\n",
|
||||
"store = RedisStore(\n",
|
||||
" redis_url=\"redis://localhost:6379\",\n",
|
||||
" client_kwargs={\"db\": 2},\n",
|
||||
" namespace=\"embedding_caches\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"underlying_embeddings = OpenAIEmbeddings()\n",
|
||||
"embedder = CacheBackedEmbeddings.from_bytes_store(\n",
|
||||
" underlying_embeddings, store, namespace=underlying_embeddings.model\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "eca3cb99-2bb3-49d5-81f9-1dee03da4b8c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 3.99 ms, sys: 0 ns, total: 3.99 ms\n",
|
||||
"Wall time: 3.5 ms\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"embeddings = embedder.embed_documents([\"hello\", \"goodbye\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "317ba5d8-89f9-462c-b807-ad4ef26e518b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 2.47 ms, sys: 767 µs, total: 3.24 ms\n",
|
||||
"Wall time: 2.75 ms\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"embeddings = embedder.embed_documents([\"hello\", \"goodbye\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8a540317-5142-4491-9062-a097932b56e3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['text-embedding-ada-002e885db5b-c0bd-5fbc-88b1-4d1da6020aa5',\n",
|
||||
" 'text-embedding-ada-0026ba52e44-59c9-5cc9-a084-284061b13c80']"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"list(store.yield_keys())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "cd9b0d4a-f816-4dce-9dde-cde1ad9a65fb",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[b'embedding_caches/text-embedding-ada-002e885db5b-c0bd-5fbc-88b1-4d1da6020aa5',\n",
|
||||
" b'embedding_caches/text-embedding-ada-0026ba52e44-59c9-5cc9-a084-284061b13c80']"
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"list(store.client.scan_iter())"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -670,7 +267,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
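For reference, the Redis-backed store exercised at the end of the notebook can be driven directly in the same way. A minimal sketch against a local Redis on the default port:

from langchain.storage import RedisStore

store = RedisStore(
    redis_url="redis://localhost:6379",
    client_kwargs={"db": 2},
    namespace="embedding_caches",
)
store.mset([("demo", b"payload")])
print(list(store.yield_keys()))        # keys reported without the namespace prefix
print(list(store.client.scan_iter()))  # raw keys: b'embedding_caches/demo', ...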
@@ -17,7 +17,9 @@
"metadata": {},
"source": [
":::info\n",
"\n",
"Head to [Integrations](/docs/integrations/chat/) for documentation on built-in integrations with chat model providers.\n",
"\n",
":::\n",
"\n",
"Chat models are a variation on language models.\n",

@@ -17,7 +17,9 @@
"metadata": {},
"source": [
":::info\n",
"\n",
"Head to [Integrations](/docs/integrations/llms/) for documentation on built-in integrations with LLM providers.\n",
"\n",
":::\n",
"\n",
"Large Language Models (LLMs) are a core component of LangChain.\n",

@@ -243,7 +243,10 @@
"id": "639dc31a-7f16-40f6-ba2a-20e7c2ecfe60",
"metadata": {},
"source": [
":::tip Check out the [LangSmith trace](https://smith.langchain.com/public/1c6ca97e-445b-4d00-84b4-c7befcbc59fe/r) \n",
":::tip\n",
"\n",
"Check out the [LangSmith trace](https://smith.langchain.com/public/1c6ca97e-445b-4d00-84b4-c7befcbc59fe/r) \n",
"\n",
":::"
]
},
@@ -723,7 +726,10 @@
"id": "2c000e5f-2b7f-4eb9-8876-9f4b186b4a08",
"metadata": {},
"source": [
":::tip Check out the [LangSmith trace](https://smith.langchain.com/public/1799e8db-8a6d-4eb2-84d5-46e8d7d5a99b/r) \n",
":::tip\n",
"\n",
"Check out the [LangSmith trace](https://smith.langchain.com/public/1799e8db-8a6d-4eb2-84d5-46e8d7d5a99b/r) \n",
"\n",
":::"
]
},
@@ -800,7 +806,10 @@
"id": "94b952e6-dc4b-415b-9cf3-1ad333e48366",
"metadata": {},
"source": [
":::tip Check out the [LangSmith trace](https://smith.langchain.com/public/da23c4d8-3b33-47fd-84df-a3a582eedf84/r) \n",
":::tip\n",
"\n",
"Check out the [LangSmith trace](https://smith.langchain.com/public/da23c4d8-3b33-47fd-84df-a3a582eedf84/r) \n",
"\n",
":::"
]
},
@@ -872,7 +881,10 @@
"id": "b437da5d-ca09-4d15-9be2-c35e5a1ace77",
"metadata": {},
"source": [
":::tip Check out the [LangSmith trace](https://smith.langchain.com/public/007d7e01-cb62-4a84-8b71-b24767f953ee/r)\n",
":::tip\n",
"\n",
"Check out the [LangSmith trace](https://smith.langchain.com/public/007d7e01-cb62-4a84-8b71-b24767f953ee/r)\n",
"\n",
":::"
]
},
@@ -1050,7 +1062,10 @@
"id": "53263a65-4de2-4dd8-9291-6a8169ab6f1d",
"metadata": {},
"source": [
":::tip Check out the [LangSmith trace](https://smith.langchain.com/public/b3001782-bb30-476a-886b-12da17ec258f/r) \n",
":::tip\n",
"\n",
"Check out the [LangSmith trace](https://smith.langchain.com/public/b3001782-bb30-476a-886b-12da17ec258f/r) \n",
"\n",
":::"
]
},

@@ -111,6 +111,7 @@ module.exports = {
{ type: "category", label: "Callbacks", collapsed: true, items: [{type: "autogenerated", dirName: "integrations/callbacks" }], link: {type: "generated-index", slug: "integrations/callbacks" }},
{ type: "category", label: "Chat loaders", collapsed: true, items: [{type: "autogenerated", dirName: "integrations/chat_loaders" }], link: {type: "generated-index", slug: "integrations/chat_loaders" }},
{ type: "category", label: "Adapters", collapsed: true, items: [{type: "autogenerated", dirName: "integrations/adapters" }], link: {type: "generated-index", slug: "integrations/adapters" }},
{ type: "category", label: "Stores", collapsed: true, items: [{type: "autogenerated", dirName: "integrations/stores" }], link: {type: "doc", id: "integrations/stores/index" }},
],
link: {
type: 'generated-index',

@@ -11,11 +11,6 @@ from langchain_core.callbacks import (
StdOutCallbackHandler,
StreamingStdOutCallbackHandler,
)
from langchain_community.callbacks.manager import get_openai_callback, wandb_tracing_enabled

from langchain_community.callbacks.streaming_stdout_final_only import (
FinalStreamingStdOutCallbackHandler,
)
from langchain_core.tracers.langchain import LangChainTracer

from langchain_community.callbacks.aim_callback import AimCallbackHandler
@@ -33,6 +28,10 @@ from langchain_community.callbacks.labelstudio_callback import (
LabelStudioCallbackHandler,
)
from langchain_community.callbacks.llmonitor_callback import LLMonitorCallbackHandler
from langchain_community.callbacks.manager import (
get_openai_callback,
wandb_tracing_enabled,
)
from langchain_community.callbacks.mlflow_callback import MlflowCallbackHandler
from langchain_community.callbacks.openai_info import OpenAICallbackHandler
from langchain_community.callbacks.promptlayer_callback import (
@@ -40,6 +39,9 @@ from langchain_community.callbacks.promptlayer_callback import (
)
from langchain_community.callbacks.sagemaker_callback import SageMakerCallbackHandler
from langchain_community.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
from langchain_community.callbacks.streaming_stdout_final_only import (
FinalStreamingStdOutCallbackHandler,
)
from langchain_community.callbacks.streamlit import (
LLMThoughtLabeler,
StreamlitCallbackHandler,

@@ -8,37 +8,7 @@ from typing import (
Optional,
)

from langchain_core.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainGroup,
AsyncCallbackManagerForChainRun,
AsyncCallbackManagerForLLMRun,
AsyncCallbackManagerForRetrieverRun,
AsyncCallbackManagerForToolRun,
AsyncParentRunManager,
AsyncRunManager,
BaseRunManager,
CallbackManager,
CallbackManagerForChainGroup,
CallbackManagerForChainRun,
CallbackManagerForLLMRun,
CallbackManagerForRetrieverRun,
CallbackManagerForToolRun,
Callbacks,
ParentRunManager,
RunManager,
ahandle_event,
atrace_as_chain_group,
handle_event,
trace_as_chain_group,
)
from langchain_core.tracers.context import (
collect_runs,
register_configure_hook,
tracing_enabled,
tracing_v2_enabled,
)
from langchain_core.utils.env import env_var_is_set
from langchain_core.tracers.context import register_configure_hook

from langchain_community.callbacks.openai_info import OpenAICallbackHandler
from langchain_community.callbacks.tracers.wandb import WandbTracer
@@ -97,33 +67,3 @@ def wandb_tracing_enabled(
wandb_tracing_callback_var.set(cb)
yield None
wandb_tracing_callback_var.set(None)


__all__ = [
"BaseRunManager",
"RunManager",
"ParentRunManager",
"AsyncRunManager",
"AsyncParentRunManager",
"CallbackManagerForLLMRun",
"AsyncCallbackManagerForLLMRun",
"CallbackManagerForChainRun",
"AsyncCallbackManagerForChainRun",
"CallbackManagerForToolRun",
"AsyncCallbackManagerForToolRun",
"CallbackManagerForRetrieverRun",
"AsyncCallbackManagerForRetrieverRun",
"CallbackManager",
"CallbackManagerForChainGroup",
"AsyncCallbackManager",
"AsyncCallbackManagerForChainGroup",
"tracing_enabled",
"tracing_v2_enabled",
"collect_runs",
"atrace_as_chain_group",
"trace_as_chain_group",
"handle_event",
"ahandle_event",
"Callbacks",
"env_var_is_set",
]

@@ -18,8 +18,8 @@ from langchain_core.messages import (
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.pydantic_v1 import Field, root_validator
from langchain_core.utils import get_from_dict_or_env
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env

logger = logging.getLogger(__name__)

@@ -87,8 +87,8 @@ class QianfanChatEndpoint(BaseChatModel):

client: Any

qianfan_ak: Optional[str] = None
qianfan_sk: Optional[str] = None
qianfan_ak: Optional[SecretStr] = None
qianfan_sk: Optional[SecretStr] = None

streaming: Optional[bool] = False
"""Whether to stream the results or not."""
@@ -117,19 +117,23 @@ class QianfanChatEndpoint(BaseChatModel):

@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
values["qianfan_ak"] = get_from_dict_or_env(
values,
"qianfan_ak",
"QIANFAN_AK",
values["qianfan_ak"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"qianfan_ak",
"QIANFAN_AK",
)
)
values["qianfan_sk"] = get_from_dict_or_env(
values,
"qianfan_sk",
"QIANFAN_SK",
values["qianfan_sk"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"qianfan_sk",
"QIANFAN_SK",
)
)
params = {
"ak": values["qianfan_ak"],
"sk": values["qianfan_sk"],
"ak": values["qianfan_ak"].get_secret_value(),
"sk": values["qianfan_sk"].get_secret_value(),
"model": values["model"],
"stream": values["streaming"],
}

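The hunk above is the recurring SecretStr hardening pattern in this commit: wrap the raw key with convert_to_secret_str at validation time, and unwrap with .get_secret_value() only at the call site. A minimal sketch of the round trip:

from langchain_core.pydantic_v1 import SecretStr
from langchain_core.utils import convert_to_secret_str

ak = convert_to_secret_str("my-access-key")
assert isinstance(ak, SecretStr)
print(ak)                     # ********** (masked when printed or logged)
print(ak.get_secret_value())  # my-access-key (explicit unwrap, as in the params dict above)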
@@ -115,13 +115,13 @@ class ChatMlflow(BaseChatModel):
"messages": message_dicts,
"temperature": self.temperature,
"n": self.n,
"stop": stop or self.stop,
"max_tokens": self.max_tokens,
**self.extra_params,
**kwargs,
}
if stop := self.stop or stop:
data["stop"] = stop
if self.max_tokens is not None:
data["max_tokens"] = self.max_tokens
resp = self._client.predict(endpoint=self.endpoint, inputs=data)
return ChatMlflow._create_chat_result(resp)

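The change swaps always-present request keys for conditional ones: the walrus expression keeps whichever stop value is set (preferring the model's own), and max_tokens is compared against None so a value of 0 would still be sent. A standalone sketch of the same pattern:

def build_request(configured_stop, call_stop, max_tokens):
    data = {"prompt": "hi"}
    # walrus: bind and test in one step; configured_stop wins when both are set
    if stop := configured_stop or call_stop:
        data["stop"] = stop
    # explicit None check so max_tokens=0 would still be forwarded
    if max_tokens is not None:
        data["max_tokens"] = max_tokens
    return data


assert build_request(None, ["\n"], None) == {"prompt": "hi", "stop": ["\n"]}
assert build_request(["###"], None, 16) == {
    "prompt": "hi",
    "stop": ["###"],
    "max_tokens": 16,
}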
@@ -15,7 +15,7 @@ from functools import partial
from typing import Callable, List, Sequence, Union, cast

from langchain_core.embeddings import Embeddings
from langchain_core.stores import BaseStore
from langchain_core.stores import BaseStore, ByteStore

from langchain_community.storage.encoder_backed import EncoderBackedStore

@@ -151,7 +151,7 @@ class CacheBackedEmbeddings(Embeddings):
def from_bytes_store(
cls,
underlying_embeddings: Embeddings,
document_embedding_cache: BaseStore[str, bytes],
document_embedding_cache: ByteStore,
*,
namespace: str = "",
) -> CacheBackedEmbeddings:

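ByteStore is the type alias this commit introduces in langchain_core.stores (see further down in the diff); swapping it into the signature changes nothing at runtime, it only shortens the annotation. A quick check:

from langchain_core.stores import BaseStore, ByteStore

# The alias and the explicit parameterization are the same typing object:
assert ByteStore == BaseStore[str, bytes]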
@@ -1,11 +1,11 @@
import logging
from typing import Any, Dict, List, Mapping, Optional
from typing import Any, Dict, List, Mapping, Optional, cast

import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.pydantic_v1 import Extra, Field, root_validator
from langchain_core.utils import get_from_dict_or_env
from langchain_core.pydantic_v1 import Extra, Field, SecretStr, root_validator
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env

from langchain_community.llms.utils import enforce_stop_tokens

@@ -15,8 +15,9 @@ logger = logging.getLogger(__name__)
class CerebriumAI(LLM):
"""CerebriumAI large language models.

To use, you should have the ``cerebrium`` python package installed, and the
environment variable ``CEREBRIUMAI_API_KEY`` set with your API key.
To use, you should have the ``cerebrium`` python package installed.
You should also have the environment variable ``CEREBRIUMAI_API_KEY``
set with your API key or pass it as a named argument in the constructor.

Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
@@ -25,7 +26,7 @@ class CerebriumAI(LLM):
.. code-block:: python

from langchain_community.llms import CerebriumAI
cerebrium = CerebriumAI(endpoint_url="")
cerebrium = CerebriumAI(endpoint_url="", cerebriumai_api_key="my-api-key")

"""

@@ -36,7 +37,7 @@ class CerebriumAI(LLM):
"""Holds any model parameters valid for `create` call not
explicitly specified."""

cerebriumai_api_key: Optional[str] = None
cerebriumai_api_key: Optional[SecretStr] = None

class Config:
"""Configuration for this pydantic config."""
@@ -64,8 +65,8 @@ class CerebriumAI(LLM):
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cerebriumai_api_key = get_from_dict_or_env(
values, "cerebriumai_api_key", "CEREBRIUMAI_API_KEY"
cerebriumai_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "cerebriumai_api_key", "CEREBRIUMAI_API_KEY")
)
values["cerebriumai_api_key"] = cerebriumai_api_key
return values
@@ -91,7 +92,9 @@ class CerebriumAI(LLM):
**kwargs: Any,
) -> str:
headers: Dict = {
"Authorization": self.cerebriumai_api_key,
"Authorization": cast(
SecretStr, self.cerebriumai_api_key
).get_secret_value(),
"Content-Type": "application/json",
}
params = self.model_kwargs or {}

libs/community/langchain_community/llms/cloudflare_workersai.py (new file, 126 lines)
@@ -0,0 +1,126 @@
import json
import logging
from typing import Any, Dict, Iterator, List, Optional

import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk

logger = logging.getLogger(__name__)


class CloudflareWorkersAI(LLM):
"""Langchain LLM class to help to access Cloudflare Workers AI service.
|
||||

To use, you must provide an API token and
account ID to access Cloudflare Workers AI, and
pass it as a named parameter to the constructor.

Example:
.. code-block:: python

from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI

my_account_id = "my_account_id"
my_api_token = "my_secret_api_token"
llm_model = "@cf/meta/llama-2-7b-chat-int8"

cf_ai = CloudflareWorkersAI(
account_id=my_account_id,
api_token=my_api_token,
model=llm_model
)
"""

account_id: str
api_token: str
model: str = "@cf/meta/llama-2-7b-chat-int8"
base_url: str = "https://api.cloudflare.com/client/v4/accounts"
streaming: bool = False
endpoint_url: str = ""

def __init__(self, **kwargs: Any) -> None:
"""Initialize the Cloudflare Workers AI class."""
super().__init__(**kwargs)

self.endpoint_url = f"{self.base_url}/{self.account_id}/ai/run/{self.model}"

@property
def _llm_type(self) -> str:
"""Return type of LLM."""
return "cloudflare"

@property
def _default_params(self) -> Dict[str, Any]:
"""Default parameters"""
return {}

@property
def _identifying_params(self) -> Dict[str, Any]:
"""Identifying parameters"""
return {
"account_id": self.account_id,
"api_token": self.api_token,
"model": self.model,
"base_url": self.base_url,
}

def _call_api(self, prompt: str, params: Dict[str, Any]) -> requests.Response:
"""Call Cloudflare Workers API"""
headers = {"Authorization": f"Bearer {self.api_token}"}
data = {"prompt": prompt, "stream": self.streaming, **params}
response = requests.post(self.endpoint_url, headers=headers, json=data)
return response

def _process_response(self, response: requests.Response) -> str:
"""Process API response"""
if response.ok:
data = response.json()
return data["result"]["response"]
else:
raise ValueError(f"Request failed with status {response.status_code}")

def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Streaming prediction"""
original_streaming: bool = self.streaming
self.streaming = True
_response_prefix_count = len("data: ")
_response_stream_end = b"data: [DONE]"
for chunk in self._call_api(prompt, kwargs).iter_lines():
if chunk == _response_stream_end:
break
if len(chunk) > _response_prefix_count:
try:
data = json.loads(chunk[_response_prefix_count:])
except Exception as e:
logger.debug(chunk)
raise e
if data is not None and "response" in data:
yield GenerationChunk(text=data["response"])
if run_manager:
run_manager.on_llm_new_token(data["response"])
logger.debug("stream end")
self.streaming = original_streaming

def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Regular prediction"""
if self.streaming:
return "".join(
[c.text for c in self._stream(prompt, stop, run_manager, **kwargs)]
)
else:
response = self._call_api(prompt, kwargs)
return self._process_response(response)
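A usage sketch for the new class, matching its docstring and the unit tests added further down; the account ID and token are placeholders:

from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI

llm = CloudflareWorkersAI(
    account_id="my_account_id",       # placeholder
    api_token="my_secret_api_token",  # placeholder
    model="@cf/meta/llama-2-7b-chat-int8",
)
print(llm("What is 2 + 2?"))          # single-shot call

# token-by-token streaming via the LLM.stream interface
for chunk in llm.stream("Say Hello"):
    print(chunk, end="", flush=True)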
@@ -334,13 +334,14 @@ class Databricks(LLM):

@property
def _llm_params(self) -> Dict[str, Any]:
params = {
params: Dict[str, Any] = {
"temperature": self.temperature,
"n": self.n,
"stop": self.stop,
"max_tokens": self.max_tokens,
**(self.model_kwargs or self.extra_params),
}
if self.stop:
params["stop"] = self.stop
if self.max_tokens is not None:
params["max_tokens"] = self.max_tokens
return params

@validator("cluster_id", always=True)
@@ -457,11 +458,9 @@ class Databricks(LLM):
request: Dict[str, Any] = {"prompt": prompt}
if self._client.llm:
request.update(self._llm_params)
request.update(self.model_kwargs or self.extra_params)
else:
request.update(self.model_kwargs or self.extra_params)
request.update(self.model_kwargs or self.extra_params)
request.update(kwargs)
if stop := self.stop or stop:
if stop:
request["stop"] = stop

if self.transform_input_fn:

@@ -106,12 +106,14 @@ class Mlflow(LLM):
"prompt": prompt,
"temperature": self.temperature,
"n": self.n,
"max_tokens": self.max_tokens,
**self.extra_params,
**kwargs,
}
if stop := self.stop or stop:
data["stop"] = stop
if self.max_tokens is not None:
data["max_tokens"] = self.max_tokens

resp = self._client.predict(endpoint=self.endpoint, inputs=data)
return resp["choices"][0]["text"]

@@ -9,16 +9,21 @@ The primary goal of these storages is to support implementation of caching.
from langchain_community.storage._lc_store import create_kv_docstore, create_lc_store
from langchain_community.storage.encoder_backed import EncoderBackedStore
from langchain_community.storage.file_system import LocalFileStore
from langchain_community.storage.in_memory import InMemoryStore
from langchain_community.storage.in_memory import InMemoryByteStore, InMemoryStore
from langchain_community.storage.redis import RedisStore
from langchain_community.storage.upstash_redis import UpstashRedisStore
from langchain_community.storage.upstash_redis import (
UpstashRedisByteStore,
UpstashRedisStore,
)

__all__ = [
"EncoderBackedStore",
"InMemoryStore",
"InMemoryByteStore",
"LocalFileStore",
"RedisStore",
"create_lc_store",
"create_kv_docstore",
"UpstashRedisByteStore",
"UpstashRedisStore",
]

@@ -3,7 +3,7 @@ from typing import Callable, Optional

from langchain_core.documents import Document
from langchain_core.load import Serializable, dumps, loads
from langchain_core.stores import BaseStore
from langchain_core.stores import BaseStore, ByteStore

from langchain_community.storage.encoder_backed import EncoderBackedStore

@@ -42,7 +42,7 @@ def _identity(x: str) -> str:


def create_lc_store(
store: BaseStore[str, bytes],
store: ByteStore,
*,
key_encoder: Optional[Callable[[str], str]] = None,
) -> BaseStore[str, Serializable]:
@@ -64,7 +64,7 @@ def create_lc_store(


def create_kv_docstore(
store: BaseStore[str, bytes],
store: ByteStore,
*,
key_encoder: Optional[Callable[[str], str]] = None,
) -> BaseStore[str, Document]:

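Since both factories now accept the ByteStore alias, any byte store plugs in. A hedged sketch of create_kv_docstore over the in-memory byte store added in this commit:

from langchain_core.documents import Document
from langchain_community.storage import InMemoryByteStore, create_kv_docstore

byte_store = InMemoryByteStore()
docstore = create_kv_docstore(byte_store)  # BaseStore[str, Document] over raw bytes
docstore.mset([("doc-1", Document(page_content="hello"))])
[restored] = docstore.mget(["doc-1"])
assert restored.page_content == "hello"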
@@ -2,12 +2,12 @@ import re
from pathlib import Path
from typing import Iterator, List, Optional, Sequence, Tuple, Union

from langchain_core.stores import BaseStore
from langchain_core.stores import ByteStore

from langchain_community.storage.exceptions import InvalidKeyException


class LocalFileStore(BaseStore[str, bytes]):
class LocalFileStore(ByteStore):
"""BaseStore interface that works on the local file system.

Examples:

@@ -3,12 +3,24 @@
This is a simple implementation of the BaseStore using a dictionary that is useful
primarily for unit testing purposes.
"""
from typing import Any, Dict, Iterator, List, Optional, Sequence, Tuple
from typing import (
Any,
Dict,
Generic,
Iterator,
List,
Optional,
Sequence,
Tuple,
TypeVar,
)

from langchain_core.stores import BaseStore

V = TypeVar("V")

class InMemoryStore(BaseStore[str, Any]):

class InMemoryBaseStore(BaseStore[str, V], Generic[V]):
"""In-memory implementation of the BaseStore using a dictionary.

Attributes:
@@ -34,9 +46,9 @@ class InMemoryStore(BaseStore[str, Any]):

def __init__(self) -> None:
"""Initialize an empty store."""
self.store: Dict[str, Any] = {}
self.store: Dict[str, V] = {}

def mget(self, keys: Sequence[str]) -> List[Optional[Any]]:
def mget(self, keys: Sequence[str]) -> List[Optional[V]]:
"""Get the values associated with the given keys.

Args:
@@ -48,7 +60,7 @@ class InMemoryStore(BaseStore[str, Any]):
"""
return [self.store.get(key) for key in keys]

def mset(self, key_value_pairs: Sequence[Tuple[str, Any]]) -> None:
def mset(self, key_value_pairs: Sequence[Tuple[str, V]]) -> None:
"""Set the values for the given keys.

Args:
@@ -67,7 +79,8 @@ class InMemoryStore(BaseStore[str, Any]):
keys (Sequence[str]): A sequence of keys to delete.
"""
for key in keys:
self.store.pop(key, None)
if key in self.store:
del self.store[key]

def yield_keys(self, prefix: Optional[str] = None) -> Iterator[str]:
"""Get an iterator over keys that match the given prefix.
@@ -84,3 +97,7 @@ class InMemoryStore(BaseStore[str, Any]):
for key in self.store.keys():
if key.startswith(prefix):
yield key


InMemoryStore = InMemoryBaseStore[Any]
InMemoryByteStore = InMemoryBaseStore[bytes]

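The refactor turns the old InMemoryStore into a Generic[V] base and derives both public names from it via parameterized aliases. A sketch of what the genericity buys:

from langchain_community.storage.in_memory import InMemoryBaseStore, InMemoryByteStore

int_store: InMemoryBaseStore[int] = InMemoryBaseStore()
int_store.mset([("a", 1), ("b", 2)])
assert int_store.mget(["a", "missing"]) == [1, None]

byte_store = InMemoryByteStore()  # same class, specialized to bytes values
byte_store.mset([("key", b"value")])
assert list(byte_store.yield_keys(prefix="k")) == ["key"]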
@@ -1,11 +1,11 @@
from typing import Any, Iterator, List, Optional, Sequence, Tuple, cast

from langchain_core.stores import BaseStore
from langchain_core.stores import ByteStore

from langchain_community.utilities.redis import get_client


class RedisStore(BaseStore[str, bytes]):
class RedisStore(ByteStore):
"""BaseStore implementation using Redis as the underlying store.

Examples:

@@ -1,7 +1,7 @@
from typing import Any, Iterator, List, Optional, Sequence, Tuple, cast

from langchain_core._api.deprecation import deprecated
from langchain_core.stores import BaseStore
from langchain_core.stores import BaseStore, ByteStore


class _UpstashRedisStore(BaseStore[str, str]):
@@ -130,7 +130,7 @@ class UpstashRedisStore(_UpstashRedisStore):
"""


class UpstashRedisByteStore(BaseStore[str, bytes]):
class UpstashRedisByteStore(ByteStore):
"""
BaseStore implementation using Upstash Redis
as the underlying store to store raw bytes.

@@ -82,8 +82,8 @@ class Qdrant(VectorStore):
qdrant = Qdrant(client, collection_name, embedding_function)
"""

CONTENT_KEY = "page_content"
METADATA_KEY = "metadata"
CONTENT_KEY = ["page_content"]
METADATA_KEY = ["metadata"]
VECTOR_NAME = None

def __init__(
@@ -91,8 +91,8 @@ class Qdrant(VectorStore):
client: Any,
collection_name: str,
embeddings: Optional[Embeddings] = None,
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
content_payload_key: Union[list, str] = CONTENT_KEY,
metadata_payload_key: Union[list, str] = METADATA_KEY,
distance_strategy: str = "COSINE",
vector_name: Optional[str] = VECTOR_NAME,
embedding_function: Optional[Callable] = None, # deprecated
@@ -112,6 +112,12 @@ class Qdrant(VectorStore):
f"got {type(client)}"
)

if isinstance(content_payload_key, str): # Ensuring Backward compatibility
content_payload_key = [content_payload_key]

if isinstance(metadata_payload_key, str): # Ensuring Backward compatibility
metadata_payload_key = [metadata_payload_key]

if embeddings is None and embedding_function is None:
raise ValueError(
"`embeddings` value can't be None. Pass `Embeddings` instance."
@@ -127,8 +133,14 @@ class Qdrant(VectorStore):
self._embeddings_function = embedding_function
self.client: qdrant_client.QdrantClient = client
self.collection_name = collection_name
self.content_payload_key = content_payload_key or self.CONTENT_KEY
self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY
self.content_payload_key = (
content_payload_key if content_payload_key is not None else self.CONTENT_KEY
)
self.metadata_payload_key = (
metadata_payload_key
if metadata_payload_key is not None
else self.METADATA_KEY
)
self.vector_name = vector_name or self.VECTOR_NAME

if embedding_function is not None:
@@ -1178,8 +1190,8 @@ class Qdrant(VectorStore):
path: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
content_payload_key: List[str] = CONTENT_KEY,
metadata_payload_key: List[str] = METADATA_KEY,
vector_name: Optional[str] = VECTOR_NAME,
batch_size: int = 64,
shard_number: Optional[int] = None,
@@ -1354,8 +1366,8 @@ class Qdrant(VectorStore):
path: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
content_payload_key: List[str] = CONTENT_KEY,
metadata_payload_key: List[str] = METADATA_KEY,
vector_name: Optional[str] = VECTOR_NAME,
batch_size: int = 64,
shard_number: Optional[int] = None,
@@ -1527,8 +1539,8 @@ class Qdrant(VectorStore):
path: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
content_payload_key: List[str] = CONTENT_KEY,
metadata_payload_key: List[str] = METADATA_KEY,
vector_name: Optional[str] = VECTOR_NAME,
shard_number: Optional[int] = None,
replication_factor: Optional[int] = None,
@@ -1691,8 +1703,8 @@ class Qdrant(VectorStore):
path: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
content_payload_key: List[str] = CONTENT_KEY,
metadata_payload_key: List[str] = METADATA_KEY,
vector_name: Optional[str] = VECTOR_NAME,
shard_number: Optional[int] = None,
replication_factor: Optional[int] = None,
@@ -1888,11 +1900,11 @@ class Qdrant(VectorStore):

@classmethod
def _build_payloads(
cls,
cls: Type[Qdrant],
texts: Iterable[str],
metadatas: Optional[List[dict]],
content_payload_key: str,
metadata_payload_key: str,
content_payload_key: list[str],
metadata_payload_key: list[str],
) -> List[dict]:
payloads = []
for i, text in enumerate(texts):
@@ -1913,29 +1925,67 @@ class Qdrant(VectorStore):

@classmethod
def _document_from_scored_point(
cls,
cls: Type[Qdrant],
scored_point: Any,
content_payload_key: str,
metadata_payload_key: str,
content_payload_key: list[str],
metadata_payload_key: list[str],
) -> Document:
return Document(
page_content=scored_point.payload.get(content_payload_key),
metadata=scored_point.payload.get(metadata_payload_key) or {},
payload = scored_point.payload
return Qdrant._document_from_payload(
payload=payload,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
)

@classmethod
def _document_from_scored_point_grpc(
cls,
cls: Type[Qdrant],
scored_point: Any,
content_payload_key: str,
metadata_payload_key: str,
content_payload_key: list[str],
metadata_payload_key: list[str],
) -> Document:
from qdrant_client.conversions.conversion import grpc_to_payload

payload = grpc_to_payload(scored_point.payload)
return Qdrant._document_from_payload(
payload=payload,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
)

@classmethod
def _document_from_payload(
cls: Type[Qdrant],
payload: Any,
content_payload_key: list[str],
metadata_payload_key: list[str],
) -> Document:
if len(content_payload_key) == 1:
content = payload.get(
content_payload_key[0]
) # Ensuring backward compatibility
elif len(content_payload_key) > 1:
content = {
content_key: payload.get(content_key)
for content_key in content_payload_key
}
content = str(content) # Ensuring str type output
else:
content = ""
if len(metadata_payload_key) == 1:
metadata = payload.get(
metadata_payload_key[0]
) # Ensuring backward compatibility
elif len(metadata_payload_key) > 1:
metadata = {
metadata_key: payload.get(metadata_key)
for metadata_key in metadata_payload_key
}
else:
metadata = {}
return Document(
page_content=payload[content_payload_key],
metadata=payload.get(metadata_payload_key) or {},
page_content=content,
metadata=metadata,
)

def _build_condition(self, key: str, value: Any) -> List[rest.FieldCondition]:

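The payload helper's branching is worth restating in isolation: one key preserves the old scalar behavior, several keys are collected into a dict and stringified, and an empty list falls back to an empty value. A hedged standalone sketch of the same logic:

def extract(payload, keys):
    if len(keys) == 1:
        return payload.get(keys[0])  # single key: backward-compatible scalar
    if len(keys) > 1:
        # multiple keys: gather into a dict, then stringify for page_content
        return str({key: payload.get(key) for key in keys})
    return ""


payload = {"page_content": "hello", "title": "greeting"}
assert extract(payload, ["page_content"]) == "hello"
assert extract(payload, ["page_content", "title"]).startswith("{")
assert extract(payload, []) == ""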
@@ -0,0 +1,53 @@
from typing import cast

from langchain_core.pydantic_v1 import SecretStr
from pytest import CaptureFixture, MonkeyPatch

from langchain_community.chat_models.baidu_qianfan_endpoint import (
QianfanChatEndpoint,
)


def test_qianfan_key_masked_when_passed_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
"""Test initialization with an API key provided via an env variable"""
monkeypatch.setenv("QIANFAN_AK", "test-api-key")
monkeypatch.setenv("QIANFAN_SK", "test-secret-key")

chat = QianfanChatEndpoint()
print(chat.qianfan_ak, end="")
captured = capsys.readouterr()
assert captured.out == "**********"

print(chat.qianfan_sk, end="")
captured = capsys.readouterr()
assert captured.out == "**********"


def test_qianfan_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
"""Test initialization with an API key provided via the initializer"""
chat = QianfanChatEndpoint(
qianfan_ak="test-api-key",
qianfan_sk="test-secret-key",
)
print(chat.qianfan_ak, end="")
captured = capsys.readouterr()
assert captured.out == "**********"

print(chat.qianfan_sk, end="")
captured = capsys.readouterr()

assert captured.out == "**********"


def test_uses_actual_secret_value_from_secret_str() -> None:
"""Test that actual secret is retrieved using `.get_secret_value()`."""
chat = QianfanChatEndpoint(
qianfan_ak="test-api-key",
qianfan_sk="test-secret-key",
)
assert cast(SecretStr, chat.qianfan_ak).get_secret_value() == "test-api-key"
assert cast(SecretStr, chat.qianfan_sk).get_secret_value() == "test-secret-key"
@@ -0,0 +1,46 @@
import responses

from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI


@responses.activate
def test_cloudflare_workersai_call() -> None:
responses.add(
responses.POST,
"https://api.cloudflare.com/client/v4/accounts/my_account_id/ai/run/@cf/meta/llama-2-7b-chat-int8",
json={"result": {"response": "4"}},
status=200,
)

llm = CloudflareWorkersAI(
account_id="my_account_id",
api_token="my_api_token",
model="@cf/meta/llama-2-7b-chat-int8",
)
output = llm("What is 2 + 2?")

assert output == "4"


@responses.activate
def test_cloudflare_workersai_stream() -> None:
response_body = ['data: {"response": "Hello"}', "data: [DONE]"]
responses.add(
responses.POST,
"https://api.cloudflare.com/client/v4/accounts/my_account_id/ai/run/@cf/meta/llama-2-7b-chat-int8",
body="\n".join(response_body),
status=200,
)

llm = CloudflareWorkersAI(
account_id="my_account_id",
api_token="my_api_token",
model="@cf/meta/llama-2-7b-chat-int8",
streaming=True,
)

outputs = []
for chunk in llm.stream("Say Hello"):
outputs.append(chunk)

assert "".join(outputs) == "Hello"
libs/community/tests/unit_tests/llms/test_cerebriumai.py (new file, 33 lines)
@@ -0,0 +1,33 @@
"""Test CerebriumAI llm"""


from langchain_core.pydantic_v1 import SecretStr
from pytest import CaptureFixture, MonkeyPatch

from langchain_community.llms.cerebriumai import CerebriumAI


def test_api_key_is_secret_string() -> None:
llm = CerebriumAI(cerebriumai_api_key="test-cerebriumai-api-key")
assert isinstance(llm.cerebriumai_api_key, SecretStr)


def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture) -> None:
llm = CerebriumAI(cerebriumai_api_key="secret-api-key")
print(llm.cerebriumai_api_key, end="")
captured = capsys.readouterr()

assert captured.out == "**********"
assert repr(llm.cerebriumai_api_key) == "SecretStr('**********')"


def test_api_key_masked_when_passed_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
monkeypatch.setenv("CEREBRIUMAI_API_KEY", "secret-api-key")
llm = CerebriumAI()
print(llm.cerebriumai_api_key, end="")
captured = capsys.readouterr()

assert captured.out == "**********"
assert repr(llm.cerebriumai_api_key) == "SecretStr('**********')"
@@ -3,10 +3,12 @@ from langchain_community.storage import __all__
EXPECTED_ALL = [
"EncoderBackedStore",
"InMemoryStore",
"InMemoryByteStore",
"LocalFileStore",
"RedisStore",
"create_lc_store",
"create_kv_docstore",
"UpstashRedisByteStore",
"UpstashRedisStore",
]


@@ -46,8 +46,9 @@ class RunnableWithMessageHistory(RunnableBindingBase):

from typing import Optional

from langchain_core.chat_models import ChatAnthropic
from langchain_core.memory.chat_message_histories import RedisChatMessageHistory
from langchain.chat_models import ChatAnthropic
from langchain.memory.chat_message_histories import RedisChatMessageHistory

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory


@@ -51,3 +51,6 @@ class BaseStore(Generic[K, V], ABC):
This method is allowed to return an iterator over either K or str
depending on what makes more sense for the given store.
"""


ByteStore = BaseStore[str, bytes]

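Because ByteStore is just BaseStore[str, bytes], third-party stores can subclass it directly, the same way LocalFileStore and RedisStore do above. A hedged toy implementation over a plain dict:

from typing import Dict, Iterator, List, Optional, Sequence, Tuple

from langchain_core.stores import ByteStore


class DictByteStore(ByteStore):
    """Toy ByteStore backed by a plain dict (illustrative only)."""

    def __init__(self) -> None:
        self._data: Dict[str, bytes] = {}

    def mget(self, keys: Sequence[str]) -> List[Optional[bytes]]:
        return [self._data.get(key) for key in keys]

    def mset(self, key_value_pairs: Sequence[Tuple[str, bytes]]) -> None:
        self._data.update(key_value_pairs)

    def mdelete(self, keys: Sequence[str]) -> None:
        for key in keys:
            self._data.pop(key, None)

    def yield_keys(self, prefix: Optional[str] = None) -> Iterator[str]:
        for key in self._data:
            if prefix is None or key.startswith(prefix):
                yield key


store = DictByteStore()
store.mset([("ns/key", b"payload")])
assert store.mget(["ns/key"]) == [b"payload"]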
@@ -7,41 +7,45 @@
BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler
"""

from langchain_core.callbacks import StdOutCallbackHandler, StreamingStdOutCallbackHandler
from langchain_core.tracers.langchain import LangChainTracer
from langchain_core.tracers.context import (
collect_runs,
tracing_enabled,
tracing_v2_enabled,
)
from langchain_core.tracers.langchain import LangChainTracer

from langchain.callbacks.aim_callback import AimCallbackHandler
from langchain.callbacks.argilla_callback import ArgillaCallbackHandler
from langchain.callbacks.arize_callback import ArizeCallbackHandler
from langchain.callbacks.arthur_callback import ArthurCallbackHandler
from langchain.callbacks.clearml_callback import ClearMLCallbackHandler
from langchain.callbacks.comet_ml_callback import CometCallbackHandler
from langchain.callbacks.context_callback import ContextCallbackHandler
from langchain.callbacks.file import FileCallbackHandler
from langchain.callbacks.flyte_callback import FlyteCallbackHandler
from langchain.callbacks.human import HumanApprovalCallbackHandler
from langchain.callbacks.infino_callback import InfinoCallbackHandler
from langchain.callbacks.labelstudio_callback import LabelStudioCallbackHandler
from langchain.callbacks.llmonitor_callback import LLMonitorCallbackHandler
from langchain.callbacks.manager import get_openai_callback, wandb_tracing_enabled
from langchain.callbacks.mlflow_callback import MlflowCallbackHandler
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.promptlayer_callback import PromptLayerCallbackHandler
from langchain.callbacks.sagemaker_callback import SageMakerCallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.callbacks.streaming_stdout_final_only import (
from langchain_community.callbacks.aim_callback import AimCallbackHandler
from langchain_community.callbacks.argilla_callback import ArgillaCallbackHandler
from langchain_community.callbacks.arize_callback import ArizeCallbackHandler
from langchain_community.callbacks.arthur_callback import ArthurCallbackHandler
from langchain_community.callbacks.clearml_callback import ClearMLCallbackHandler
from langchain_community.callbacks.comet_ml_callback import CometCallbackHandler
from langchain_community.callbacks.context_callback import ContextCallbackHandler
from langchain_community.callbacks.file import FileCallbackHandler
from langchain_community.callbacks.flyte_callback import FlyteCallbackHandler
from langchain_community.callbacks.human import HumanApprovalCallbackHandler
from langchain_community.callbacks.infino_callback import InfinoCallbackHandler
from langchain_community.callbacks.labelstudio_callback import LabelStudioCallbackHandler
from langchain_community.callbacks.llmonitor_callback import LLMonitorCallbackHandler
from langchain_community.callbacks.mlflow_callback import MlflowCallbackHandler
from langchain_community.callbacks.openai_info import OpenAICallbackHandler
from langchain_community.callbacks.promptlayer_callback import PromptLayerCallbackHandler
from langchain_community.callbacks.sagemaker_callback import SageMakerCallbackHandler
from langchain_community.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
from langchain_community.callbacks.streaming_stdout_final_only import (
FinalStreamingStdOutCallbackHandler,
)
from langchain.callbacks.streamlit import LLMThoughtLabeler, StreamlitCallbackHandler
from langchain.callbacks.trubrics_callback import TrubricsCallbackHandler
from langchain.callbacks.wandb_callback import WandbCallbackHandler
from langchain.callbacks.whylabs_callback import WhyLabsCallbackHandler
from langchain_community.callbacks.streamlit import LLMThoughtLabeler, StreamlitCallbackHandler
from langchain_community.callbacks.trubrics_callback import TrubricsCallbackHandler
from langchain_community.callbacks.wandb_callback import WandbCallbackHandler
from langchain_community.callbacks.whylabs_callback import WhyLabsCallbackHandler

from langchain_community.callbacks.manager import (
get_openai_callback,
wandb_tracing_enabled,
)


__all__ = [
"AimCallbackHandler",

@@ -1,9 +1,5 @@
from __future__ import annotations

from langchain_community.callbacks.manager import (
get_openai_callback,
wandb_tracing_enabled,
)
from langchain_core.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainGroup,
@@ -34,6 +30,11 @@ from langchain_core.tracers.context import (
tracing_v2_enabled,
)
from langchain_core.utils.env import env_var_is_set
from langchain_community.callbacks.manager import (
get_openai_callback,
wandb_tracing_enabled,
)


__all__ = [
"BaseRunManager",

libs/langchain/langchain/llms/cloudflare_workersai.py (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI
|
||||
|
||||
__all__ = ["CloudflareWorkersAI"]
|
||||
@@ -3,7 +3,7 @@ from typing import List, Optional
|
||||
|
||||
from langchain_core.documents import Document
|
||||
from langchain_core.retrievers import BaseRetriever
|
||||
from langchain_core.stores import BaseStore
|
||||
from langchain_core.stores import BaseStore, ByteStore
|
||||
from langchain_core.vectorstores import VectorStore
|
||||
|
||||
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
|
||||
@@ -38,7 +38,7 @@ class MultiVectorRetriever(BaseRetriever):
|
||||
*,
|
||||
vectorstore: VectorStore,
|
||||
docstore: Optional[BaseStore[str, Document]] = None,
|
||||
base_store: Optional[BaseStore[str, bytes]] = None,
|
||||
base_store: Optional[ByteStore] = None,
|
||||
id_key: str = "doc_id",
|
||||
search_kwargs: Optional[dict] = None,
|
||||
search_type: SearchType = SearchType.similarity,
|
||||
|
||||
@@ -9,16 +9,18 @@ The primary goal of these storages is to support implementation of caching.

from langchain.storage._lc_store import create_kv_docstore, create_lc_store
from langchain.storage.encoder_backed import EncoderBackedStore
from langchain.storage.file_system import LocalFileStore
-from langchain.storage.in_memory import InMemoryStore
+from langchain.storage.in_memory import InMemoryByteStore, InMemoryStore
from langchain.storage.redis import RedisStore
-from langchain.storage.upstash_redis import UpstashRedisStore
+from langchain.storage.upstash_redis import UpstashRedisByteStore, UpstashRedisStore

__all__ = [
    "EncoderBackedStore",
    "InMemoryStore",
+    "InMemoryByteStore",
    "LocalFileStore",
    "RedisStore",
    "create_lc_store",
    "create_kv_docstore",
+    "UpstashRedisByteStore",
    "UpstashRedisStore",
]
@@ -3,7 +3,7 @@ from typing import Callable, Optional

from langchain_core.documents import Document
from langchain_core.load import Serializable, dumps, loads
-from langchain_core.stores import BaseStore
+from langchain_core.stores import BaseStore, ByteStore

from langchain.storage.encoder_backed import EncoderBackedStore

@@ -42,7 +42,7 @@ def _identity(x: str) -> str:


def create_lc_store(
-    store: BaseStore[str, bytes],
+    store: ByteStore,
    *,
    key_encoder: Optional[Callable[[str], str]] = None,
) -> BaseStore[str, Serializable]:
@@ -64,7 +64,7 @@ def create_lc_store(


def create_kv_docstore(
-    store: BaseStore[str, bytes],
+    store: ByteStore,
    *,
    key_encoder: Optional[Callable[[str], str]] = None,
) -> BaseStore[str, Document]:
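With the alias in place, both factories accept any byte store and return a typed wrapper that handles serialization internally. A minimal sketch of `create_kv_docstore`:

    from langchain_core.documents import Document
    from langchain.storage import InMemoryByteStore, create_kv_docstore

    doc_store = create_kv_docstore(InMemoryByteStore())  # BaseStore[str, Document]
    doc_store.mset([("doc-1", Document(page_content="hello"))])
    [restored] = doc_store.mget(["doc-1"])
    assert restored.page_content == "hello"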
@@ -1,3 +1,8 @@
-from langchain_community.storage.in_memory import InMemoryStore
+from langchain_community.storage.in_memory import (
+    InMemoryBaseStore,
+    InMemoryByteStore,
+    InMemoryStore,
+    V,
+)

-__all__ = ["InMemoryStore"]
+__all__ = ["V", "InMemoryBaseStore", "InMemoryStore", "InMemoryByteStore"]
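`InMemoryStore` and `InMemoryByteStore` appear to be thin specializations of the generic `InMemoryBaseStore` (with `V` as its value type variable), so re-exporting all four keeps annotations written against any of these names working. A sketch of the relationship, under that assumed class layout:

    from langchain.storage.in_memory import InMemoryBaseStore, InMemoryByteStore

    # Assumption: InMemoryByteStore subclasses InMemoryBaseStore[bytes].
    assert issubclass(InMemoryByteStore, InMemoryBaseStore)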
@@ -10,7 +10,6 @@ from langchain_core.prompts import PromptTemplate
from langchain.chains.llm import LLMChain
from langchain.chains.loading import load_chain
from tests.unit_tests.llms.fake_llm import FakeLLM
-from langchain_community.llms import get_type_to_cls_dict


class FakeOutputParser(BaseOutputParser):
@@ -28,15 +27,14 @@ def fake_llm_chain() -> LLMChain:
    return LLMChain(prompt=prompt, llm=FakeLLM(), output_key="text1")


-@patch("langchain_community.llms.get_type_to_cls_dict", lambda: {"fake": lambda: FakeLLM})
+@patch("langchain.llms.get_type_to_cls_dict", lambda: {"fake": lambda: FakeLLM})
def test_serialization(fake_llm_chain: LLMChain) -> None:
    """Test serialization."""
-    assert get_type_to_cls_dict() == {"fake": 1}
    with TemporaryDirectory() as temp_dir:
        file = temp_dir + "/llm.json"
        fake_llm_chain.save(file)
        loaded_chain = load_chain(file)
        assert loaded_chain == fake_llm_chain


def test_missing_inputs(fake_llm_chain: LLMChain) -> None:
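The patch target changes because `unittest.mock.patch` must replace a name in the namespace where the code under test looks it up; `load_chain` evidently resolves `get_type_to_cls_dict` through `langchain.llms`, so patching the `langchain_community.llms` original would have no effect on the test. A standalone illustration of the rule (assuming `langchain.llms` exposes `get_type_to_cls_dict`, as the decorator above implies):

    from unittest.mock import patch

    import langchain.llms

    # patch() swaps the attribute on the module object the caller reads from.
    with patch("langchain.llms.get_type_to_cls_dict", lambda: {}):
        assert langchain.llms.get_type_to_cls_dict() == {}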
@@ -0,0 +1,11 @@
from langchain.document_loaders.blob_loaders import __all__


def test_public_api() -> None:
    """Hard-code public API to help determine if we have broken it."""
    assert sorted(__all__) == [
        "Blob",
        "BlobLoader",
        "FileSystemBlobLoader",
        "YoutubeAudioLoader",
    ]
@@ -0,0 +1,17 @@
from langchain.document_loaders.parsers import __all__


def test_parsers_public_api_correct() -> None:
    """Test public API of parsers for breaking changes."""
    assert set(__all__) == {
        "BS4HTMLParser",
        "DocAIParser",
        "GrobidParser",
        "LanguageParser",
        "OpenAIWhisperParser",
        "PyPDFParser",
        "PDFMinerParser",
        "PyMuPDFParser",
        "PyPDFium2Parser",
        "PDFPlumberParser",
    }
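Both new tests pin the public surface of a module so the re-export shims cannot silently drop or grow names. Comparing sets (or sorted lists) makes a failure point at the exact difference; a hypothetical sketch of the drift these tests catch:

    # Hypothetical: a contributor adds NewParser to __all__ without review.
    expected = {"PyPDFParser", "GrobidParser"}
    actual = {"PyPDFParser", "GrobidParser", "NewParser"}
    assert actual == expected, (
        f"added: {actual - expected}, removed: {expected - actual}"
    )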
0  libs/langchain/tests/unit_tests/graphs/__init__.py  Normal file
0  libs/langchain/tests/unit_tests/storage/__init__.py  Normal file
@@ -3,10 +3,12 @@ from langchain.storage import __all__

EXPECTED_ALL = [
    "EncoderBackedStore",
    "InMemoryStore",
+    "InMemoryByteStore",
    "LocalFileStore",
    "RedisStore",
    "create_lc_store",
    "create_kv_docstore",
+    "UpstashRedisByteStore",
    "UpstashRedisStore",
]
@@ -35,7 +35,7 @@ def test_required_dependencies(poetry_conf: Mapping[str, Any]) -> None:
        package_name for package_name, required in is_required.items() if required
    ]

-    assert sorted(required_dependencies) == sorted([
+    assert sorted(required_dependencies) == [
        "PyYAML",
        "SQLAlchemy",
        "aiohttp",
@@ -50,9 +50,7 @@ def test_required_dependencies(poetry_conf: Mapping[str, Any]) -> None:
        "python",
        "requests",
        "tenacity",
-        "langchain-community",
-        "langchain-openai"
-    ])
+    ]

    unrequired_dependencies = [
        package_name for package_name, required in is_required.items() if not required
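Dropping the `sorted()` around the expected literal tightens the test: the hard-coded list must now itself stay alphabetized, so it doubles as readable documentation of the dependency set, and the expected required dependencies no longer include `langchain-community` or `langchain-openai`. The pattern in isolation:

    required = ["aiohttp", "requests"]  # stand-in for the computed list
    # Comparing against a pre-sorted literal fails loudly if either
    # side drifts out of order or gains an entry.
    assert sorted(required) == ["aiohttp", "requests"]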
@@ -0,0 +1,79 @@
"""Test the public API of the vectorstores package."""
from langchain.vectorstores import __all__ as public_api

_EXPECTED = [
    "AlibabaCloudOpenSearch",
    "AlibabaCloudOpenSearchSettings",
    "AnalyticDB",
    "Annoy",
    "AtlasDB",
    "AwaDB",
    "AzureSearch",
    "Bagel",
    "Cassandra",
    "AstraDB",
    "Chroma",
    "Clarifai",
    "Clickhouse",
    "ClickhouseSettings",
    "DashVector",
    "DatabricksVectorSearch",
    "DeepLake",
    "Dingo",
    "DocArrayHnswSearch",
    "DocArrayInMemorySearch",
    "ElasticKnnSearch",
    "ElasticVectorSearch",
    "ElasticsearchStore",
    "Epsilla",
    "FAISS",
    "Hologres",
    "LanceDB",
    "LLMRails",
    "Marqo",
    "MatchingEngine",
    "Meilisearch",
    "Milvus",
    "MomentoVectorIndex",
    "MongoDBAtlasVectorSearch",
    "MyScale",
    "MyScaleSettings",
    "Neo4jVector",
    "OpenSearchVectorSearch",
    "PGEmbedding",
    "PGVector",
    "Pinecone",
    "Qdrant",
    "Redis",
    "Rockset",
    "SKLearnVectorStore",
    "ScaNN",
    "SemaDB",
    "SingleStoreDB",
    "SQLiteVSS",
    "StarRocks",
    "SupabaseVectorStore",
    "Tair",
    "TileDB",
    "Tigris",
    "TimescaleVector",
    "Typesense",
    "USearch",
    "Vald",
    "Vearch",
    "Vectara",
    "VespaStore",
    "Weaviate",
    "ZepVectorStore",
    "Zilliz",
    "TencentVectorDB",
    "AzureCosmosDBVectorSearch",
    "VectorStore",
    "Yellowbrick",
]


def test_public_api() -> None:
    """Test for regressions or changes in the public API."""
    # Check that the public API is as expected
    assert set(public_api) == set(_EXPECTED)