Mirror of https://github.com/hwchase17/langchain.git (synced 2025-08-06 19:48:26 +00:00)

community[patch]: docstrings update (#20301)

Added missing docstrings. Formatted docstrings into a consistent form.

This commit is contained in:
parent 2900720cd3
commit 7cf2d2759d
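The convention applied throughout the diff below: the one-line summary sits on the same line as the opening quotes, leading articles and "Wrapper around ..." phrasing are dropped, and function docstrings use the imperative mood. A minimal sketch of the style, using hypothetical names (ExampleClient, fetch_page) that are not part of this commit:

class ExampleClient:
    """Client for the Example API."""  # was: """A wrapper around the Example API."""

    def fetch_page(self, url: str) -> str:
        """Fetch a page by URL."""  # was: """Fetches a page by URL."""
        return url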
@@ -9,7 +9,7 @@ from langchain_community.tools.connery import ConneryService


 class ConneryToolkit(BaseToolkit):
     """
-    A LangChain Toolkit with a list of Connery Actions as tools.
+    Toolkit with a list of Connery Actions as tools.
     """

     tools: List[BaseTool]

@@ -348,7 +348,7 @@ def create_openapi_agent(
     allow_dangerous_requests: bool = False,
     **kwargs: Any,
 ) -> Any:
-    """Instantiate OpenAI API planner and controller for a given spec.
+    """Construct an OpenAI API planner and controller for a given spec.

     Inject credentials via requests_wrapper.

@@ -91,7 +91,7 @@ def analyze_text(


 class FlyteCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
-    """This callback handler that is used within a Flyte task."""
+    """Callback handler that is used within a Flyte task."""

     def __init__(self) -> None:
         """Initialize callback handler."""
@@ -9,14 +9,14 @@ if TYPE_CHECKING:


 class ChildType(Enum):
-    """The enumerator of the child type."""
+    """Enumerator of the child type."""

     MARKDOWN = "MARKDOWN"
     EXCEPTION = "EXCEPTION"


 class ChildRecord(NamedTuple):
-    """The child record as a NamedTuple."""
+    """Child record as a NamedTuple."""

     type: ChildType
     kwargs: Dict[str, Any]

@@ -24,7 +24,7 @@ class ChildRecord(NamedTuple):


 class MutableExpander:
-    """A Streamlit expander that can be renamed and dynamically expanded/collapsed."""
+    """Streamlit expander that can be renamed and dynamically expanded/collapsed."""

     def __init__(self, parent_container: DeltaGenerator, label: str, expanded: bool):
         """Create a new MutableExpander.

@@ -51,7 +51,7 @@ class MutableExpander:

     @property
     def label(self) -> str:
-        """The expander's label string."""
+        """Expander's label string."""
         return self._label

     @property
@@ -40,7 +40,7 @@ class LLMThoughtState(Enum):


 class ToolRecord(NamedTuple):
-    """The tool record as a NamedTuple."""
+    """Tool record as a NamedTuple."""

     name: str
     input_str: str

@@ -21,7 +21,7 @@ logger = logging.getLogger(__name__)


 class BaseMessageConverter(ABC):
-    """Class that converts BaseMessage to the SQLAlchemy model."""
+    """Convert BaseMessage to the SQLAlchemy model."""

     @abstractmethod
     def from_sql_model(self, sql_message: Any) -> BaseMessage:

@@ -196,7 +196,7 @@ _message_type_lookups = {"human": "user", "ai": "assistant"}


 class BedrockChat(BaseChatModel, BedrockBase):
-    """A chat model that uses the Bedrock API."""
+    """Chat model that uses the Bedrock API."""

     @property
     def _llm_type(self) -> str:
@@ -26,8 +26,7 @@ DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful, and honest assistant."


 class ChatHuggingFace(BaseChatModel):
-    """
-    Wrapper for using Hugging Face LLM's as ChatModels.
+    """Hugging Face LLMs as ChatModels.

     Works with `HuggingFaceTextGenInference`, `HuggingFaceEndpoint`,
     and `HuggingFaceHub` LLMs.

@@ -161,7 +161,7 @@ def _convert_message_to_dict(message: BaseMessage) -> dict:


 class ChatLiteLLM(BaseChatModel):
-    """A chat model that uses the LiteLLM API."""
+    """Chat model that uses the LiteLLM API."""

     client: Any  #: :meta private:
     model: str = "gpt-3.5-turbo"

@@ -37,7 +37,7 @@ def _parse_chat_history(history: List[BaseMessage]) -> List:


 class MiniMaxChat(MinimaxCommon, BaseChatModel):
-    """Wrapper around Minimax large language models.
+    """MiniMax large language models.

     To use, you should have the environment variable ``MINIMAX_GROUP_ID`` and
     ``MINIMAX_API_KEY`` set with your API token, or pass it as a named parameter to
@@ -9,7 +9,7 @@ from langchain_community.llms.moonshot import MOONSHOT_SERVICE_URL_BASE, Moonsho


 class MoonshotChat(MoonshotCommon, ChatOpenAI):  # type: ignore[misc]
-    """Wrapper around Moonshot large language models.
+    """Moonshot large language models.

     To use, you should have the ``openai`` python package installed, and the
     environment variable ``MOONSHOT_API_KEY`` set with your API key.

@@ -26,7 +26,7 @@ logger = logging.getLogger(__name__)


 class PaiEasChatEndpoint(BaseChatModel):
-    """Eas LLM Service chat model API.
+    """Alibaba Cloud PAI-EAS LLM Service chat model API.

     To use, must have a deployed eas chat llm service on AliCloud. One can set the
     environment variable ``eas_service_url`` and ``eas_service_token`` set with your eas

@@ -159,7 +159,7 @@ def _messages_to_prompt_dict(


 class ChatPremAI(BaseChatModel, BaseModel):
-    """Use any LLM provider with Prem and Langchain.
+    """PremAI Chat models.

     To use, you will need to have an API key. You can find your existing API Key
     or generate a new one here: https://app.premai.io/api_keys/
@@ -357,6 +357,7 @@ def create_prem_retry_decorator(
     max_retries: int = 1,
     run_manager: Optional[Union[CallbackManagerForLLMRun]] = None,
 ) -> Callable[[Any], Any]:
+    """Create a retry decorator for PremAI API errors."""
     import premai.models

     errors = [

@@ -10,7 +10,7 @@ from langchain_community.llms.solar import SOLAR_SERVICE_URL_BASE, SolarCommon


 class SolarChat(SolarCommon, ChatOpenAI):  # type: ignore[misc]
-    """Wrapper around Solar large language models.
+    """Solar large language models.

     To use, you should have the ``openai`` python package installed, and the
     environment variable ``SOLAR_API_KEY`` set with your API key.

@@ -89,7 +89,7 @@ def _convert_delta_to_message_chunk(


 class ChatSparkLLM(BaseChatModel):
-    """Wrapper around iFlyTek's Spark large language model.
+    """iFlyTek Spark large language model.

     To use, you should pass `app_id`, `api_key`, `api_secret`
     as a named parameter to the constructor OR set environment
@@ -61,6 +61,7 @@ logger = logging.getLogger(__name__)
 def convert_dict_to_message(
     _dict: Mapping[str, Any], is_chunk: bool = False
 ) -> Union[BaseMessage, BaseMessageChunk]:
+    """Convert a dict to a message."""
     role = _dict["role"]
     content = _dict["content"]
     if role == "user":

@@ -88,6 +89,7 @@ def convert_dict_to_message(


 def convert_message_chunk_to_message(message_chunk: BaseMessageChunk) -> BaseMessage:
+    """Convert a message chunk to a message."""
     if isinstance(message_chunk, HumanMessageChunk):
         return HumanMessage(content=message_chunk.content)
     elif isinstance(message_chunk, AIMessageChunk):

@@ -53,7 +53,7 @@ def _parse_chat_history(history: List[BaseMessage]) -> List[Dict[str, str]]:


 class ChatYandexGPT(_BaseYandexGPT, BaseChatModel):
-    """Wrapper around YandexGPT large language models.
+    """YandexGPT large language models.

     There are two authentication options for the service account
     with the ``ai.languageModels.user`` role:
@@ -42,6 +42,7 @@ ZHIPUAI_API_BASE = "https://open.bigmodel.cn/api/paas/v4/chat/completions"

 @contextmanager
 def connect_sse(client: Any, method: str, url: str, **kwargs: Any) -> Iterator:
+    """Connect to a server-sent event stream."""
     from httpx_sse import EventSource

     with client.stream(method, url, **kwargs) as response:

@@ -52,6 +53,7 @@ def connect_sse(client: Any, method: str, url: str, **kwargs: Any) -> Iterator:
 async def aconnect_sse(
     client: Any, method: str, url: str, **kwargs: Any
 ) -> AsyncIterator:
+    """Async connect to a server-sent event stream."""
     from httpx_sse import EventSource

     async with client.stream(method, url, **kwargs) as response:

@@ -6,7 +6,7 @@ from langchain_community.docstore.base import Docstore


 class DocstoreFn(Docstore):
-    """Langchain Docstore via arbitrary lookup function.
+    """Docstore via arbitrary lookup function.

     This is useful when:
     * it's expensive to construct an InMemoryDocstore/dict
@@ -9,7 +9,7 @@ from langchain_community.docstore.base import Docstore


 class Wikipedia(Docstore):
-    """Wrapper around wikipedia API."""
+    """Wikipedia API."""

     def __init__(self) -> None:
         """Check that wikipedia package is installed."""

@@ -9,6 +9,8 @@ from langchain_core.pydantic_v1 import Field


 class RerankRequest:
+    """Request for reranking."""
+
     def __init__(self, query: Any = None, passages: Any = None):
         self.query = query
         self.passages = passages if passages is not None else []

@@ -29,8 +29,7 @@ class TranscriptFormat(Enum):


 class AssemblyAIAudioTranscriptLoader(BaseLoader):
-    """
-    Loader for AssemblyAI audio transcripts.
+    """Load AssemblyAI audio transcripts.

     It uses the AssemblyAI API to transcribe audio files
     and loads the transcribed text into one or more Documents,
@@ -110,7 +109,7 @@ class AssemblyAIAudioTranscriptLoader(BaseLoader):

 class AssemblyAIAudioLoaderById(BaseLoader):
     """
-    Loader for AssemblyAI audio transcripts.
+    Load AssemblyAI audio transcripts.

     It uses the AssemblyAI API to get an existing transcription
     and loads the transcribed text into one or more Documents,

@@ -15,7 +15,7 @@ AV_PATTERN = re.compile(r"av[0-9]+")

 class BiliBiliLoader(BaseLoader):
     """
-    Loader for fetching transcripts from BiliBili videos.
+    Load fetching transcripts from BiliBili videos.
     """

     def __init__(

@@ -21,7 +21,9 @@ def _litm_reordering(documents: List[Document]) -> List[Document]:


 class LongContextReorder(BaseDocumentTransformer, BaseModel):
-    """Lost in the middle:
+    """Reorder long context.
+
+    Lost in the middle:
     Performance degrades when models must access relevant information
     in the middle of long contexts.
     See: https://arxiv.org/abs//2307.03172"""
@@ -9,7 +9,8 @@ from langchain_community.tools.nuclia.tool import NucliaUnderstandingAPI


 class NucliaTextTransformer(BaseDocumentTransformer):
-    """
+    """Nuclia Text Transformer.
+
     The Nuclia Understanding API splits into paragraphs and sentences,
     identifies entities, provides a summary of the text and generates
     embeddings for all sentences.

@@ -183,7 +183,7 @@ class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings):


 class AlephAlphaSymmetricSemanticEmbedding(AlephAlphaAsymmetricSemanticEmbedding):
-    """The symmetric version of the Aleph Alpha's semantic embeddings.
+    """Symmetric version of the Aleph Alpha's semantic embeddings.

     The main difference is that here, both the documents and
     queries are embedded with a SemanticRepresentation.Symmetric

@@ -12,7 +12,7 @@ def _chunk(texts: List[str], size: int) -> Iterator[List[str]]:


 class DatabricksEmbeddings(MlflowEmbeddings):
-    """Wrapper around embeddings LLMs in Databricks.
+    """Databricks embeddings.

     To use, you should have the ``mlflow`` python package installed.
     For more information, see https://mlflow.org/docs/latest/llms/deployments.
@@ -13,7 +13,9 @@ logger = getLogger(__name__)


 class InfinityEmbeddingsLocal(BaseModel, Embeddings):
-    """Optimized Embedding models https://github.com/michaelfeil/infinity
+    """Optimized Infinity embedding models.
+
+    https://github.com/michaelfeil/infinity
     This class deploys a local Infinity instance to embed text.
     The class requires async usage.

@@ -12,8 +12,7 @@ def _chunk(texts: List[str], size: int) -> Iterator[List[str]]:


 class JavelinAIGatewayEmbeddings(Embeddings, BaseModel):
-    """
-    Wrapper around embeddings LLMs in the Javelin AI Gateway.
+    """Javelin AI Gateway embeddings.

     To use, you should have the ``javelin_sdk`` python package installed.
     For more information, see https://docs.getjavelin.io

@@ -13,8 +13,7 @@ def _chunk(texts: List[str], size: int) -> Iterator[List[str]]:


 class MlflowAIGatewayEmbeddings(Embeddings, BaseModel):
-    """
-    Wrapper around embeddings LLMs in the MLflow AI Gateway.
+    """MLflow AI Gateway embeddings.

     To use, you should have the ``mlflow[gateway]`` python package installed.
     For more information, see https://mlflow.org/docs/latest/gateway/index.html.
@@ -42,6 +42,8 @@ def is_endpoint_live(url: str, headers: Optional[dict], payload: Any) -> bool:


 class NeMoEmbeddings(BaseModel, Embeddings):
+    """NeMo embedding models."""
+
     batch_size: int = 16
     model: str = "NV-Embed-QA-003"
     api_endpoint_url: str = "http://localhost:8088/v1/embeddings"

@@ -7,7 +7,7 @@ KG_TRIPLE_DELIMITER = "<|>"


 class KnowledgeTriple(NamedTuple):
-    """A triple in the graph."""
+    """Knowledge triple in the graph."""

     subject: str
     predicate: str

@@ -8,7 +8,7 @@ NAMESPACE_UUID = uuid.UUID(int=1984)


 class RecordManager(ABC):
-    """An abstract base class representing the interface for a record manager."""
+    """Abstract base class for a record manager."""

     def __init__(
         self,
@@ -11,7 +11,7 @@ logger = logging.getLogger(__name__)


 class CloudflareWorkersAI(LLM):
-    """Langchain LLM class to help to access Cloudflare Workers AI service.
+    """Cloudflare Workers AI service.

     To use, you must provide an API token and
     account ID to access Cloudflare Workers AI, and

@@ -161,7 +161,7 @@ class _DatabricksClusterDriverProxyClient(_DatabricksClientBase):


 def get_repl_context() -> Any:
-    """Gets the notebook REPL context if running inside a Databricks notebook.
+    """Get the notebook REPL context if running inside a Databricks notebook.
     Returns None otherwise.
     """
     try:

@@ -175,7 +175,7 @@ def get_repl_context() -> Any:


 def get_default_host() -> str:
-    """Gets the default Databricks workspace hostname.
+    """Get the default Databricks workspace hostname.
     Raises an error if the hostname cannot be automatically determined.
     """
     host = os.getenv("DATABRICKS_HOST")
@@ -195,7 +195,7 @@ def get_default_host() -> str:


 def get_default_api_token() -> str:
-    """Gets the default Databricks personal access token.
+    """Get the default Databricks personal access token.
     Raises an error if the token cannot be automatically determined.
     """
     if api_token := os.getenv("DATABRICKS_TOKEN"):

@@ -18,7 +18,7 @@ logger = logging.getLogger(__name__)


 class EdenAI(LLM):
-    """Wrapper around edenai models.
+    """EdenAI models.

     To use, you should have
     the environment variable ``EDENAI_API_KEY`` set with your API token.

@@ -33,9 +33,7 @@ def _collect_user_input(


 class HumanInputLLM(LLM):
-    """
-    It returns user input as the response.
-    """
+    """User input as the response."""

     input_func: Callable = Field(default_factory=lambda: _collect_user_input)
     prompt_func: Callable[[str], None] = Field(default_factory=lambda: _display_prompt)
@@ -12,7 +12,7 @@ logger = logging.getLogger(__name__)


 class IpexLLM(LLM):
-    """Wrapper around the IpexLLM model
+    """IpexLLM model.

     Example:
         .. code-block:: python

@@ -16,7 +16,7 @@ logger = logging.getLogger(__name__)


 class Konko(LLM):
-    """Wrapper around Konko AI models.
+    """Konko AI models.

     To use, you'll need an API key. This can be passed in as init param
     ``konko_api_key`` or set as environment variable ``KONKO_API_KEY``.

@@ -9,6 +9,14 @@ logger = logging.getLogger(__name__)


 def default_guardrail_violation_handler(violation: dict) -> str:
+    """Default guardrail violation handler.
+
+    Args:
+        violation (dict): The violation dictionary.
+
+    Returns:
+        str: The canned response.
+    """
     if violation.get("canned_response"):
         return violation["canned_response"]
     guardrail_name = (
@@ -22,6 +30,8 @@ def default_guardrail_violation_handler(violation: dict) -> str:


 class LayerupSecurity(LLM):
+    """Layerup Security LLM service."""
+
     llm: LLM
     layerup_api_key: str
     layerup_api_base_url: str = "https://api.uselayerup.com/v1"

@@ -34,6 +34,7 @@ def load_llm_from_config(config: dict, **kwargs: Any) -> BaseLLM:


 def load_llm(file: Union[str, Path], **kwargs: Any) -> BaseLLM:
+    """Load LLM from a file."""
     # Convert file to Path object.
     if isinstance(file, str):
         file_path = Path(file)

@@ -9,7 +9,7 @@ from langchain_core.pydantic_v1 import Field, PrivateAttr


 class Mlflow(LLM):
-    """Wrapper around completions LLMs in MLflow.
+    """MLflow LLM service.

     To use, you should have the `mlflow[genai]` python package installed.
     For more information, see https://mlflow.org/docs/latest/llms/deployments.
@@ -21,8 +21,7 @@ class Params(BaseModel, extra=Extra.allow):  # type: ignore[call-arg]


 class MlflowAIGateway(LLM):
-    """
-    Wrapper around completions LLMs in the MLflow AI Gateway.
+    """MLflow AI Gateway LLMs.

     To use, you should have the ``mlflow[gateway]`` python package installed.
     For more information, see https://mlflow.org/docs/latest/gateway/index.html.

@@ -31,6 +31,8 @@ class _MoonshotClient(BaseModel):


 class MoonshotCommon(BaseModel):
+    """Common parameters for Moonshot LLMs."""
+
     _client: _MoonshotClient
     base_url: str = MOONSHOT_SERVICE_URL_BASE
     moonshot_api_key: Optional[SecretStr] = Field(default=None, alias="api_key")

@@ -11,7 +11,7 @@ logger = logging.getLogger(__name__)


 class OpaquePrompts(LLM):
-    """An LLM wrapper that uses OpaquePrompts to sanitize prompts.
+    """LLM that uses OpaquePrompts to sanitize prompts.

     Wraps another LLM and sanitizes prompts before passing it to the LLM, then
     de-sanitizes the response.
@@ -124,7 +124,7 @@ class PromptLayerOpenAI(OpenAI):


 class PromptLayerOpenAIChat(OpenAIChat):
-    """Wrapper around OpenAI large language models.
+    """PromptLayer OpenAI large language models.

     To use, you should have the ``openai`` and ``promptlayer`` python
     package installed, and the environment variable ``OPENAI_API_KEY``

@@ -15,8 +15,7 @@ OUTPUT_TYPE = TypeVar("OUTPUT_TYPE", bound=Union[str, List[List[float]], Iterato


 class LineIterator:
-    """
-    A helper class for parsing the byte stream input.
+    """Parse the byte stream input.

     The output of the model will be in the following format:

@@ -74,7 +73,7 @@ class LineIterator:


 class ContentHandlerBase(Generic[INPUT_TYPE, OUTPUT_TYPE]):
-    """A handler class to transform input from LLM to a
+    """Handler class to transform input from LLM to a
     format that SageMaker endpoint expects.

     Similarly, the class handles transforming output from the
@@ -24,7 +24,7 @@ logger = logging.getLogger(__name__)


 class SparkLLM(LLM):
-    """Wrapper around iFlyTek's Spark large language model.
+    """iFlyTek Spark large language model.

     To use, you should pass `app_id`, `api_key`, `api_secret`
     as a named parameter to the constructor OR set environment

@@ -10,7 +10,7 @@ from langchain_community.llms.utils import enforce_stop_tokens


 class TitanTakeoff(LLM):
-    """Wrapper around Titan Takeoff APIs."""
+    """Titan Takeoff API LLMs."""

     base_url: str = "http://localhost:8000"
     """Specifies the baseURL to use for the Titan Takeoff API.

@@ -40,12 +40,12 @@ stream_completion_with_retry = None


 def is_codey_model(model_name: str) -> bool:
-    """Returns True if the model name is a Codey model."""
+    """Return True if the model name is a Codey model."""
     return "code" in model_name


 def is_gemini_model(model_name: str) -> bool:
-    """Returns True if the model name is a Gemini model."""
+    """Return True if the model name is a Gemini model."""
     return model_name is not None and "gemini" in model_name
@@ -397,7 +397,7 @@ class VertexAI(_VertexAICommon, BaseLLM):
     alternative_import="langchain_google_vertexai.VertexAIModelGarden",
 )
 class VertexAIModelGarden(_VertexAIBase, BaseLLM):
-    """Large language models served from Vertex AI Model Garden."""
+    """Vertex AI Model Garden large language models."""

     client: "PredictionServiceClient" = None  #: :meta private:
     async_client: "PredictionServiceAsyncClient" = None  #: :meta private:

@@ -10,7 +10,7 @@ from langchain_community.utilities.arcee import ArceeWrapper, DALMFilter


 class ArceeRetriever(BaseRetriever):
-    """Retriever for Arcee's Domain Adapted Language Models (DALMs).
+    """Arcee Domain Adapted Language Models (DALMs) retriever.

     To use, set the ``ARCEE_API_KEY`` environment variable with your Arcee API key,
     or pass ``arcee_api_key`` as a named parameter.

@@ -11,7 +11,8 @@ from langchain_community.utilities import YouSearchAPIWrapper


 class YouRetriever(BaseRetriever, YouSearchAPIWrapper):
-    """`You` retriever that uses You.com's search API.
+    """You.com Search API retriever.
+
     It wraps results() to get_relevant_documents
     It uses all YouSearchAPIWrapper arguments without any change.
     """
@@ -8,8 +8,8 @@ from langchain_core.tools import BaseTool


 class CogniswitchKnowledgeRequest(BaseTool):
-    """
-    A tool for interacting with the Cogniswitch service to answer questions.
+    """Tool that uses the Cogniswitch service to answer questions.
+
     name: str = "cogniswitch_knowledge_request"
     description: str = (
         "A wrapper around cogniswitch service to answer the question

@@ -81,9 +81,9 @@ class CogniswitchKnowledgeRequest(BaseTool):


 class CogniswitchKnowledgeStatus(BaseTool):
-    """
-    A cogniswitch tool for interacting with the Cogniswitch services to know the
+    """Tool that uses the Cogniswitch services to get the
     status of the document or url uploaded.
+
     name: str = "cogniswitch_knowledge_status"
     description: str = (
         "A wrapper around cogniswitch services to know the status of

@@ -181,8 +181,8 @@ class CogniswitchKnowledgeStatus(BaseTool):


 class CogniswitchKnowledgeSourceFile(BaseTool):
-    """
-    A cogniswitch tool for interacting with the Cogniswitch services to store data.
+    """Tool that uses the Cogniswitch services to store data from file.
+
     name: str = "cogniswitch_knowledge_source_file"
     description: str = (
         "This calls the CogniSwitch services to analyze & store data from a file.
@@ -294,8 +294,8 @@ class CogniswitchKnowledgeSourceFile(BaseTool):


 class CogniswitchKnowledgeSourceURL(BaseTool):
-    """
-    A cogniswitch tool for interacting with the Cogniswitch services to store data.
+    """Tool that uses the Cogniswitch services to store data from a URL.
+
     name: str = "cogniswitch_knowledge_source_url"
     description: str = (
         "This calls the CogniSwitch services to analyze & store data from a url.

@@ -10,8 +10,8 @@ from langchain_community.tools.connery.tool import ConneryAction


 class ConneryService(BaseModel):
-    """
-    A service for interacting with the Connery Runner API.
+    """Service for interacting with the Connery Runner API.
+
     It gets the list of available actions from the Connery Runner,
     wraps them in ConneryAction Tools and returns them to the user.
     It also provides a method for running the actions.

@@ -13,9 +13,7 @@ from langchain_community.tools.connery.models import Action, Parameter


 class ConneryAction(BaseTool):
-    """
-    A LangChain Tool wrapping a Connery Action.
-    """
+    """Connery Action tool."""

     name: str
     description: str
@@ -54,7 +54,7 @@ class SearchEventsInput(BaseModel):


 class O365SearchEvents(O365BaseTool):
-    """Class for searching calendar events in Office 365
+    """Search calendar events in Office 365.

     Free, but setup is required
     """

@@ -53,9 +53,9 @@ class SearchEmailsInput(BaseModel):


 class O365SearchEmails(O365BaseTool):
-    """Class for searching email messages in Office 365
+    """Search email messages in Office 365.

-    Free, but setup is required
+    Free, but setup is required.
     """

     name: str = "messages_search"

@@ -32,7 +32,7 @@ class SendMessageSchema(BaseModel):


 class O365SendMessage(O365BaseTool):
-    """Tool for sending an email in Office 365."""
+    """Send an email in Office 365."""

     name: str = "send_email"
     description: str = (
@@ -36,7 +36,7 @@ def clean_body(body: str) -> str:


 def authenticate() -> Account:
-    """Authenticate using the Microsoft Grah API"""
+    """Authenticate using the Microsoft Graph API"""
     try:
         from O365 import Account
     except ImportError as e:

@@ -12,11 +12,13 @@ from langchain_community.utilities.you import YouSearchAPIWrapper


 class YouInput(BaseModel):
+    """Input schema for the you.com tool."""
+
     query: str = Field(description="should be a search query")


 class YouSearchTool(BaseTool):
-    """Tool that searches the you.com API"""
+    """Tool that searches the you.com API."""

     name = "you_search"
     description = (

@@ -82,8 +82,9 @@ from langchain_community.utilities.zapier import ZapierNLAWrapper


 class ZapierNLARunAction(BaseTool):
-    """
-    Args:
+    """Tool to run a specific action from the user's exposed actions.
+
+    Params:
         action_id: a specific action ID (from list actions) of the action to execute
             (the set api_key must be associated with the action owner)
         instructions: a natural language instruction string for using the action
@@ -167,11 +168,7 @@ ZapierNLARunAction.__doc__ = (


 class ZapierNLAListActions(BaseTool):
-    """
-    Args:
-        None
-
-    """
+    """Tool to list all exposed actions for the user."""

     name: str = "ZapierNLA_list_actions"
     description: str = BASE_ZAPIER_TOOL_PROMPT + (

@@ -14,6 +14,8 @@ if TYPE_CHECKING:


 class SetupMode(Enum):
+    """Setup mode for AstraDBEnvironment as enumerator."""
+
     SYNC = 1
     ASYNC = 2
     OFF = 3

@@ -62,7 +62,7 @@ logger = logging.getLogger(__name__)


 class Runtime(BaseModel):
-    """This class represents a Runtime.
+    """Pebblo Runtime.

     Args:
         type (Optional[str]): Runtime type. Defaults to ""
@@ -90,7 +90,7 @@ class Runtime(BaseModel):


 class Framework(BaseModel):
-    """This class represents a Framework instance.
+    """Pebblo Framework instance.

     Args:
         name (str): Name of the Framework.

@@ -102,7 +102,7 @@ class Framework(BaseModel):


 class App(BaseModel):
-    """This class represents an AI application.
+    """Pebblo AI application.

     Args:
         name (str): Name of the app.

@@ -124,7 +124,7 @@ class App(BaseModel):


 class Doc(BaseModel):
-    """This class represents a pebblo document.
+    """Pebblo document.

     Args:
         name (str): Name of app originating this document.
@@ -148,8 +148,8 @@ class Doc(BaseModel):


 def get_full_path(path: str) -> str:
-    """Return absolute local path for a local file/directory,
-    for network related path, return as is.
+    """Return an absolute local path for a local file/directory,
+    for a network related path, return as is.

     Args:
         path (str): Relative path to be resolved.

@@ -184,7 +184,7 @@ def get_loader_type(loader: str) -> str:


 def get_loader_full_path(loader: BaseLoader) -> str:
-    """Return absolute source path of source of loader based on the
+    """Return an absolute source path of source of loader based on the
     keys present in Document object from loader.

     Args:

@@ -266,7 +266,7 @@ def get_runtime() -> Tuple[Framework, Runtime]:


 def get_ip() -> str:
-    """Fetch local runtime ip address
+    """Fetch local runtime ip address.

     Returns:
         str: IP address
@@ -22,7 +22,7 @@ def create_retry_decorator(
         Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
     ] = None,
 ) -> Callable[[Any], Any]:
-    """Creates a retry decorator for Vertex / Palm LLMs."""
+    """Create a retry decorator for Vertex / Palm LLMs."""
     import google.api_core

     errors = [

@@ -82,7 +82,7 @@ def init_vertexai(


 def get_client_info(module: Optional[str] = None) -> "ClientInfo":
-    r"""Returns a custom user agent header.
+    r"""Return a custom user agent header.

     Args:
         module (Optional[str]):

@@ -109,7 +109,7 @@ def get_client_info(module: Optional[str] = None) -> "ClientInfo":


 def load_image_from_gcs(path: str, project: Optional[str] = None) -> "Image":
-    """Loads im Image from GCS."""
+    """Load an image from Google Cloud Storage."""
     try:
         from google.cloud import storage
     except ImportError:
@@ -29,7 +29,7 @@ class YouHit(YouHitMetadata):


 class YouAPIOutput(BaseModel):
-    """The output from you.com api"""
+    """Output from you.com API."""

     hits: List[YouHit] = Field(
         description="A list of dictionaries containing the results"

@@ -37,7 +37,7 @@ class YouAPIOutput(BaseModel):


 class YouDocument(BaseModel):
-    """The output of parsing one snippet"""
+    """Output of parsing one snippet."""

     page_content: str = Field(description="One snippet of text")
     metadata: YouHitMetadata

@@ -28,7 +28,7 @@ def convert_pydantic_to_ernie_function(
     name: Optional[str] = None,
     description: Optional[str] = None,
 ) -> FunctionDescription:
-    """Converts a Pydantic model to a function description for the Ernie API."""
+    """Convert a Pydantic model to a function description for the Ernie API."""
     schema = dereference_refs(model.schema())
     schema.pop("definitions", None)
     return {
@@ -44,7 +44,7 @@ def convert_pydantic_to_ernie_tool(
     name: Optional[str] = None,
     description: Optional[str] = None,
 ) -> ToolDescription:
-    """Converts a Pydantic model to a function description for the Ernie API."""
+    """Convert a Pydantic model to a function description for the Ernie API."""
     function = convert_pydantic_to_ernie_function(
         model, name=name, description=description
     )

@@ -5,7 +5,7 @@ from typing import Any, Optional


 def get_client_info(module: Optional[str] = None) -> Any:
-    r"""Returns a custom user agent header.
+    r"""Return a custom user agent header.

     Args:
         module (Optional[str]):

@@ -139,7 +139,7 @@ def remove_lucene_chars(text: str) -> str:

 def dict_to_yaml_str(input_dict: Dict, indent: int = 0) -> str:
-    """
-    Converts a dictionary to a YAML-like string without using external libraries.
+    """Convert a dictionary to a YAML-like string without using external libraries.
+
     Parameters:
     - input_dict (dict): The dictionary to convert.
@@ -165,6 +165,8 @@ def dict_to_yaml_str(input_dict: Dict, indent: int = 0) -> str:
 def combine_queries(
     input_queries: List[Tuple[str, Dict[str, Any]]], operator: str
 ) -> Tuple[str, Dict[str, Any]]:
+    """Combine multiple queries with an operator."""
+
     # Initialize variables to hold the combined query and parameters
     combined_query: str = ""
     combined_params: Dict = {}

@@ -197,8 +199,7 @@ def combine_queries(
 def collect_params(
     input_data: List[Tuple[str, Dict[str, str]]],
 ) -> Tuple[List[str], Dict[str, Any]]:
-    """
-    Transform the input data into the desired format.
+    """Transform the input data into the desired format.

     Args:
     - input_data (list of tuples): Input data to transform.

@@ -324,6 +325,15 @@ def _handle_field_filter(


 def construct_metadata_filter(filter: Dict[str, Any]) -> Tuple[str, Dict]:
+    """Construct a metadata filter.
+
+    Args:
+        filter: A dictionary representing the filter condition.
+
+    Returns:
+        Tuple[str, Dict]
+    """
+
     if isinstance(filter, dict):
         if len(filter) == 1:
             # The only operators allowed at the top level are $AND and $OR
@@ -100,7 +100,7 @@ def check_operator_misuse(func: Callable) -> Callable:


 class RedisTag(RedisFilterField):
-    """A RedisFilterField representing a tag in a Redis index."""
+    """RedisFilterField representing a tag in a Redis index."""

     OPERATORS: Dict[RedisFilterOperator, str] = {
         RedisFilterOperator.EQ: "==",

@@ -192,7 +192,7 @@ class RedisTag(RedisFilterField):


 class RedisNum(RedisFilterField):
-    """A RedisFilterField representing a numeric field in a Redis index."""
+    """RedisFilterField representing a numeric field in a Redis index."""

     OPERATORS: Dict[RedisFilterOperator, str] = {
         RedisFilterOperator.EQ: "==",

@@ -311,7 +311,7 @@ class RedisNum(RedisFilterField):


 class RedisText(RedisFilterField):
-    """A RedisFilterField representing a text field in a Redis index."""
+    """RedisFilterField representing a text field in a Redis index."""

     OPERATORS: Dict[RedisFilterOperator, str] = {
         RedisFilterOperator.EQ: "==",

@@ -381,7 +381,7 @@ class RedisText(RedisFilterField):


 class RedisFilterExpression:
-    """A logical expression of RedisFilterFields.
+    """Logical expression of RedisFilterFields.

     RedisFilterExpressions can be combined using the & and | operators to create
     complex logical expressions that evaluate to the Redis Query language.
@@ -285,7 +285,7 @@ class RedisModel(BaseModel):
 def read_schema(
     index_schema: Optional[Union[Dict[str, List[Any]], str, os.PathLike]],
 ) -> Dict[str, Any]:
-    """Reads in the index schema from a dict or yaml file.
+    """Read in the index schema from a dict or yaml file.

     Check if it is a dict and return RedisModel otherwise, check if it's a path and
     read in the file assuming it's a yaml file and return a RedisModel

@@ -42,7 +42,7 @@ class BaseSerializer(ABC):


 class JsonSerializer(BaseSerializer):
-    """Serializes data in json using the json package from python standard library."""
+    """Serialize data in JSON using the json package from python standard library."""

     @classmethod
     def extension(cls) -> str:

@@ -58,7 +58,7 @@ class JsonSerializer(BaseSerializer):


 class BsonSerializer(BaseSerializer):
-    """Serializes data in binary json using the `bson` python package."""
+    """Serialize data in Binary JSON using the `bson` python package."""

     def __init__(self, persist_path: str) -> None:
         super().__init__(persist_path)

@@ -78,7 +78,7 @@ class BsonSerializer(BaseSerializer):


 class ParquetSerializer(BaseSerializer):
-    """Serializes data in `Apache Parquet` format using the `pyarrow` package."""
+    """Serialize data in `Apache Parquet` format using the `pyarrow` package."""

     def __init__(self, persist_path: str) -> None:
         super().__init__(persist_path)
@@ -24,7 +24,8 @@ logger = logging.getLogger(__name__)


 class SQLiteVSS(VectorStore):
-    """Wrapper around SQLite with vss extension as a vector database.
+    """SQLite with VSS extension as a vector database.
+
     To use, you should have the ``sqlite-vss`` python package installed.
     Example:
     .. code-block:: python

@@ -10,6 +10,8 @@ DEFAULT_TiDB_VECTOR_TABLE_NAME = "langchain_vector"


 class TiDBVectorStore(VectorStore):
+    """TiDB Vector Store."""
+
     def __init__(
         self,
         connection_string: str,

@@ -12,7 +12,7 @@ from langchain_community.vectorstores.utils import maximal_marginal_relevance


 class Vald(VectorStore):
-    """Wrapper around Vald vector database.
+    """Vald vector database.

     To use, you should have the ``vald-client-python`` python package installed.

@@ -77,8 +77,7 @@ def _len_check_if_sized(x: Any, y: Any, x_name: str, y_name: str) -> None:


 def VDMS_Client(host: str = "localhost", port: int = 55555) -> vdms.vdms:
-    """
-    Wrapper to initiate and connect a VDMS client to a VDMS server
+    """VDMS client for the VDMS server.

     Args:
         host: IP or hostname of VDMS server
@@ -98,7 +97,7 @@ def VDMS_Client(host: str = "localhost", port: int = 55555) -> vdms.vdms:


 class VDMS(VectorStore):
-    """Wrapper around Intel Lab's VDMS for vector-store workloads.
+    """Intel Lab's VDMS for vector-store workloads.

     To use, you should have both:
     - the ``vdms`` python package installed

@@ -1534,6 +1533,8 @@ def _check_descriptor_exists_by_id(


 def embedding2bytes(embedding: Union[List[float], None]) -> Union[bytes, None]:
+    """Convert embedding to bytes."""
+
     blob = None
     if embedding is not None:
         emb = np.array(embedding, dtype="float32")

@@ -18,7 +18,8 @@ logger = logging.getLogger(__name__)

 @dataclass
 class SummaryConfig:
-    """
+    """Configuration for summary generation.
+
     is_enabled: True if summary is enabled, False otherwise
     max_results: maximum number of results to summarize
     response_lang: requested language for the summary

@@ -34,7 +35,8 @@ class SummaryConfig:

 @dataclass
 class MMRConfig:
-    """
+    """Configuration for Maximal Marginal Relevance (MMR) search.
+
     is_enabled: True if MMR is enabled, False otherwise
     mmr_k: number of results to fetch for MMR, defaults to 50
     diversity_bias: number between 0 and 1 that determines the degree
@@ -53,7 +55,8 @@ class MMRConfig:

 @dataclass
 class VectaraQueryConfig:
-    """
+    """Configuration for Vectara query.
+
     k: Number of Documents to return. Defaults to 10.
     lambda_val: lexical match parameter for hybrid search.
     filter Dictionary of argument(s) to filter on metadata. For example a

@@ -566,7 +569,7 @@ class Vectara(VectorStore):


 class VectaraRetriever(VectorStoreRetriever):
-    """Retriever class for `Vectara`."""
+    """Retriever for `Vectara`."""

     vectorstore: Vectara
     """Vectara vectorstore."""

@@ -23,7 +23,8 @@ logger = logging.getLogger(__name__)


 class Yellowbrick(VectorStore):
-    """Wrapper around Yellowbrick as a vector database.
+    """Yellowbrick as a vector database.
+
     Example:
         .. code-block:: python
             from langchain_community.vectorstores import Yellowbrick