Mirror of https://github.com/hwchase17/langchain.git (synced 2025-06-21 22:29:51 +00:00)
docstrings for LLMs (#7976)

Docstrings for `llms/`: added missed docstrings and updated existing docstrings to a consistent format (no `Wrapper`s!). @baskaryan
This commit is contained in: parent 5694e7b8cf, commit 0613ed5b95
@@ -1,4 +1,4 @@
-"""Wrappers on top of large language models APIs."""
+"""Access to the large language model APIs and services."""
 from typing import Dict, Type
 
 from langchain.llms.ai21 import AI21
@@ -1,4 +1,3 @@
-"""Wrapper around AI21 APIs."""
 from typing import Any, Dict, List, Optional
 
 import requests
@@ -21,7 +20,7 @@ class AI21PenaltyData(BaseModel):
 
 
 class AI21(LLM):
-    """Wrapper around AI21 large language models.
+    """AI21 large language models.
 
     To use, you should have the environment variable ``AI21_API_KEY``
     set with your API key.
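For context, nearly every provider class touched in this diff follows the usage pattern its docstring describes: export the provider's environment variable, instantiate the class, call it with a prompt. A minimal sketch for AI21 (the key value and prompt are placeholders; this assumes the `langchain.llms` API as of this commit):

    import os

    from langchain.llms import AI21

    # AI21 reads the key from AI21_API_KEY if it is not passed explicitly.
    os.environ["AI21_API_KEY"] = "my-api-key"  # placeholder

    llm = AI21(temperature=0.7)
    print(llm("Tell me a joke."))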
@@ -1,4 +1,3 @@
-"""Wrapper around Aleph Alpha APIs."""
 from typing import Any, Dict, List, Optional, Sequence
 
 from pydantic import Extra, root_validator
@@ -10,7 +9,7 @@ from langchain.utils import get_from_dict_or_env
 
 
 class AlephAlpha(LLM):
-    """Wrapper around Aleph Alpha large language models.
+    """Aleph Alpha large language models.
 
     To use, you should have the ``aleph_alpha_client`` python package installed, and the
     environment variable ``ALEPH_ALPHA_API_KEY`` set with your API key, or pass
@@ -9,8 +9,10 @@ from langchain.llms.utils import enforce_stop_tokens
 
 
 class ContentHandlerAmazonAPIGateway:
-    """Adapter class to prepare the inputs from Langchain to a format
-    that LLM model expects. Also, provides helper function to extract
+    """Adapter to prepare the inputs from Langchain to a format
+    that LLM model expects.
+
+    It also provides helper function to extract
     the generated text from the model response."""
 
     @classmethod
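The adapter described above has two jobs: serialize the LangChain prompt into the payload the hosted model expects, and pull the generated text back out of the response. A rough sketch of that shape (the `inputs`/`parameters`/`generated_text` keys mirror the handler in this file, but treat them as illustrative rather than a fixed schema):

    from typing import Any, Dict

    class ExampleContentHandler:
        """Illustrative adapter: prompt -> request payload, response -> text."""

        @classmethod
        def transform_input(cls, prompt: str, model_kwargs: Dict[str, Any]) -> Dict[str, Any]:
            # Merge the prompt and model parameters into one JSON body.
            return {"inputs": prompt, "parameters": model_kwargs}

        @classmethod
        def transform_output(cls, response: Any) -> str:
            # Extract the generated text from the (assumed) response shape.
            return response.json()[0]["generated_text"]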
@@ -25,7 +27,7 @@ class ContentHandlerAmazonAPIGateway:
 
 
 class AmazonAPIGateway(LLM):
-    """Wrapper around custom Amazon API Gateway"""
+    """Amazon API Gateway to access LLM models hosted on AWS."""
 
     api_url: str
     """API Gateway URL"""
@@ -1,4 +1,3 @@
-"""Wrapper around Anthropic APIs."""
 import re
 import warnings
 from typing import Any, Callable, Dict, Generator, List, Mapping, Optional
@@ -117,7 +116,7 @@ class _AnthropicCommon(BaseModel):
 
 
 class Anthropic(LLM, _AnthropicCommon):
-    r"""Wrapper around Anthropic's large language models.
+    """Anthropic large language models.
 
     To use, you should have the ``anthropic`` python package installed, and the
     environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
@@ -1,4 +1,3 @@
-"""Wrapper around Anyscale"""
 from typing import Any, Dict, List, Mapping, Optional
 
 import requests
@@ -11,7 +10,8 @@ from langchain.utils import get_from_dict_or_env
 
 
 class Anyscale(LLM):
-    """Wrapper around Anyscale Services.
+    """Anyscale Service models.
+
     To use, you should have the environment variable ``ANYSCALE_SERVICE_URL``,
     ``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale
     Service, or pass it as a named parameter to the constructor.
@@ -1,4 +1,3 @@
-"""Wrapper around Aviary"""
 import dataclasses
 import os
 from typing import Any, Dict, List, Mapping, Optional, Union, cast
@@ -77,14 +76,14 @@ def get_completions(
 
 
 class Aviary(LLM):
-    """Allow you to use an Aviary.
+    """Aviary hosted models.
 
     Aviary is a backend for hosted models. You can
     find out more about aviary at
     http://github.com/ray-project/aviary
 
     To get a list of the models supported on an
-    aviary, follow the instructions on the web site to
+    aviary, follow the instructions on the website to
     install the aviary CLI and then use:
     `aviary models`
 
@@ -1,4 +1,3 @@
-"""Wrapper around AzureML Managed Online Endpoint API."""
 import json
 import urllib.request
 from abc import abstractmethod
@@ -12,7 +11,7 @@ from langchain.utils import get_from_dict_or_env
 
 
 class AzureMLEndpointClient(object):
-    """Wrapper around AzureML Managed Online Endpoint Client."""
+    """AzureML Managed Endpoint client."""
 
     def __init__(
         self, endpoint_url: str, endpoint_api_key: str, deployment_name: str
@@ -43,8 +42,8 @@ class AzureMLEndpointClient(object):
 
 
 class ContentFormatterBase:
-    """A handler class to transform request and response of
-    AzureML endpoint to match with required schema.
+    """Transform request and response of AzureML endpoint to match with
+    required schema.
     """
 
     """
@@ -134,7 +133,7 @@ class DollyContentFormatter(ContentFormatterBase):
 
 
 class AzureMLOnlineEndpoint(LLM, BaseModel):
-    """Wrapper around Azure ML Hosted models using Managed Online Endpoints.
+    """Azure ML Online Endpoint models.
 
     Example:
         .. code-block:: python
@@ -1,4 +1,3 @@
-"""Wrapper around Banana API."""
 import logging
 from typing import Any, Dict, List, Mapping, Optional
 
@@ -13,7 +12,7 @@ logger = logging.getLogger(__name__)
 
 
 class Banana(LLM):
-    """Wrapper around Banana large language models.
+    """Banana large language models.
 
     To use, you should have the ``banana-dev`` python package installed,
     and the environment variable ``BANANA_API_KEY`` set with your API key.
@@ -116,7 +116,9 @@ def update_cache(
 
 
 class BaseLLM(BaseLanguageModel, ABC):
-    """LLM wrapper should take in a prompt and return a string."""
+    """Base LLM abstract interface.
+
+    It should take in a prompt and return a string."""
 
     cache: Optional[bool] = None
     verbose: bool = Field(default_factory=_get_verbosity)
@@ -560,7 +562,7 @@ class BaseLLM(BaseLanguageModel, ABC):
 
 
 class LLM(BaseLLM):
-    """LLM class that expect subclasses to implement a simpler call method.
+    """Base LLM abstract class.
 
     The purpose of this class is to expose a simpler interface for working
     with LLMs, rather than expect the user to implement the full _generate method.
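This split is why most provider classes in this diff subclass `LLM` rather than `BaseLLM`: they only have to supply `_call`. A toy subclass, sketched against the interface of this era (production classes also thread a callback-manager argument through `_call`, omitted here):

    from typing import Any, List, Mapping, Optional

    from langchain.llms.base import LLM

    class EchoLLM(LLM):
        """Toy LLM that returns the prompt unchanged."""

        @property
        def _llm_type(self) -> str:
            return "echo"

        def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
            # A real provider would call out to a model API here.
            return prompt

        @property
        def _identifying_params(self) -> Mapping[str, Any]:
            return {}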
@@ -1,4 +1,3 @@
-"""Wrapper around Baseten deployed model API."""
 import logging
 from typing import Any, Dict, List, Mapping, Optional
 
@@ -11,7 +10,7 @@ logger = logging.getLogger(__name__)
 
 
 class Baseten(LLM):
-    """Use your Baseten models in Langchain
+    """Baseten models.
 
     To use, you should have the ``baseten`` python package installed,
     and run ``baseten.login()`` with your Baseten API key.
@@ -60,7 +59,7 @@ class Baseten(LLM):
         try:
             import baseten
         except ImportError as exc:
-            raise ValueError(
+            raise ImportError(
                 "Could not import Baseten Python package. "
                 "Please install it with `pip install baseten`."
             ) from exc
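Besides docstrings, the commit fixes several raise sites like this one: a failed optional import now raises `ImportError` rather than `ValueError`, and chains the original exception. The pattern in isolation:

    try:
        import baseten  # optional dependency
    except ImportError as exc:
        # Chaining with `from exc` keeps the real cause in the traceback.
        raise ImportError(
            "Could not import Baseten Python package. "
            "Please install it with `pip install baseten`."
        ) from exc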
@@ -1,4 +1,3 @@
-"""Wrapper around Beam API."""
 import base64
 import json
 import logging
@@ -21,12 +20,12 @@ DEFAULT_SLEEP_TIME = 4
 
 
 class Beam(LLM):
-    """Wrapper around Beam API for gpt2 large language model.
+    """Beam API for gpt2 large language model.
 
     To use, you should have the ``beam-sdk`` python package installed,
     and the environment variable ``BEAM_CLIENT_ID`` set with your client id
     and ``BEAM_CLIENT_SECRET`` set with your client secret. Information on how
-    to get these is available here: https://docs.beam.cloud/account/api-keys.
+    to get this is available here: https://docs.beam.cloud/account/api-keys.
 
     The wrapper can then be called as follows, where the name, cpu, memory, gpu,
     python version, and python packages can be updated accordingly. Once deployed,
@@ -10,7 +10,9 @@ from langchain.llms.utils import enforce_stop_tokens
 
 class LLMInputOutputAdapter:
     """Adapter class to prepare the inputs from Langchain to a format
-    that LLM model expects. Also, provides helper function to extract
+    that LLM model expects.
+
+    It also provides helper function to extract
     the generated text from the model response."""
 
     @classmethod
@@ -47,7 +49,7 @@ class LLMInputOutputAdapter:
 
 
 class Bedrock(LLM):
-    """LLM provider to invoke Bedrock models.
+    """Bedrock models.
 
     To authenticate, the AWS client uses the following methods to
     automatically load credentials:
@@ -1,4 +1,3 @@
-"""Wrapper around CerebriumAI API."""
 import logging
 from typing import Any, Dict, List, Mapping, Optional
 
@@ -13,7 +12,7 @@ logger = logging.getLogger(__name__)
 
 
 class CerebriumAI(LLM):
-    """Wrapper around CerebriumAI large language models.
+    """CerebriumAI large language models.
 
     To use, you should have the ``cerebrium`` python package installed, and the
     environment variable ``CEREBRIUMAI_API_KEY`` set with your API key.
@@ -8,7 +8,7 @@ from langchain.llms.utils import enforce_stop_tokens
 
 
 class ChatGLM(LLM):
-    """Wrapper around ChatGLM's LLM inference service.
+    """ChatGLM LLM service.
 
     Example:
         .. code-block:: python
@@ -1,4 +1,3 @@
-"""Wrapper around Clarifai's APIs."""
 import logging
 from typing import Any, Dict, List, Optional
 
@@ -13,7 +12,7 @@ logger = logging.getLogger(__name__)
 
 
 class Clarifai(LLM):
-    """Wrapper around Clarifai's large language models.
+    """Clarifai large language models.
 
     To use, you should have an account on the Clarifai platform,
     the ``clarifai`` python package installed, and the
@@ -1,4 +1,3 @@
-"""Wrapper around Cohere APIs."""
 from __future__ import annotations
 
 import logging
@@ -49,7 +48,7 @@ def completion_with_retry(llm: Cohere, **kwargs: Any) -> Any:
 
 
 class Cohere(LLM):
-    """Wrapper around Cohere large language models.
+    """Cohere large language models.
 
     To use, you should have the ``cohere`` python package installed, and the
     environment variable ``COHERE_API_KEY`` set with your API key, or pass
@@ -1,4 +1,3 @@
-"""Wrapper around the C Transformers library."""
 from functools import partial
 from typing import Any, Dict, List, Optional, Sequence
 
@@ -12,7 +11,7 @@ from langchain.llms.base import LLM
 
 
 class CTransformers(LLM):
-    """Wrapper around the C Transformers LLM interface.
+    """C Transformers LLM models.
 
     To use, you should have the ``ctransformers`` python package installed.
     See https://github.com/marella/ctransformers
@@ -130,7 +130,8 @@ def get_default_api_token() -> str:
 
 
 class Databricks(LLM):
-    """LLM wrapper around a Databricks serving endpoint or a cluster driver proxy app.
+    """Databricks serving endpoint or a cluster driver proxy app for LLM.
+
     It supports two endpoint types:
 
     * **Serving endpoint** (recommended for both production and development).
@@ -1,4 +1,3 @@
-"""Wrapper around DeepInfra APIs."""
 from typing import Any, Dict, List, Mapping, Optional
 
 import requests
@@ -13,7 +12,7 @@ DEFAULT_MODEL_ID = "google/flan-t5-xl"
 
 
 class DeepInfra(LLM):
-    """Wrapper around DeepInfra deployed models.
+    """DeepInfra models.
 
     To use, you should have the ``requests`` python package installed, and the
     environment variable ``DEEPINFRA_API_TOKEN`` set with your API token, or pass
@@ -1,4 +1,3 @@
-"""Fake LLM wrapper for testing purposes."""
 from typing import Any, List, Mapping, Optional
 
 from langchain.callbacks.manager import (
@@ -9,7 +8,7 @@ from langchain.llms.base import LLM
 
 
 class FakeListLLM(LLM):
-    """Fake LLM wrapper for testing purposes."""
+    """Fake LLM for testing purposes."""
 
     responses: List
     i: int = 0
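FakeListLLM replays canned responses in order, which makes chain logic testable without network calls; a short sketch of typical test usage (assuming the `langchain.llms.fake` module shown here):

    from langchain.llms.fake import FakeListLLM

    # Each call returns the next canned response.
    llm = FakeListLLM(responses=["first answer", "second answer"])

    assert llm("any prompt") == "first answer"
    assert llm("another prompt") == "second answer"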
@@ -1,4 +1,3 @@
-"""Wrapper around ForefrontAI APIs."""
 from typing import Any, Dict, List, Mapping, Optional
 
 import requests
@@ -11,7 +10,7 @@ from langchain.utils import get_from_dict_or_env
 
 
 class ForefrontAI(LLM):
-    """Wrapper around ForefrontAI large language models.
+    """ForefrontAI large language models.
 
     To use, you should have the environment variable ``FOREFRONTAI_API_KEY``
     set with your API key.
@@ -1,4 +1,3 @@
-"""Wrapper around Google's PaLM Text APIs."""
 from __future__ import annotations
 
 import logging
@@ -77,6 +76,8 @@ def _strip_erroneous_leading_spaces(text: str) -> str:
 
 
 class GooglePalm(BaseLLM, BaseModel):
+    """Google PaLM models."""
+
     client: Any  #: :meta private:
     google_api_key: Optional[str]
     model_name: str = "models/text-bison-001"
@@ -1,4 +1,3 @@
-"""Wrapper around GooseAI API."""
 import logging
 from typing import Any, Dict, List, Mapping, Optional
 
@@ -12,7 +11,7 @@ logger = logging.getLogger(__name__)
 
 
 class GooseAI(LLM):
-    """Wrapper around OpenAI large language models.
+    """GooseAI large language models.
 
     To use, you should have the ``openai`` python package installed, and the
     environment variable ``GOOSEAI_API_KEY`` set with your API key.
@@ -1,4 +1,3 @@
-"""Wrapper for the GPT4All model."""
 from functools import partial
 from typing import Any, Dict, List, Mapping, Optional, Set
 
@@ -10,7 +9,7 @@ from langchain.llms.utils import enforce_stop_tokens
 
 
 class GPT4All(LLM):
-    r"""Wrapper around GPT4All language models.
+    """GPT4All language models.
 
     To use, you should have the ``gpt4all`` python package installed, the
     pre-trained model file, and the model's config information.
@@ -1,4 +1,3 @@
-"""Wrapper around HuggingFace APIs."""
 from typing import Any, Dict, List, Mapping, Optional
 
 import requests
@@ -13,7 +12,7 @@ VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
 
 
 class HuggingFaceEndpoint(LLM):
-    """Wrapper around HuggingFaceHub Inference Endpoints.
+    """HuggingFace Endpoint models.
 
     To use, you should have the ``huggingface_hub`` python package installed, and the
     environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
@@ -1,4 +1,3 @@
-"""Wrapper around HuggingFace APIs."""
 from typing import Any, Dict, List, Mapping, Optional
 
 from pydantic import Extra, root_validator
@@ -13,7 +12,7 @@ VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
 
 
 class HuggingFaceHub(LLM):
-    """Wrapper around HuggingFaceHub models.
+    """HuggingFaceHub models.
 
     To use, you should have the ``huggingface_hub`` python package installed, and the
     environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
@@ -1,4 +1,3 @@
-"""Wrapper around HuggingFace Pipeline APIs."""
 import importlib.util
 import logging
 from typing import Any, List, Mapping, Optional
@@ -17,7 +16,7 @@ logger = logging.getLogger(__name__)
 
 
 class HuggingFacePipeline(LLM):
-    """Wrapper around HuggingFace Pipeline API.
+    """HuggingFace Pipeline API.
 
     To use, you should have the ``transformers`` python package installed.
 
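As a reference point, the class can construct the underlying `transformers` pipeline from a model id; a sketch using the `from_model_id` constructor of this era (`gpt2` and the kwargs are illustrative):

    from langchain.llms import HuggingFacePipeline

    llm = HuggingFacePipeline.from_model_id(
        model_id="gpt2",
        task="text-generation",
        model_kwargs={"max_length": 64},
    )
    print(llm("Once upon a time"))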
@@ -1,4 +1,3 @@
-"""Wrapper around Huggingface text generation inference API."""
 from functools import partial
 from typing import Any, Dict, List, Optional
 
@@ -13,10 +12,9 @@ from langchain.llms.base import LLM
 
 class HuggingFaceTextGenInference(LLM):
     """
-    HuggingFace text generation inference API.
+    HuggingFace text generation API.
 
-    This class is a wrapper around the HuggingFace text generation inference API.
-    It is used to generate text from a given prompt.
+    It generates text from a given prompt.
 
     Attributes:
     - max_new_tokens: The maximum number of tokens to generate.
@@ -34,7 +34,7 @@ def _collect_user_input(
 
 class HumanInputLLM(LLM):
     """
-    A LLM wrapper which returns user input as the response.
+    It returns user input as the response.
     """
 
     input_func: Callable = Field(default_factory=lambda: _collect_user_input)
@@ -1,4 +1,3 @@
-"""Wrapper around KoboldAI API."""
 import logging
 from typing import Any, Dict, List, Optional
 
@@ -21,8 +20,7 @@ def clean_url(url: str) -> str:
 
 
 class KoboldApiLLM(LLM):
-    """
-    A class that acts as a wrapper for the Kobold API language model.
+    """Kobold API language model.
 
     It includes several fields that can be used to control the text generation process.
 
@@ -1,4 +1,3 @@
-"""Wrapper around llama.cpp."""
 import logging
 from typing import Any, Dict, Generator, List, Optional
 
@@ -11,7 +10,7 @@ logger = logging.getLogger(__name__)
 
 
 class LlamaCpp(LLM):
-    """Wrapper around the llama.cpp model.
+    """llama.cpp model.
 
     To use, you should have the llama-cpp-python library installed, and provide the
     path to the Llama model as a named parameter to the constructor.
@@ -136,7 +135,7 @@ class LlamaCpp(LLM):
 
             values["client"] = Llama(model_path, **model_params)
         except ImportError:
-            raise ModuleNotFoundError(
+            raise ImportError(
                 "Could not import llama-cpp-python library. "
                 "Please install the llama-cpp-python library to "
                 "use this embedding model: pip install llama-cpp-python"
@@ -1,4 +1,4 @@
-"""Base interface for loading large language models apis."""
+"""Base interface for loading large language model APIs."""
 import json
 from pathlib import Path
 from typing import Union
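This module backs the save/load round trip for LLM configs. A sketch of the pair (the file name is arbitrary; the extension selects JSON or YAML):

    from langchain.llms import OpenAI
    from langchain.llms.loading import load_llm

    llm = OpenAI(model_name="text-davinci-003", temperature=0.9)
    llm.save("llm.json")             # serialize the config to disk

    restored = load_llm("llm.json")  # rebuild the LLM from the config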
@@ -1,4 +1,3 @@
-"""Wrapper around HazyResearch's Manifest library."""
 from typing import Any, Dict, List, Mapping, Optional
 
 from pydantic import Extra, root_validator
@@ -8,7 +7,7 @@ from langchain.llms.base import LLM
 
 
 class ManifestWrapper(LLM):
-    """Wrapper around HazyResearch's Manifest library."""
+    """HazyResearch's Manifest library."""
 
     client: Any  #: :meta private:
     llm_kwargs: Optional[Dict] = None
@@ -27,7 +26,7 @@ class ManifestWrapper(LLM):
             if not isinstance(values["client"], Manifest):
                 raise ValueError
         except ImportError:
-            raise ValueError(
+            raise ImportError(
                 "Could not import manifest python package. "
                 "Please install it with `pip install manifest-ml`."
             )
@@ -9,13 +9,18 @@ from langchain.llms.base import LLM
 
 
 class Params(BaseModel, extra=Extra.allow):
+    """Parameters for the MLflow AI Gateway LLM."""
+
     temperature: float = 0.0
     candidate_count: int = 1
+    """The number of candidates to return."""
     stop: Optional[List[str]] = None
     max_tokens: Optional[int] = None
 
 
 class MlflowAIGateway(LLM):
+    """The MLflow AI Gateway models."""
+
     route: str
     gateway_uri: Optional[str] = None
     params: Optional[Params] = None
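Note the string literal directly under `candidate_count`: that is the attribute-docstring convention (PEP 224 style) used throughout this commit. The interpreter evaluates it as a bare no-op expression, but documentation tools attach it to the field above, as in this minimal sketch:

    from pydantic import BaseModel

    class ExampleParams(BaseModel):
        candidate_count: int = 1
        """The number of candidates to return."""  # read by doc tools, ignored at runtime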
@@ -1,4 +1,3 @@
-"""Wrapper around Modal API."""
 import logging
 from typing import Any, Dict, List, Mapping, Optional
 
@@ -13,7 +12,7 @@ logger = logging.getLogger(__name__)
 
 
 class Modal(LLM):
-    """Wrapper around Modal large language models.
+    """Modal large language models.
 
     To use, you should have the ``modal-client`` python package installed.
 
@@ -1,4 +1,3 @@
-"""Wrapper around MosaicML APIs."""
 from typing import Any, Dict, List, Mapping, Optional
 
 import requests
@@ -28,7 +27,7 @@ PROMPT_FOR_GENERATION_FORMAT = """{intro}
 
 
 class MosaicML(LLM):
-    """Wrapper around MosaicML's LLM inference service.
+    """MosaicML LLM service.
 
     To use, you should have the
     environment variable ``MOSAICML_API_TOKEN`` set with your API token, or pass
@@ -1,4 +1,3 @@
-"""Wrapper around NLPCloud APIs."""
 from typing import Any, Dict, List, Mapping, Optional
 
 from pydantic import Extra, root_validator
@@ -9,7 +8,7 @@ from langchain.utils import get_from_dict_or_env
 
 
 class NLPCloud(LLM):
-    """Wrapper around NLPCloud large language models.
+    """NLPCloud large language models.
 
     To use, you should have the ``nlpcloud`` python package installed, and the
     environment variable ``NLPCLOUD_API_KEY`` set with your API key.
@@ -1,4 +1,3 @@
-"""Wrapper around OctoAI APIs."""
 from typing import Any, Dict, List, Mapping, Optional
 
 from pydantic import Extra, root_validator
@@ -10,7 +9,7 @@ from langchain.utils import get_from_dict_or_env
 
 
 class OctoAIEndpoint(LLM):
-    """Wrapper around OctoAI Inference Endpoints.
+    """OctoAI LLM Endpoints.
 
     OctoAIEndpoint is a class to interact with OctoAI
     Compute Service large language model endpoints.
@@ -1,4 +1,3 @@
-"""Wrapper around OpenAI APIs."""
 from __future__ import annotations
 
 import logging
@@ -105,7 +104,7 @@ async def acompletion_with_retry(
 
 
 class BaseOpenAI(BaseLLM):
-    """Wrapper around OpenAI large language models."""
+    """Base OpenAI large language model class."""
 
     @property
     def lc_secrets(self) -> Dict[str, str]:
@@ -579,7 +578,7 @@ class BaseOpenAI(BaseLLM):
 
 
 class OpenAI(BaseOpenAI):
-    """Wrapper around OpenAI large language models.
+    """OpenAI large language models.
 
     To use, you should have the ``openai`` python package installed, and the
     environment variable ``OPENAI_API_KEY`` set with your API key.
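The canonical usage, common to the OpenAI-family classes below as well, is a sketch like this (assuming `OPENAI_API_KEY` is exported and the completions-era model names):

    from langchain.llms import OpenAI

    # The key is read from the OPENAI_API_KEY environment variable.
    llm = OpenAI(model_name="text-davinci-003", temperature=0.7)
    print(llm("Write a haiku about code review."))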
@@ -600,7 +599,7 @@ class OpenAI(BaseOpenAI):
 
 
 class AzureOpenAI(BaseOpenAI):
-    """Wrapper around Azure-specific OpenAI large language models.
+    """Azure-specific OpenAI large language models.
 
     To use, you should have the ``openai`` python package installed, and the
     environment variable ``OPENAI_API_KEY`` set with your API key.
@@ -655,7 +654,7 @@ class AzureOpenAI(BaseOpenAI):
 
 
 class OpenAIChat(BaseLLM):
-    """Wrapper around OpenAI Chat large language models.
+    """OpenAI Chat large language models.
 
     To use, you should have the ``openai`` python package installed, and the
     environment variable ``OPENAI_API_KEY`` set with your API key.
@@ -1,4 +1,3 @@
-"""Wrapper around OpenLLM APIs."""
 from __future__ import annotations
 
 import copy
@@ -46,7 +45,7 @@ logger = logging.getLogger(__name__)
 
 
 class OpenLLM(LLM):
-    """Wrapper for accessing OpenLLM, supporting both in-process model
+    """OpenLLM, supporting both in-process model
     instance and remote OpenLLM servers.
 
     To use, you should have the openllm library installed:
@@ -6,6 +6,8 @@ from langchain.llms.openai import BaseOpenAI
 
 
 class OpenLM(BaseOpenAI):
+    """OpenLM models."""
+
     @property
     def _invocation_params(self) -> Dict[str, Any]:
         return {**{"model": self.model_name}, **super()._invocation_params}
@@ -17,7 +19,7 @@ class OpenLM(BaseOpenAI):
 
             values["client"] = openlm.Completion
         except ImportError:
-            raise ValueError(
+            raise ImportError(
                 "Could not import openlm python package. "
                 "Please install it with `pip install openlm`."
             )
@@ -1,4 +1,3 @@
-"""Wrapper around Petals API."""
 import logging
 from typing import Any, Dict, List, Mapping, Optional
 
@@ -13,7 +12,7 @@ logger = logging.getLogger(__name__)
 
 
 class Petals(LLM):
-    """Wrapper around Petals Bloom models.
+    """Petals Bloom models.
 
     To use, you should have the ``petals`` python package installed, and the
     environment variable ``HUGGINGFACE_API_KEY`` set with your API key.
@@ -1,4 +1,3 @@
-"""Wrapper around Pipeline Cloud API."""
 import logging
 from typing import Any, Dict, List, Mapping, Optional
 
@@ -13,7 +12,7 @@ logger = logging.getLogger(__name__)
 
 
 class PipelineAI(LLM, BaseModel):
-    """Wrapper around PipelineAI large language models.
+    """PipelineAI large language models.
 
     To use, you should have the ``pipeline-ai`` python package installed,
     and the environment variable ``PIPELINE_API_KEY`` set with your API key.
@@ -1,4 +1,3 @@
-"""Wrapper around Prediction Guard APIs."""
 import logging
 from typing import Any, Dict, List, Optional
 
@@ -13,7 +12,8 @@ logger = logging.getLogger(__name__)
 
 
 class PredictionGuard(LLM):
-    """Wrapper around Prediction Guard large language models.
+    """Prediction Guard large language models.
+
     To use, you should have the ``predictionguard`` python package installed, and the
     environment variable ``PREDICTIONGUARD_TOKEN`` set with your access token, or pass
     it as a named parameter to the constructor. To use Prediction Guard's API along
@@ -1,4 +1,3 @@
-"""PromptLayer wrapper."""
 import datetime
 from typing import Any, List, Optional
 
@@ -11,7 +10,7 @@ from langchain.schema import LLMResult
 
 
 class PromptLayerOpenAI(OpenAI):
-    """Wrapper around OpenAI large language models.
+    """PromptLayer OpenAI large language models.
 
     To use, you should have the ``openai`` and ``promptlayer`` python
     package installed, and the environment variable ``OPENAI_API_KEY``
@@ -1,4 +1,3 @@
-"""Wrapper around Replicate API."""
 import logging
 from typing import Any, Dict, List, Mapping, Optional
 
@@ -12,7 +11,7 @@ logger = logging.getLogger(__name__)
 
 
 class Replicate(LLM):
-    """Wrapper around Replicate models.
+    """Replicate models.
 
     To use, you should have the ``replicate`` python package installed,
     and the environment variable ``REPLICATE_API_TOKEN`` set with your API token.
@@ -1,4 +1,4 @@
-"""Wrapper for the RWKV model.
+"""RWKV models.
 
 Based on https://github.com/saharNooby/rwkv.cpp/blob/master/rwkv/chat_with_bot.py
 https://github.com/BlinkDL/ChatRWKV/blob/main/v2/chat.py
@@ -13,7 +13,7 @@ from langchain.llms.utils import enforce_stop_tokens
 
 
 class RWKV(LLM, BaseModel):
-    r"""Wrapper around RWKV language models.
+    """RWKV language models.
 
     To use, you should have the ``rwkv`` python package installed, the
     pre-trained model file, and the model's config information.
@@ -1,4 +1,4 @@
-"""Wrapper around Sagemaker InvokeEndpoint API."""
+"""Sagemaker InvokeEndpoint API."""
 from abc import abstractmethod
 from typing import Any, Dict, Generic, List, Mapping, Optional, TypeVar, Union
 
@@ -14,8 +14,9 @@ OUTPUT_TYPE = TypeVar("OUTPUT_TYPE", bound=Union[str, List[List[float]]])
 
 class ContentHandlerBase(Generic[INPUT_TYPE, OUTPUT_TYPE]):
     """A handler class to transform input from LLM to a
-    format that SageMaker endpoint expects. Similarly,
-    the class also handles transforming output from the
+    format that SageMaker endpoint expects.
+
+    Similarly, the class handles transforming output from the
     SageMaker endpoint to a format that LLM class expects.
     """
 
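Concretely, a subclass of `LLMContentHandler` (the `ContentHandlerBase[str, str]` specialization mentioned in the next hunk) implements the two transforms plus the content types. A sketch for a JSON endpoint; the payload keys are assumptions about one particular deployed model, not a fixed schema:

    import json

    from langchain.llms.sagemaker_endpoint import LLMContentHandler

    class JsonContentHandler(LLMContentHandler):
        content_type = "application/json"
        accepts = "application/json"

        def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
            # Serialize the prompt and parameters into the request body.
            return json.dumps({"text_inputs": prompt, **model_kwargs}).encode("utf-8")

        def transform_output(self, output) -> str:
            # Pull the generated text back out of the response body
            # (assumed shape: {"generated_texts": [...]}).
            return json.loads(output.read().decode("utf-8"))["generated_texts"][0]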
@@ -62,7 +63,7 @@ class LLMContentHandler(ContentHandlerBase[str, str]):
 
 
 class SagemakerEndpoint(LLM):
-    """Wrapper around custom Sagemaker Inference Endpoints.
+    """Sagemaker Inference Endpoint models.
 
     To use, you must supply the endpoint name from your deployed
     Sagemaker model & the region where it is deployed.
@@ -1,4 +1,3 @@
-"""Run model inference on self-hosted remote hardware."""
 import importlib.util
 import logging
 import pickle
@@ -63,7 +62,7 @@ def _send_pipeline_to_device(pipeline: Any, device: int) -> Any:
 
 
 class SelfHostedPipeline(LLM):
-    """Run model inference on self-hosted remote hardware.
+    """Model inference on self-hosted remote hardware.
 
     Supported hardware includes auto-launched instances on AWS, GCP, Azure,
     and Lambda, as well as servers specified
@@ -1,4 +1,3 @@
-"""Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware."""
 import importlib.util
 import logging
 from typing import Any, Callable, List, Mapping, Optional
@@ -112,7 +111,7 @@ def _load_transformer(
 
 
 class SelfHostedHuggingFaceLLM(SelfHostedPipeline):
-    """Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware.
+    """HuggingFace Pipeline API to run on self-hosted remote hardware.
 
     Supported hardware includes auto-launched instances on AWS, GCP, Azure,
     and Lambda, as well as servers specified
@@ -1,4 +1,3 @@
-"""Wrapper around StochasticAI APIs."""
 import logging
 import time
 from typing import Any, Dict, List, Mapping, Optional
@@ -15,7 +14,7 @@ logger = logging.getLogger(__name__)
 
 
 class StochasticAI(LLM):
-    """Wrapper around StochasticAI large language models.
+    """StochasticAI large language models.
 
     To use, you should have the environment variable ``STOCHASTICAI_API_KEY``
     set with your API key.
@@ -1,4 +1,3 @@
-"""Wrapper around text-generation-webui."""
 import logging
 from typing import Any, Dict, List, Optional
 
@@ -12,7 +11,7 @@ logger = logging.getLogger(__name__)
 
 
 class TextGen(LLM):
-    """Wrapper around the text-generation-webui model.
+    """text-generation-webui models.
 
     To use, you should have the text-generation-webui installed, a model loaded,
     and --api added as a command-line option.
@@ -1,4 +1,3 @@
-"""Wrapper around Cohere APIs."""
 from __future__ import annotations
 
 import logging
@@ -89,7 +88,7 @@ def stream_generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any:
 
 
 class Tongyi(LLM):
-    """Wrapper around Tongyi Qwen large language models.
+    """Tongyi Qwen large language models.
 
     To use, you should have the ``dashscope`` python package installed, and the
     environment variable ``DASHSCOPE_API_KEY`` set with your API key, or pass
@@ -1,4 +1,4 @@
-"""Common utility functions for working with LLM APIs."""
+"""Common utility functions for LLM APIs."""
 import re
 from typing import List
 
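The best-known helper here is `enforce_stop_tokens`, which truncates generated text at the first occurrence of any stop sequence; its shape, sketched from the module's own imports (`re`, `List`):

    import re
    from typing import List

    def enforce_stop_tokens(text: str, stop: List[str]) -> str:
        """Cut off the text as soon as any stop word occurs."""
        return re.split("|".join(stop), text)[0]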
@@ -1,4 +1,3 @@
-"""Wrapper around Google VertexAI models."""
 from __future__ import annotations
 
 import asyncio
@@ -142,7 +141,7 @@ class _VertexAICommon(BaseModel):
 
 
 class VertexAI(_VertexAICommon, LLM):
-    """Wrapper around Google Vertex AI large language models."""
+    """Google Vertex AI large language models."""
 
     model_name: str = "text-bison"
     "The name of the Vertex AI large language model."
@@ -1,4 +1,3 @@
-"""Wrapper around Writer APIs."""
 from typing import Any, Dict, List, Mapping, Optional
 
 import requests
@@ -11,7 +10,7 @@ from langchain.utils import get_from_dict_or_env
 
 
 class Writer(LLM):
-    """Wrapper around Writer large language models.
+    """Writer large language models.
 
     To use, you should have the environment variable ``WRITER_API_KEY`` and
     ``WRITER_ORG_ID`` set with your API key and organization ID respectively.