docstrings: chat_models consistency (#9227)

Updated docstrings to a consistent format.
commit 7810ea5812
parent b0896210c7
@@ -22,7 +22,7 @@ from langchain.schema.output import ChatGenerationChunk


 class ChatAnthropic(BaseChatModel, _AnthropicCommon):
-    """Anthropic's large language chat model.
+    """`Anthropic` chat large language models.

     To use, you should have the ``anthropic`` python package installed, and the
     environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
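The renamed class is used as before; a minimal sketch, assuming the ``anthropic`` package is installed and ``ANTHROPIC_API_KEY`` is set (model name and prompt are illustrative):

    # Minimal ChatAnthropic sketch; model name and prompt are illustrative.
    from langchain.chat_models import ChatAnthropic
    from langchain.schema import HumanMessage

    chat = ChatAnthropic(model="claude-2")
    print(chat([HumanMessage(content="Say hello in five words.")]).content)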
@@ -28,7 +28,7 @@ DEFAULT_MODEL = "meta-llama/Llama-2-7b-chat-hf"


 class ChatAnyscale(ChatOpenAI):
-    """Wrapper around Anyscale Chat large language models.
+    """`Anyscale` Chat large language models.

     To use, you should have the ``openai`` python package installed, and the
     environment variable ``ANYSCALE_API_KEY`` set with your API key.
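A minimal ChatAnyscale sketch, assuming ``ANYSCALE_API_KEY`` is set; the model name mirrors the DEFAULT_MODEL visible in the hunk header:

    # Minimal ChatAnyscale sketch; relies on ANYSCALE_API_KEY in the environment.
    from langchain.chat_models import ChatAnyscale
    from langchain.schema import HumanMessage

    chat = ChatAnyscale(model_name="meta-llama/Llama-2-7b-chat-hf")
    print(chat([HumanMessage(content="Hello!")]).content)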
@@ -14,7 +14,7 @@ logger = logging.getLogger(__name__)


 class AzureChatOpenAI(ChatOpenAI):
-    """Wrapper around Azure OpenAI Chat Completion API.
+    """`Azure OpenAI` Chat Completion API.

     To use this class you
     must have a deployed model on Azure OpenAI. Use `deployment_name` in the
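As the docstring notes, `deployment_name` points at your Azure deployment; a minimal sketch with placeholder names, assuming the usual ``OPENAI_API_*`` environment variables are set:

    # Minimal AzureChatOpenAI sketch; deployment name and API version are placeholders.
    from langchain.chat_models import AzureChatOpenAI
    from langchain.schema import HumanMessage

    chat = AzureChatOpenAI(
        deployment_name="my-gpt-35-deployment",
        openai_api_version="2023-05-15",
    )
    print(chat([HumanMessage(content="Hello!")]).content)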
@@ -17,7 +17,7 @@ from langchain.utils import get_from_dict_or_env


 class LlamaContentFormatter(ContentFormatterBase):
-    """Content formatter for LLaMa"""
+    """Content formatter for `LLaMA`."""

     SUPPORTED_ROLES = ["user", "assistant", "system"]
@@ -66,7 +66,7 @@ class LlamaContentFormatter(ContentFormatterBase):


 class AzureMLChatOnlineEndpoint(SimpleChatModel):
-    """Azure ML Chat Online Endpoint models.
+    """`AzureML` Chat models API.

     Example:
         .. code-block:: python
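A minimal sketch tying the two classes in this file together; the endpoint URL and key are placeholders:

    # Minimal AzureMLChatOnlineEndpoint sketch; URL and key are placeholders.
    from langchain.chat_models.azureml_endpoint import (
        AzureMLChatOnlineEndpoint,
        LlamaContentFormatter,
    )
    from langchain.schema import HumanMessage

    chat = AzureMLChatOnlineEndpoint(
        endpoint_url="https://<endpoint>.<region>.inference.ml.azure.com/score",
        endpoint_api_key="<api-key>",
        content_formatter=LlamaContentFormatter(),
    )
    print(chat([HumanMessage(content="Hello!")]).content)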
@@ -51,7 +51,7 @@ def _get_verbosity() -> bool:


 class BaseChatModel(BaseLanguageModel[BaseMessageChunk], ABC):
-    """Base class for chat models."""
+    """Base class for Chat models."""

     cache: Optional[bool] = None
     """Whether to cache the response."""
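The `cache` field shown here opts a model in or out of caching; a minimal sketch, assuming the global `langchain.llm_cache` pattern:

    # Minimal caching sketch; uses the global llm_cache with per-model overrides.
    import langchain
    from langchain.cache import InMemoryCache
    from langchain.chat_models import ChatOpenAI

    langchain.llm_cache = InMemoryCache()
    cached = ChatOpenAI(cache=True)     # repeat calls served from the cache
    uncached = ChatOpenAI(cache=False)  # bypasses the global cache for this model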
@@ -38,7 +38,7 @@ logger = logging.getLogger(__name__)


 class ChatGooglePalmError(Exception):
-    """Error raised when there is an issue with the Google PaLM API."""
+    """Error with the `Google PaLM` API."""


 def _truncate_at_stop_tokens(
@@ -214,7 +214,7 @@ async def achat_with_retry(llm: ChatGooglePalm, **kwargs: Any) -> Any:


 class ChatGooglePalm(BaseChatModel, BaseModel):
-    """Wrapper around Google's PaLM Chat API.
+    """`Google PaLM` Chat models API.

     To use you must have the google.generativeai Python package installed and
     either:
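A minimal ChatGooglePalm sketch, assuming ``google.generativeai`` is installed and ``GOOGLE_API_KEY`` is set (the key can also be passed in directly):

    # Minimal ChatGooglePalm sketch; relies on GOOGLE_API_KEY in the environment.
    from langchain.chat_models import ChatGooglePalm
    from langchain.schema import HumanMessage

    chat = ChatGooglePalm(temperature=0.1)
    print(chat([HumanMessage(content="Hello!")]).content)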
@@ -140,8 +140,7 @@ def _convert_message_to_dict(message: BaseMessage) -> dict:


 class JinaChat(BaseChatModel):
-    """Wrapper for Jina AI's LLM service, providing cost-effective
-    image chat capabilities.
+    """`Jina AI` Chat models API.

     To use, you should have the ``openai`` python package installed, and the
     environment variable ``JINACHAT_API_KEY`` set to your API key, which you
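A minimal JinaChat sketch, assuming ``JINACHAT_API_KEY`` is set:

    # Minimal JinaChat sketch; relies on JINACHAT_API_KEY in the environment.
    from langchain.chat_models import JinaChat
    from langchain.schema import HumanMessage

    chat = JinaChat(temperature=0)
    print(chat([HumanMessage(content="Hello!")]).content)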
@@ -46,7 +46,7 @@ logger = logging.getLogger(__name__)


 class ChatLiteLLMException(Exception):
-    """Error raised when there is an issue with the LiteLLM I/O Library"""
+    """Error with the `LiteLLM I/O` library"""


 def _truncate_at_stop_tokens(
@@ -65,7 +65,7 @@ def _truncate_at_stop_tokens(


 class FunctionMessage(BaseMessage):
-    """A Message for passing the result of executing a function back to a model."""
+    """Message for passing the result of executing a function back to a model."""

     name: str
     """The name of the function that was executed."""
@@ -77,6 +77,8 @@ class FunctionMessage(BaseMessage):


 class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
+    """Message Chunk for passing the result of executing a function back to a model."""
+
     pass
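A minimal sketch of the message type documented in the two hunks above; the function name and payload are made up:

    # Minimal FunctionMessage sketch; name and content are illustrative.
    from langchain.schema.messages import FunctionMessage

    msg = FunctionMessage(
        name="get_current_weather",       # the function that was executed
        content='{"temperature_f": 72}',  # its stringified return value
    )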
@@ -187,7 +189,7 @@ def _convert_message_to_dict(message: BaseMessage) -> dict:


 class ChatLiteLLM(BaseChatModel):
-    """Wrapper around the LiteLLM Model I/O library.
+    """`LiteLLM` Chat models API.

     To use you must have the google.generativeai Python package installed and
     either:
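A minimal ChatLiteLLM sketch; the model string is illustrative and assumes the matching provider key (here ``OPENAI_API_KEY``) is set:

    # Minimal ChatLiteLLM sketch; model string and provider key are assumptions.
    from langchain.chat_models import ChatLiteLLM
    from langchain.schema import HumanMessage

    chat = ChatLiteLLM(model="gpt-3.5-turbo")
    print(chat([HumanMessage(content="Hello!")]).content)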
@@ -29,7 +29,7 @@ logger = logging.getLogger(__name__)

 # Ignoring type because below is valid pydantic code
 # Unexpected keyword argument "extra" for "__init_subclass__" of "object" [call-arg]
 class ChatParams(BaseModel, extra=Extra.allow):  # type: ignore[call-arg]
-    """Parameters for the MLflow AI Gateway LLM."""
+    """Parameters for the `MLflow AI Gateway` LLM."""

     temperature: float = 0.0
     candidate_count: int = 1
@@ -39,8 +39,7 @@ class ChatParams(BaseModel, extra=Extra.allow):  # type: ignore[call-arg]


 class ChatMLflowAIGateway(BaseChatModel):
-    """
-    Wrapper around chat LLMs in the MLflow AI Gateway.
+    """`MLflow AI Gateway` chat models API.

     To use, you should have the ``mlflow[gateway]`` python package installed.
     For more information, see https://mlflow.org/docs/latest/gateway/index.html.
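A minimal sketch showing how the ChatParams fields from the previous hunk are passed through; the gateway URI and route name are placeholders:

    # Minimal ChatMLflowAIGateway sketch; URI and route are placeholders.
    from langchain.chat_models import ChatMLflowAIGateway
    from langchain.schema import HumanMessage

    chat = ChatMLflowAIGateway(
        gateway_uri="http://127.0.0.1:5000",
        route="my-chat-route",
        params={"temperature": 0.1},
    )
    print(chat([HumanMessage(content="Hello!")]).content)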
@@ -118,7 +118,7 @@ def _convert_delta_to_message_chunk(


 class ChatOpenAI(BaseChatModel):
-    """Wrapper around OpenAI Chat large language models.
+    """`OpenAI` Chat large language models API.

     To use, you should have the ``openai`` python package installed, and the
     environment variable ``OPENAI_API_KEY`` set with your API key.
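A minimal ChatOpenAI sketch, assuming ``OPENAI_API_KEY`` is set:

    # Minimal ChatOpenAI sketch; relies on OPENAI_API_KEY in the environment.
    from langchain.chat_models import ChatOpenAI
    from langchain.schema import HumanMessage

    chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
    print(chat([HumanMessage(content="Hello!")]).content)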
@@ -12,7 +12,7 @@ from langchain.schema.messages import BaseMessage


 class PromptLayerChatOpenAI(ChatOpenAI):
-    """Wrapper around OpenAI Chat large language models and PromptLayer.
+    """`PromptLayer` and `OpenAI` Chat large language models API.

     To use, you should have the ``openai`` and ``promptlayer`` python
     package installed, and the environment variable ``OPENAI_API_KEY``
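A minimal PromptLayerChatOpenAI sketch, assuming both ``OPENAI_API_KEY`` and ``PROMPTLAYER_API_KEY`` are set; the tag value is illustrative:

    # Minimal PromptLayerChatOpenAI sketch; the pl_tags value is illustrative.
    from langchain.chat_models import PromptLayerChatOpenAI
    from langchain.schema import HumanMessage

    chat = PromptLayerChatOpenAI(pl_tags=["docs-example"])
    print(chat([HumanMessage(content="Hello!")]).content)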
@@ -93,7 +93,7 @@ def _parse_examples(examples: List[BaseMessage]) -> List["InputOutputTextPair"]:


 class ChatVertexAI(_VertexAICommon, BaseChatModel):
-    """Wrapper around Vertex AI large language models."""
+    """`Vertex AI` Chat large language models API."""

     model_name: str = "chat-bison"
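A minimal ChatVertexAI sketch, assuming the Vertex AI SDK is installed and application-default credentials are configured; `chat-bison` matches the default shown above:

    # Minimal ChatVertexAI sketch; assumes configured Google Cloud credentials.
    from langchain.chat_models import ChatVertexAI
    from langchain.schema import HumanMessage

    chat = ChatVertexAI(model_name="chat-bison")
    print(chat([HumanMessage(content="Hello!")]).content)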