experimental: docstrings update (#18048)

Added missing docstrings. Formatted docstrings to a consistent format.
Leonid Ganeline
2024-02-23 18:24:16 -08:00
committed by GitHub
parent 56b955fc31
commit 3f6bf852ea
61 changed files with 316 additions and 102 deletions

View File

@@ -14,7 +14,10 @@ from langchain_experimental.pydantic_v1 import root_validator
class AmazonComprehendModerationChain(Chain):
"""A subclass of Chain, designed to apply moderation to LLMs."""
"""Moderation Chain, based on `Amazon Comprehend` service.
See more at https://aws.amazon.com/comprehend/
"""
output_key: str = "output" #: :meta private:
"""Key used to fetch/store the output in data containers. Defaults to `output`"""
@@ -54,7 +57,7 @@ class AmazonComprehendModerationChain(Chain):
@root_validator(pre=True)
def create_client(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""
- Creates an Amazon Comprehend client
+ Creates an Amazon Comprehend client.
Args:
values (Dict[str, Any]): A dictionary containing configuration values.
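For context, a minimal usage sketch of this chain (hedged: the `client` keyword argument and `run` entry point are assumptions based on the validator above and the standard `Chain` interface):

import boto3

from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain

# Build the Comprehend client up front; create_client() can otherwise
# construct one from the configuration values passed to the chain.
comprehend_client = boto3.client("comprehend", region_name="us-east-1")

moderation_chain = AmazonComprehendModerationChain(client=comprehend_client)

# The moderated text comes back under output_key ("output" by default).
result = moderation_chain.run("Text to screen for PII and toxicity.")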

View File

@@ -13,6 +13,8 @@ from langchain_experimental.comprehend_moderation.toxicity import ComprehendToxi
class BaseModeration:
"""Base class for moderation."""
def __init__(
self,
client: Any,
@@ -109,6 +111,8 @@ class BaseModeration:
self.run_manager.on_text(message)
def moderate(self, prompt: Any) -> str:
"""Moderate the input prompt."""
from langchain_experimental.comprehend_moderation.base_moderation_config import ( # noqa: E501
ModerationPiiConfig,
ModerationPromptSafetyConfig,

View File

@@ -2,6 +2,8 @@ from typing import Any, Callable, Dict
class BaseModerationCallbackHandler:
"""Base class for moderation callback handlers."""
def __init__(self) -> None:
if (
self._is_method_unchanged(

View File

@@ -4,6 +4,8 @@ from pydantic import BaseModel
class ModerationPiiConfig(BaseModel):
"""Configuration for PII moderation filter."""
threshold: float = 0.5
"""Threshold for PII confidence score, defaults to 0.5 i.e. 50%"""
@@ -21,6 +23,8 @@ class ModerationPiiConfig(BaseModel):
class ModerationToxicityConfig(BaseModel):
"""Configuration for Toxicity moderation filter."""
threshold: float = 0.5
"""Threshold for Toxic label confidence score, defaults to 0.5 i.e. 50%"""
@@ -29,6 +33,8 @@ class ModerationToxicityConfig(BaseModel):
class ModerationPromptSafetyConfig(BaseModel):
"""Configuration for Prompt Safety moderation filter."""
threshold: float = 0.5
"""
Threshold for Prompt Safety classification
@@ -37,6 +43,8 @@ class ModerationPromptSafetyConfig(BaseModel):
class BaseModerationConfig(BaseModel):
"""Base configuration settings for moderation."""
filters: List[
Union[
ModerationPiiConfig, ModerationToxicityConfig, ModerationPromptSafetyConfig
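A hedged construction sketch for these config classes (assuming the package re-exports them; otherwise import from `base_moderation_config` as the hunk above does):

from langchain_experimental.comprehend_moderation import (
    BaseModerationConfig,
    ModerationPiiConfig,
    ModerationPromptSafetyConfig,
    ModerationToxicityConfig,
)

# Each filter keeps its documented 0.5 default unless overridden.
config = BaseModerationConfig(
    filters=[
        ModerationPiiConfig(threshold=0.5),
        ModerationToxicityConfig(threshold=0.5),
        ModerationPromptSafetyConfig(threshold=0.8),
    ]
)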

View File

@@ -27,7 +27,7 @@ class ModerationToxicityError(Exception):
class ModerationPromptSafetyError(Exception):
"""Exception raised if Intention entities are detected.
"""Exception raised if Unsafe prompts are detected.
Attributes:
message -- explanation of the error
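Callers can catch these exceptions to branch on the moderation outcome; a minimal sketch (reusing the hypothetical `moderation_chain` from the first example):

from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
    ModerationPromptSafetyError,
)

try:
    response = moderation_chain.run("a potentially unsafe prompt")
except ModerationPromptSafetyError as e:
    # `message` is documented above as the explanation of the error.
    print(f"Prompt blocked: {e.message}")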

View File

@@ -7,6 +7,8 @@ from langchain_experimental.comprehend_moderation.base_moderation_exceptions imp
class ComprehendPII:
"""Class to handle Personally Identifiable Information (PII) moderation."""
def __init__(
self,
client: Any,

View File

@@ -7,6 +7,8 @@ from langchain_experimental.comprehend_moderation.base_moderation_exceptions imp
class ComprehendPromptSafety:
"""Class to handle prompt safety moderation."""
def __init__(
self,
client: Any,

View File

@@ -8,6 +8,8 @@ from langchain_experimental.comprehend_moderation.base_moderation_exceptions imp
class ComprehendToxicity:
"""Class to handle toxicity moderation."""
def __init__(
self,
client: Any,
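Putting the pieces together, a hedged end-to-end sketch (the `moderation_config` and `moderation_callback` parameter names are assumptions based on the published Comprehend moderation examples):

chain = AmazonComprehendModerationChain(
    client=comprehend_client,  # boto3 Comprehend client from the first sketch
    moderation_config=config,  # filters built above; parameter name assumed
    moderation_callback=LoggingModerationCallback(),  # parameter name assumed
)
checked = chain.run("Hi! My phone number is 555-0100.")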