experimental: docstrings update (#18048)

Added missing docstrings. Formatted docstrings to a consistent format.
Leonid Ganeline
2024-02-23 18:24:16 -08:00
committed by GitHub
parent 56b955fc31
commit 3f6bf852ea
61 changed files with 316 additions and 102 deletions

View File

@@ -1,3 +1,12 @@
"""Implementation of a Tree of Thought (ToT) chain based on the paper
"Large Language Model Guided Tree-of-Thought"
https://arxiv.org/pdf/2305.08291.pdf
The Tree of Thought (ToT) chain uses a tree structure to explore the space of
possible solutions to a problem.
"""
from langchain_experimental.tot.base import ToTChain
from langchain_experimental.tot.checker import ToTChecker
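
A usage sketch (not from the commit diff) of how these two exports are typically combined: ToTChain drives the search while a ToTChecker subclass grades each thought. The evaluate() signature below follows the experimental checker interface as I recall it, so treat it as an assumption rather than a verbatim copy of the library.

    from typing import Tuple

    from langchain_experimental.tot.checker import ToTChecker
    from langchain_experimental.tot.thought import ThoughtValidity


    class ToyChecker(ToTChecker):
        # Assumed interface: evaluate() receives the problem description plus the
        # thoughts on the current path and classifies the newest thought.
        def evaluate(
            self, problem_description: str, thoughts: Tuple[str, ...] = ()
        ) -> ThoughtValidity:
            last = thoughts[-1].lower() if thoughts else ""
            if "contradiction" in last:
                return ThoughtValidity.INVALID
            if "solution:" in last:
                return ThoughtValidity.VALID_FINAL
            return ThoughtValidity.VALID_INTERMEDIATE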

View File

@@ -1,14 +1,3 @@
"""
This a Tree of Thought (ToT) chain based on the paper "Large Language Model
Guided Tree-of-Thought"
https://arxiv.org/pdf/2305.08291.pdf
The Tree of Thought (ToT) chain uses a tree structure to explore the space of
possible solutions to a problem.
"""
from __future__ import annotations
from textwrap import indent
@@ -34,7 +23,7 @@ from langchain_experimental.tot.thought_generation import (
class ToTChain(Chain):
"""
A Chain implementing the Tree of Thought (ToT).
Chain implementing the Tree of Thought (ToT).
"""
llm: BaseLanguageModel
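
A hedged construction sketch (not from the commit diff): the llm field comes straight from the class above, but the checker, k, and c keyword arguments are assumptions about the chain's other fields and may not match the installed version.

    from langchain_openai import ChatOpenAI  # any BaseLanguageModel works here

    from langchain_experimental.tot.base import ToTChain

    llm = ChatOpenAI(model="gpt-4", temperature=1.0)
    tot_chain = ToTChain(
        llm=llm,               # the BaseLanguageModel field shown above
        checker=ToyChecker(),  # the ToTChecker subclass sketched earlier
        k=30,                  # assumed: cap on thought-generation rounds
        c=5,                   # assumed: child thoughts explored per step
    )
    print(tot_chain.run(problem_description="Place the digits 1-4 in the partially filled grid ..."))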

View File

@@ -7,7 +7,9 @@ from langchain_experimental.tot.thought import Thought
class ToTDFSMemory:
"""
Memory for the Tree of Thought (ToT) chain. Implemented as a stack of
Memory for the Tree of Thought (ToT) chain.
It is implemented as a stack of
thoughts. This allows for a depth first search (DFS) of the ToT.
"""

View File

@@ -9,6 +9,8 @@ from langchain_experimental.tot.thought import ThoughtValidity
def get_cot_prompt() -> PromptTemplate:
"""Get the prompt for the Chain of Thought (CoT) chain."""
return PromptTemplate(
template_format="jinja2",
input_variables=["problem_description", "thoughts"],
@@ -36,7 +38,7 @@ def get_cot_prompt() -> PromptTemplate:
class JSONListOutputParser(BaseOutputParser):
"""Class to parse the output of a PROPOSE_PROMPT response."""
"""Parse the output of a PROPOSE_PROMPT response."""
@property
def _type(self) -> str:
@@ -53,6 +55,8 @@ class JSONListOutputParser(BaseOutputParser):
def get_propose_prompt() -> PromptTemplate:
"""Get the prompt for the PROPOSE_PROMPT chain."""
return PromptTemplate(
template_format="jinja2",
input_variables=["problem_description", "thoughts", "n"],
@@ -95,6 +99,8 @@ def get_propose_prompt() -> PromptTemplate:
class CheckerOutputParser(BaseOutputParser):
"""Parse and check the output of the language model."""
def parse(self, text: str) -> ThoughtValidity:
"""Parse the output of the language model."""
text = text.upper()
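
A hedged sketch (not from the commit diff) of the checker parser in use, assuming these classes live in langchain_experimental.tot.prompts. The keyword matching after text.upper() is visible above, but the exact strings the parser accepts are an assumption.

    from langchain_experimental.tot.prompts import CheckerOutputParser

    parser = CheckerOutputParser()
    # Assumed behaviour: the upper-cased text is scanned for validity keywords.
    print(parser.parse("This thought is INTERMEDIATE"))      # ThoughtValidity.VALID_INTERMEDIATE
    print(parser.parse("invalid: the row repeats a digit"))  # ThoughtValidity.INVALID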

View File

@@ -7,12 +7,16 @@ from langchain_experimental.pydantic_v1 import BaseModel, Field
class ThoughtValidity(Enum):
"""Enum for the validity of a thought."""
VALID_INTERMEDIATE = 0
VALID_FINAL = 1
INVALID = 2
class Thought(BaseModel):
"""A thought in the ToT."""
text: str
validity: ThoughtValidity
children: Set[Thought] = Field(default_factory=set)
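
A quick sketch (not from the commit diff) of the data model above: thoughts form a tree through children, and the enum marks whether a branch can still grow or is a dead end. The set-based child links assume Thought is hashable, as the Set[Thought] annotation implies.

    from langchain_experimental.tot.thought import Thought, ThoughtValidity

    root = Thought(text="start from the first empty cell", validity=ThoughtValidity.VALID_INTERMEDIATE)
    dead_end = Thought(text="a 7 appears twice in one row", validity=ThoughtValidity.INVALID)

    root.children.add(dead_end)  # assumes Thought is hashable (Set[Thought])
    if dead_end.validity is ThoughtValidity.INVALID:
        root.children.discard(dead_end)  # prune the dead branch, try a sibling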

View File

@@ -39,7 +39,7 @@ class BaseThoughtGenerationStrategy(LLMChain):
class SampleCoTStrategy(BaseThoughtGenerationStrategy):
"""
Sample thoughts from a Chain-of-Thought (CoT) prompt.
Strategy that samples thoughts from a Chain-of-Thought (CoT) prompt.
This strategy works better when the thought space is rich, such as when each
thought is a paragraph. Independent and identically distributed samples
@@ -62,7 +62,7 @@ class SampleCoTStrategy(BaseThoughtGenerationStrategy):
class ProposePromptStrategy(BaseThoughtGenerationStrategy):
"""
Propose thoughts sequentially using a "propose prompt".
Strategy that proposes thoughts sequentially, using a "propose prompt".
This strategy works better when the thought space is more constrained, such
as when each thought is just a word or a line. Proposing different thoughts