mirror of
https://github.com/hwchase17/langchain.git
synced 2025-07-14 08:56:27 +00:00
add skeleton of thought (#13883)
This commit is contained in:
parent
0efa59cbb8
commit
968ba6961f
1
templates/skeleton-of-thought/.gitignore
vendored
Normal file
1
templates/skeleton-of-thought/.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
|||||||
|
__pycache__
|
21
templates/skeleton-of-thought/LICENSE
Normal file
21
templates/skeleton-of-thought/LICENSE
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2023 LangChain, Inc.
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
70
templates/skeleton-of-thought/README.md
Normal file
70
templates/skeleton-of-thought/README.md
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
# skeleton-of-thought
|
||||||
|
|
||||||
|
Implements "Skeleton of Thought" from [this](https://sites.google.com/view/sot-llm) paper.
|
||||||
|
|
||||||
|
This technique makes it possible to produce longer generations more quickly by first generating a skeleton, then generating each point of the outline.
|
||||||
|
|
||||||
|
## Environment Setup
|
||||||
|
|
||||||
|
Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.
|
||||||
|
|
||||||
|
To get your `OPENAI_API_KEY`, navigate to [API keys](https://platform.openai.com/account/api-keys) on your OpenAI account and create a new secret key.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
To use this package, you should first have the LangChain CLI installed:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
pip install -U langchain-cli
|
||||||
|
```
|
||||||
|
|
||||||
|
To create a new LangChain project and install this as the only package, you can do:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
langchain app new my-app --package skeleton-of-thought
|
||||||
|
```
|
||||||
|
|
||||||
|
If you want to add this to an existing project, you can just run:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
langchain app add skeleton-of-thought
|
||||||
|
```
|
||||||
|
|
||||||
|
And add the following code to your `server.py` file:
|
||||||
|
```python
|
||||||
|
from skeleton_of_thought import chain as skeleton_of_thought_chain
|
||||||
|
|
||||||
|
add_routes(app, skeleton_of_thought_chain, path="/skeleton-of-thought")
|
||||||
|
```
|
||||||
|
|
||||||
|
(Optional) Let's now configure LangSmith.
|
||||||
|
LangSmith will help us trace, monitor and debug LangChain applications.
|
||||||
|
LangSmith is currently in private beta, you can sign up [here](https://smith.langchain.com/).
|
||||||
|
If you don't have access, you can skip this section
|
||||||
|
|
||||||
|
|
||||||
|
```shell
|
||||||
|
export LANGCHAIN_TRACING_V2=true
|
||||||
|
export LANGCHAIN_API_KEY=<your-api-key>
|
||||||
|
export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default"
|
||||||
|
```
|
||||||
|
|
||||||
|
If you are inside this directory, then you can spin up a LangServe instance directly by:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
langchain serve
|
||||||
|
```
|
||||||
|
|
||||||
|
This will start the FastAPI app with a server running locally at
|
||||||
|
[http://localhost:8000](http://localhost:8000)
|
||||||
|
|
||||||
|
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
|
||||||
|
We can access the playground at [http://127.0.0.1:8000/skeleton-of-thought/playground](http://127.0.0.1:8000/skeleton-of-thought/playground)
|
||||||
|
|
||||||
|
We can access the template from code with:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from langserve.client import RemoteRunnable
|
||||||
|
|
||||||
|
runnable = RemoteRunnable("http://localhost:8000/skeleton-of-thought")
|
||||||
|
```
|
24
templates/skeleton-of-thought/pyproject.toml
Normal file
24
templates/skeleton-of-thought/pyproject.toml
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
[tool.poetry]
|
||||||
|
name = "skeleton-of-thought"
|
||||||
|
version = "0.0.1"
|
||||||
|
description = ""
|
||||||
|
authors = []
|
||||||
|
readme = "README.md"
|
||||||
|
|
||||||
|
[tool.poetry.dependencies]
|
||||||
|
python = ">=3.8.1,<4.0"
|
||||||
|
langchain = ">=0.0.313, <0.1"
|
||||||
|
openai = "^0.28.1"
|
||||||
|
|
||||||
|
[tool.poetry.group.dev.dependencies]
|
||||||
|
langchain-cli = ">=0.0.4"
|
||||||
|
fastapi = "^0.104.0"
|
||||||
|
sse-starlette = "^1.6.5"
|
||||||
|
|
||||||
|
[tool.langserve]
|
||||||
|
export_module = "skeleton_of_thought"
|
||||||
|
export_attr = "chain"
|
||||||
|
|
||||||
|
[build-system]
|
||||||
|
requires = ["poetry-core"]
|
||||||
|
build-backend = "poetry.core.masonry.api"
|
@ -0,0 +1,3 @@
|
|||||||
|
"""Skeleton-of-thought template package; re-exports the runnable chain."""
from skeleton_of_thought.chain import chain

__all__ = ["chain"]
|
96
templates/skeleton-of-thought/skeleton_of_thought/chain.py
Normal file
96
templates/skeleton-of-thought/skeleton_of_thought/chain.py
Normal file
@ -0,0 +1,96 @@
|
|||||||
|
from langchain.chat_models import ChatOpenAI
|
||||||
|
from langchain.prompts import ChatPromptTemplate
|
||||||
|
from langchain.pydantic_v1 import BaseModel
|
||||||
|
from langchain.schema.output_parser import StrOutputParser
|
||||||
|
from langchain.schema.runnable import RunnablePassthrough
|
||||||
|
|
||||||
|
# Prompt asking the model for a terse numbered outline ("skeleton") of the
# answer, following the Skeleton-of-Thought technique.
skeleton_generator_template = """[User:] You’re an organizer responsible for only \
giving the skeleton (not the full content) for answering the question.
Provide the skeleton in a list of points (numbered 1., 2., 3., etc.) to answer \
the question. \
Instead of writing a full sentence, each skeleton point should be very short \
with only 3∼5 words. \
Generally, the skeleton should have 3∼10 points. Now, please provide the skeleton \
for the following question.
{question}
Skeleton:
[Assistant:] 1."""

skeleton_generator_prompt = ChatPromptTemplate.from_template(
    skeleton_generator_template
)

# The prompt ends with "[Assistant:] 1." to coax a numbered list, so the
# model's completion lacks the leading "1. " — the lambda re-prepends it so
# the skeleton string parses as a complete numbered list downstream.
skeleton_generator_chain = (
    skeleton_generator_prompt | ChatOpenAI() | StrOutputParser() | (lambda x: "1. " + x)
)
|
||||||
|
|
||||||
|
# Prompt that expands exactly one skeleton point into a short continuation,
# given the question, the full skeleton, and the target point index/text.
point_expander_template = """[User:] You’re responsible for continuing \
the writing of one and only one point in the overall answer to the following question.
{question}
The skeleton of the answer is
{skeleton}
Continue and only continue the writing of point {point_index}. \
Write it **very shortly** in 1∼2 sentence and do not continue with other points!
[Assistant:] {point_index}. {point_skeleton}"""

point_expander_prompt = ChatPromptTemplate.from_template(point_expander_template)

# Runs the expander, then joins the original point text with the model's
# continuation, yielding the fully expanded point as a plain string.
point_expander_chain = RunnablePassthrough.assign(
    continuation=point_expander_prompt | ChatOpenAI() | StrOutputParser()
) | (lambda x: x["point_skeleton"].strip() + " " + x["continuation"])
|
||||||
|
|
||||||
|
|
||||||
|
def parse_numbered_list(input_str):
    """Parse a numbered list into a list of dictionaries.

    Each element has two keys: 'point_index' for the integer index in the
    numbered list, and 'point_skeleton' for the content of that point.

    Lines that do not start with an integer followed by ". " are skipped,
    so stray prose in the model output (e.g. "Note. something") no longer
    raises ValueError as the original int() call did.
    """
    parsed_list = []

    for line in input_str.split("\n"):
        # Split each line at the first period to separate the index
        # from the content.
        parts = line.split(". ", 1)
        if len(parts) != 2:
            continue
        try:
            index = int(parts[0])
        except ValueError:
            # Not a numbered item — skip instead of crashing.
            continue
        parsed_list.append({"point_index": index, "point_skeleton": parts[1].strip()})

    return parsed_list
|
||||||
|
|
||||||
|
|
||||||
|
def create_list_elements(_input):
    """Fan a generated skeleton out into one dict per outline point.

    Each returned element carries the point's index and text (from
    parse_numbered_list) plus the full skeleton and the original question,
    ready to feed the point-expander chain via .map().
    """
    skeleton = _input["skeleton"]
    question = _input["question"]
    elements = parse_numbered_list(skeleton)
    for element in elements:
        element["skeleton"] = skeleton
        element["question"] = question
    return elements
|
||||||
|
|
||||||
|
|
||||||
|
def get_final_answer(expanded_list):
    """Stitch the expanded points into one numbered answer string."""
    header = "Here's a comprehensive answer:\n\n"
    body = "".join(
        f"{number}. {point}\n\n"
        for number, point in enumerate(expanded_list, start=1)
    )
    return header + body
|
||||||
|
|
||||||
|
|
||||||
|
class ChainInput(BaseModel):
    """Input schema for the skeleton-of-thought chain."""

    # The user question to be answered via skeleton generation + expansion.
    question: str
|
||||||
|
|
||||||
|
|
||||||
|
# Full skeleton-of-thought pipeline: generate a skeleton for the question,
# fan out one element per outline point, expand every point via .map()
# (which runs the expander over each element), then stitch the expansions
# into one numbered answer string.
chain = (
    RunnablePassthrough.assign(skeleton=skeleton_generator_chain)
    | create_list_elements
    | point_expander_chain.map()
    | get_final_answer
).with_types(input_type=ChainInput)
|
0
templates/skeleton-of-thought/tests/__init__.py
Normal file
0
templates/skeleton-of-thought/tests/__init__.py
Normal file
Loading…
Reference in New Issue
Block a user