mirror of
https://github.com/hwchase17/langchain.git
synced 2025-09-09 23:12:38 +00:00
Templates (#12294)
Co-authored-by: Harrison Chase <hw.chase.17@gmail.com> Co-authored-by: Lance Martin <lance@langchain.dev> Co-authored-by: Jacob Lee <jacoblee93@gmail.com>
This commit is contained in:
15
templates/sql-llama2/README.md
Normal file
15
templates/sql-llama2/README.md
Normal file
@@ -0,0 +1,15 @@
|
||||
# SQL with LLaMA2
|
||||
|
||||
This template allows you to chat with a SQL database in natural language using LLaMA2.
|
||||
|
||||
It is configured to use [Replicate](https://python.langchain.com/docs/integrations/llms/replicate).
|
||||
|
||||
But it can be adapted to any API that supports LLaMA2, including [Fireworks](https://python.langchain.com/docs/integrations/chat/fireworks) and others.
|
||||
|
||||
See related templates `sql-ollama` and `sql-llamacpp` for private, local chat with SQL.
|
||||
|
||||
## Installation
|
||||
```bash
|
||||
# from inside your LangServe instance
|
||||
poe add sql-llama2
|
||||
```
|
0
templates/sql-llama2/nba_roster.db
Normal file
0
templates/sql-llama2/nba_roster.db
Normal file
1204
templates/sql-llama2/poetry.lock
generated
Normal file
1204
templates/sql-llama2/poetry.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
19
templates/sql-llama2/pyproject.toml
Normal file
19
templates/sql-llama2/pyproject.toml
Normal file
@@ -0,0 +1,19 @@
|
||||
[tool.poetry]
|
||||
name = "sql-llama2"
|
||||
version = "0.1.0"
|
||||
description = ""
|
||||
authors = ["Lance Martin <lance@langchain.dev>"]
|
||||
readme = "README.md"
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = ">=3.8.1,<4.0"
|
||||
langchain = ">=0.0.313, <0.1"
|
||||
replicate = ">=0.15.4"
|
||||
|
||||
[tool.langserve]
|
||||
export_module = "sql_llama2"
|
||||
export_attr = "chain"
|
||||
|
||||
[build-system]
|
||||
requires = ["poetry-core"]
|
||||
build-backend = "poetry.core.masonry.api"
|
3
templates/sql-llama2/sql_llama2/__init__.py
Normal file
3
templates/sql-llama2/sql_llama2/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
# Re-export the template's chain so LangServe can import it as
# `from sql_llama2 import chain` (see `[tool.langserve]` in pyproject.toml:
# export_module = "sql_llama2", export_attr = "chain").
# BUG FIX: the original read `from llama2.chain import chain`, but the
# package directory is `sql_llama2`, so the import failed at load time.
from sql_llama2.chain import chain

__all__ = ["chain"]
|
73
templates/sql-llama2/sql_llama2/chain.py
Normal file
73
templates/sql-llama2/sql_llama2/chain.py
Normal file
@@ -0,0 +1,73 @@
|
||||
from pathlib import Path

from langchain.llms import Replicate
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain.utilities import SQLDatabase

# Make sure REPLICATE_API_TOKEN is set in your environment.
# Hosted llama-2-13b-chat model on Replicate, pinned to a specific version.
replicate_id = "meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d"
llm = Replicate(
    model=replicate_id,
    # Near-zero temperature for deterministic SQL generation; cap output length.
    model_kwargs={"temperature": 0.01, "max_length": 500, "top_p": 1},
)

# SQLite database bundled next to this module. sample_rows_in_table_info=0
# keeps example rows out of the schema prompt.
# BUG FIX: the original built the URI via db_path.relative_to(Path.cwd()),
# which raises ValueError whenever the process is started from a directory
# that is not an ancestor of this file. An absolute sqlite:/// URI works
# regardless of the current working directory.
db_path = Path(__file__).parent / "nba_roster.db"
db = SQLDatabase.from_uri(f"sqlite:///{db_path}", sample_rows_in_table_info=0)
||||
|
||||
def get_schema(_):
    """Return the database's table-definition info.

    The ignored positional argument lets this be plugged straight into a
    Runnable pipeline, which always forwards the chain input.
    """
    schema_info = db.get_table_info()
    return schema_info
|
||||
|
||||
|
||||
def run_query(query):
    """Execute *query* against the database and return the raw result."""
    result = db.run(query)
    return result
|
||||
|
||||
|
||||
# Prompt asking the model to translate a natural-language question into a
# SQL query, given the live database schema.
template_query = """Based on the table schema below, write a SQL query that would answer the user's question:
{schema}

Question: {question}
SQL Query:"""
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Given an input question, convert it to a SQL query. No pre-amble."),
        ("human", template_query),
    ]
)

# Stage 1: attach the schema to the input, render the prompt, and have the
# LLM emit only the SQL text. The stop sequence cuts generation off before
# the model can start inventing a "SQLResult:" section of its own.
sql_response = (
    RunnablePassthrough.assign(schema=get_schema)
    | prompt
    | llm.bind(stop=["\nSQLResult:"])
    | StrOutputParser()
)
|
||||
|
||||
# Prompt that turns the executed query's raw result back into a
# natural-language answer for the user.
template_response = """Based on the table schema below, question, sql query, and sql response, write a natural language response:
{schema}

Question: {question}
SQL Query: {query}
SQL Response: {response}"""

prompt_response = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "Given an input question and SQL response, convert it to a natural language answer. No pre-amble.",
        ),
        ("human", template_response),
    ]
)
|
||||
|
||||
# Full pipeline exported to LangServe:
#   1. generate a SQL query from the user's question (sql_response),
#   2. re-attach the schema and execute the generated query against the DB,
#   3. have the LLM phrase the SQL result as a natural-language answer.
# NOTE(review): step 2 runs model-generated SQL directly via db.run() —
# acceptable for this read-only demo database, but do not point this chain
# at a database you care about without sandboxing/validation.
chain = (
    RunnablePassthrough.assign(query=sql_response)
    | RunnablePassthrough.assign(
        schema=get_schema,
        response=lambda x: db.run(x["query"]),
    )
    | prompt_response
    | llm
)
|
BIN
templates/sql-llama2/sql_llama2/nba_roster.db
Normal file
BIN
templates/sql-llama2/sql_llama2/nba_roster.db
Normal file
Binary file not shown.
0
templates/sql-llama2/tests/__init__.py
Normal file
0
templates/sql-llama2/tests/__init__.py
Normal file
Reference in New Issue
Block a user