Mirror of https://github.com/hwchase17/langchain.git, synced 2025-06-28 17:38:36 +00:00

notebook fmt (#12498)

This commit is contained in: parent 56cc5b847c, commit 2424fff3f1
4  .github/workflows/_lint.yml (vendored)
@@ -120,9 +120,9 @@ jobs:

- name: Install langchain editable
working-directory: ${{ inputs.working-directory }}
if: ${{ inputs.working-directory != 'libs/langchain' }}
if: ${{ inputs.langchain-location }}
env:
LANGCHAIN_LOCATION: ${{ inputs.langchain-location || '../langchain'}}
LANGCHAIN_LOCATION: ${{ inputs.langchain-location }}
run: |
pip install -e "$LANGCHAIN_LOCATION"
18  .github/workflows/doc_lint.yml (vendored)
@@ -1,11 +1,15 @@
---
name: Documentation Lint
name: Docs, templates, cookbook lint

on:
push:
branches: [ master ]
pull_request:
branches: [master]
paths:
- 'docs/**'
- 'templates/**'
- 'cookbook/**'
workflow_dispatch:

jobs:
check:
@@ -19,4 +23,12 @@ jobs:
run: |
# We should not encourage imports directly from main init file
# Expect for hub
git grep 'from langchain import' docs/{docs,snippets} | grep -vE 'from langchain import (hub)' && exit 1 || exit 0
git grep 'from langchain import' {docs,templates,cookbook} | grep -vE 'from langchain import (hub)' && exit 1 || exit 0

- name: Run lint
uses:
./.github/workflows/_lint.yml
with:
working-directory: .
secrets: inherit
1  .github/workflows/langchain_cli_ci.yml (vendored)
@@ -36,6 +36,7 @@ jobs:
./.github/workflows/_lint.yml
with:
working-directory: libs/cli
langchain-location: ../langchain
secrets: inherit

test:

@@ -35,6 +35,7 @@ jobs:
./.github/workflows/_lint.yml
with:
working-directory: libs/experimental
langchain-location: ../langchain
secrets: inherit

test:
12  Makefile
@@ -37,6 +37,18 @@ spell_check:
spell_fix:
poetry run codespell --toml pyproject.toml -w

######################
# LINTING AND FORMATTING
######################

lint:
poetry run ruff {docs,templates,cookbook}
poetry run black {docs,templates,cookbook} --check

format format_diff:
poetry run black {docs,templates,cookbook}
poetry run ruff --select I --fix {docs,templates,cookbook}

######################
# HELP
######################
@@ -62,20 +62,19 @@
"source": [
"# Local\n",
"from langchain.chat_models import ChatOllama\n",
"\n",
"llama2_chat = ChatOllama(model=\"llama2:13b-chat\")\n",
"llama2_code = ChatOllama(model=\"codellama:7b-instruct\")\n",
"\n",
"# API\n",
"from getpass import getpass\n",
"from langchain.llms import Replicate\n",
"\n",
"# REPLICATE_API_TOKEN = getpass()\n",
"# os.environ[\"REPLICATE_API_TOKEN\"] = REPLICATE_API_TOKEN\n",
"replicate_id = \"meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d\"\n",
"llama2_chat_replicate = Replicate(\n",
" model=replicate_id,\n",
" input={\"temperature\": 0.01, \n",
" \"max_length\": 500, \n",
" \"top_p\": 1}\n",
" model=replicate_id, input={\"temperature\": 0.01, \"max_length\": 500, \"top_p\": 1}\n",
")"
]
},
@@ -110,11 +109,14 @@
"outputs": [],
"source": [
"from langchain.utilities import SQLDatabase\n",
"\n",
"db = SQLDatabase.from_uri(\"sqlite:///nba_roster.db\", sample_rows_in_table_info=0)\n",
"\n",
"\n",
"def get_schema(_):\n",
" return db.get_table_info()\n",
"\n",
"\n",
"def run_query(query):\n",
" return db.run(query)"
]
@@ -149,15 +151,18 @@
"source": [
"# Prompt\n",
"from langchain.prompts import ChatPromptTemplate\n",
"\n",
"template = \"\"\"Based on the table schema below, write a SQL query that would answer the user's question:\n",
"{schema}\n",
"\n",
"Question: {question}\n",
"SQL Query:\"\"\"\n",
"prompt = ChatPromptTemplate.from_messages([\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", \"Given an input question, convert it to a SQL query. No pre-amble.\"),\n",
" (\"human\", template)\n",
"])\n",
" (\"human\", template),\n",
" ]\n",
")\n",
"\n",
"# Chain to query\n",
"from langchain.schema.output_parser import StrOutputParser\n",
@@ -209,10 +214,15 @@
"Question: {question}\n",
"SQL Query: {query}\n",
"SQL Response: {response}\"\"\"\n",
"prompt_response = ChatPromptTemplate.from_messages([\n",
" (\"system\", \"Given an input question and SQL response, convert it to a natural langugae answer. No pre-amble.\"),\n",
" (\"human\", template)\n",
"])\n",
"prompt_response = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"Given an input question and SQL response, convert it to a natural langugae answer. No pre-amble.\",\n",
" ),\n",
" (\"human\", template),\n",
" ]\n",
")\n",
"\n",
"full_chain = (\n",
" RunnablePassthrough.assign(query=sql_response)\n",
@@ -269,16 +279,19 @@
"# Prompt\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"\n",
"template = \"\"\"Based on the table schema below, write a SQL query that would answer the user's question:\n",
"{schema}\n",
"\n",
"Question: {question}\n",
"SQL Query:\"\"\"\n",
"prompt = ChatPromptTemplate.from_messages([\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", \"Given an input question, convert it to a SQL query. No pre-amble.\"),\n",
" MessagesPlaceholder(variable_name=\"history\"),\n",
" (\"human\", template)\n",
"])\n",
" (\"human\", template),\n",
" ]\n",
")\n",
"\n",
"memory = ConversationBufferMemory(return_messages=True)\n",
"\n",
@@ -288,16 +301,19 @@
"sql_chain = (\n",
" RunnablePassthrough.assign(\n",
" schema=get_schema,\n",
" history=RunnableLambda(lambda x: memory.load_memory_variables(x)[\"history\"])\n",
" )| prompt\n",
" history=RunnableLambda(lambda x: memory.load_memory_variables(x)[\"history\"]),\n",
" )\n",
" | prompt\n",
" | llm.bind(stop=[\"\\nSQLResult:\"])\n",
" | StrOutputParser()\n",
")\n",
"\n",
"\n",
"def save(input_output):\n",
" output = {\"output\": input_output.pop(\"output\")}\n",
" memory.save_context(input_output, output)\n",
" return output['output']\n",
" return output[\"output\"]\n",
"\n",
"\n",
"sql_response_memory = RunnablePassthrough.assign(output=sql_chain) | save\n",
"sql_response_memory.invoke({\"question\": \"What team is Klay Thompson on?\"})"
@@ -349,10 +365,15 @@
"Question: {question}\n",
"SQL Query: {query}\n",
"SQL Response: {response}\"\"\"\n",
"prompt_response = ChatPromptTemplate.from_messages([\n",
" (\"system\", \"Given an input question and SQL response, convert it to a natural langugae answer. No pre-amble.\"),\n",
" (\"human\", template)\n",
"])\n",
"prompt_response = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"Given an input question and SQL response, convert it to a natural langugae answer. No pre-amble.\",\n",
" ),\n",
" (\"human\", template),\n",
" ]\n",
")\n",
"\n",
"full_chain = (\n",
" RunnablePassthrough.assign(query=sql_response_memory)\n",
@ -108,7 +108,8 @@
|
||||
"from unstructured.partition.pdf import partition_pdf\n",
|
||||
"\n",
|
||||
"# Get elements\n",
|
||||
"raw_pdf_elements = partition_pdf(filename=path+\"LLaMA2.pdf\",\n",
|
||||
"raw_pdf_elements = partition_pdf(\n",
|
||||
" filename=path + \"LLaMA2.pdf\",\n",
|
||||
" # Unstructured first finds embedded image blocks\n",
|
||||
" extract_images_in_pdf=False,\n",
|
||||
" # Use layout model (YOLOX) to get bounding boxes (for tables) and find titles\n",
|
||||
@ -122,7 +123,8 @@
|
||||
" max_characters=4000,\n",
|
||||
" new_after_n_chars=3800,\n",
|
||||
" combine_text_under_n_chars=2000,\n",
|
||||
" image_output_dir_path=path)"
|
||||
" image_output_dir_path=path,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -190,6 +192,7 @@
|
||||
" type: str\n",
|
||||
" text: Any\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Categorize by type\n",
|
||||
"categorized_elements = []\n",
|
||||
"for element in raw_pdf_elements:\n",
|
||||
@ -321,10 +324,7 @@
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"\n",
|
||||
"# The vectorstore to use to index the child chunks\n",
|
||||
"vectorstore = Chroma(\n",
|
||||
" collection_name=\"summaries\",\n",
|
||||
" embedding_function=OpenAIEmbeddings()\n",
|
||||
")\n",
|
||||
"vectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())\n",
|
||||
"\n",
|
||||
"# The storage layer for the parent documents\n",
|
||||
"store = InMemoryStore()\n",
|
||||
@ -339,13 +339,19 @@
|
||||
"\n",
|
||||
"# Add texts\n",
|
||||
"doc_ids = [str(uuid.uuid4()) for _ in texts]\n",
|
||||
"summary_texts = [Document(page_content=s,metadata={id_key: doc_ids[i]}) for i, s in enumerate(text_summaries)]\n",
|
||||
"summary_texts = [\n",
|
||||
" Document(page_content=s, metadata={id_key: doc_ids[i]})\n",
|
||||
" for i, s in enumerate(text_summaries)\n",
|
||||
"]\n",
|
||||
"retriever.vectorstore.add_documents(summary_texts)\n",
|
||||
"retriever.docstore.mset(list(zip(doc_ids, texts)))\n",
|
||||
"\n",
|
||||
"# Add tables\n",
|
||||
"table_ids = [str(uuid.uuid4()) for _ in tables]\n",
|
||||
"summary_tables = [Document(page_content=s,metadata={id_key: table_ids[i]}) for i, s in enumerate(table_summaries)]\n",
|
||||
"summary_tables = [\n",
|
||||
" Document(page_content=s, metadata={id_key: table_ids[i]})\n",
|
||||
" for i, s in enumerate(table_summaries)\n",
|
||||
"]\n",
|
||||
"retriever.vectorstore.add_documents(summary_tables)\n",
|
||||
"retriever.docstore.mset(list(zip(table_ids, tables)))"
|
||||
]
|
||||
|
@ -98,7 +98,8 @@
|
||||
"from unstructured.partition.pdf import partition_pdf\n",
|
||||
"\n",
|
||||
"# Get elements\n",
|
||||
"raw_pdf_elements = partition_pdf(filename=path+\"LLaVA.pdf\",\n",
|
||||
"raw_pdf_elements = partition_pdf(\n",
|
||||
" filename=path + \"LLaVA.pdf\",\n",
|
||||
" # Using pdf format to find embedded image blocks\n",
|
||||
" extract_images_in_pdf=True,\n",
|
||||
" # Use layout model (YOLOX) to get bounding boxes (for tables) and find titles\n",
|
||||
@ -113,7 +114,8 @@
|
||||
" max_characters=4000,\n",
|
||||
" new_after_n_chars=3800,\n",
|
||||
" combine_text_under_n_chars=2000,\n",
|
||||
" image_output_dir_path=path)"
|
||||
" image_output_dir_path=path,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -170,6 +172,7 @@
|
||||
" type: str\n",
|
||||
" text: Any\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Categorize by type\n",
|
||||
"categorized_elements = []\n",
|
||||
"for element in raw_pdf_elements:\n",
|
||||
@ -342,7 +345,7 @@
|
||||
"# Read each file and store its content in a list\n",
|
||||
"img_summaries = []\n",
|
||||
"for file_path in file_paths:\n",
|
||||
" with open(file_path, 'r') as file:\n",
|
||||
" with open(file_path, \"r\") as file:\n",
|
||||
" img_summaries.append(file.read())\n",
|
||||
"\n",
|
||||
"# Remove any logging prior to summary\n",
|
||||
@ -375,10 +378,7 @@
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"\n",
|
||||
"# The vectorstore to use to index the child chunks\n",
|
||||
"vectorstore = Chroma(\n",
|
||||
" collection_name=\"summaries\",\n",
|
||||
" embedding_function=OpenAIEmbeddings()\n",
|
||||
")\n",
|
||||
"vectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())\n",
|
||||
"\n",
|
||||
"# The storage layer for the parent documents\n",
|
||||
"store = InMemoryStore()\n",
|
||||
@ -393,13 +393,19 @@
|
||||
"\n",
|
||||
"# Add texts\n",
|
||||
"doc_ids = [str(uuid.uuid4()) for _ in texts]\n",
|
||||
"summary_texts = [Document(page_content=s,metadata={id_key: doc_ids[i]}) for i, s in enumerate(text_summaries)]\n",
|
||||
"summary_texts = [\n",
|
||||
" Document(page_content=s, metadata={id_key: doc_ids[i]})\n",
|
||||
" for i, s in enumerate(text_summaries)\n",
|
||||
"]\n",
|
||||
"retriever.vectorstore.add_documents(summary_texts)\n",
|
||||
"retriever.docstore.mset(list(zip(doc_ids, texts)))\n",
|
||||
"\n",
|
||||
"# Add tables\n",
|
||||
"table_ids = [str(uuid.uuid4()) for _ in tables]\n",
|
||||
"summary_tables = [Document(page_content=s,metadata={id_key: table_ids[i]}) for i, s in enumerate(table_summaries)]\n",
|
||||
"summary_tables = [\n",
|
||||
" Document(page_content=s, metadata={id_key: table_ids[i]})\n",
|
||||
" for i, s in enumerate(table_summaries)\n",
|
||||
"]\n",
|
||||
"retriever.vectorstore.add_documents(summary_tables)\n",
|
||||
"retriever.docstore.mset(list(zip(table_ids, tables)))"
|
||||
]
|
||||
@ -423,7 +429,10 @@
|
||||
"source": [
|
||||
"# Add image summaries\n",
|
||||
"img_ids = [str(uuid.uuid4()) for _ in cleaned_img_summary]\n",
|
||||
"summary_img = [Document(page_content=s,metadata={id_key: img_ids[i]}) for i, s in enumerate(cleaned_img_summary)]\n",
|
||||
"summary_img = [\n",
|
||||
" Document(page_content=s, metadata={id_key: img_ids[i]})\n",
|
||||
" for i, s in enumerate(cleaned_img_summary)\n",
|
||||
"]\n",
|
||||
"retriever.vectorstore.add_documents(summary_img)\n",
|
||||
"retriever.docstore.mset(list(zip(img_ids, cleaned_img_summary)))"
|
||||
]
|
||||
@ -449,10 +458,19 @@
|
||||
"source": [
|
||||
"# Add images\n",
|
||||
"img_ids = [str(uuid.uuid4()) for _ in cleaned_img_summary]\n",
|
||||
"summary_img = [Document(page_content=s,metadata={id_key: img_ids[i]}) for i, s in enumerate(cleaned_img_summary)]\n",
|
||||
"summary_img = [\n",
|
||||
" Document(page_content=s, metadata={id_key: img_ids[i]})\n",
|
||||
" for i, s in enumerate(cleaned_img_summary)\n",
|
||||
"]\n",
|
||||
"retriever.vectorstore.add_documents(summary_img)\n",
|
||||
"### Fetch images\n",
|
||||
"retriever.docstore.mset(list(zip(img_ids, ### image ### ))) "
|
||||
"retriever.docstore.mset(\n",
|
||||
" list(\n",
|
||||
" zip(\n",
|
||||
" img_ids,\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -542,7 +560,9 @@
|
||||
],
|
||||
"source": [
|
||||
"# We can retrieve this table\n",
|
||||
"retriever.get_relevant_documents(\"What are results for LLaMA across across domains / subjects?\")[1]"
|
||||
"retriever.get_relevant_documents(\n",
|
||||
" \"What are results for LLaMA across across domains / subjects?\"\n",
|
||||
")[1]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -592,7 +612,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever.get_relevant_documents(\"Images / figures with playful and creative examples\")[1]"
|
||||
"retriever.get_relevant_documents(\"Images / figures with playful and creative examples\")[\n",
|
||||
" 1\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -664,7 +686,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(\"What is the performance of LLaVa across across multiple image domains / subjects?\")"
|
||||
"chain.invoke(\n",
|
||||
" \"What is the performance of LLaVa across across multiple image domains / subjects?\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -713,7 +737,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
@ -92,7 +92,8 @@
|
||||
"path = \"/Users/rlm/Desktop/Papers/LLaVA/\"\n",
|
||||
"\n",
|
||||
"# Get elements\n",
|
||||
"raw_pdf_elements = partition_pdf(filename=path+\"LLaVA.pdf\",\n",
|
||||
"raw_pdf_elements = partition_pdf(\n",
|
||||
" filename=path + \"LLaVA.pdf\",\n",
|
||||
" # Using pdf format to find embedded image blocks\n",
|
||||
" extract_images_in_pdf=True,\n",
|
||||
" # Use layout model (YOLOX) to get bounding boxes (for tables) and find titles\n",
|
||||
@ -107,7 +108,8 @@
|
||||
" max_characters=4000,\n",
|
||||
" new_after_n_chars=3800,\n",
|
||||
" combine_text_under_n_chars=2000,\n",
|
||||
" image_output_dir_path=path)"
|
||||
" image_output_dir_path=path,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -165,6 +167,7 @@
|
||||
" type: str\n",
|
||||
" text: Any\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Categorize by type\n",
|
||||
"categorized_elements = []\n",
|
||||
"for element in raw_pdf_elements:\n",
|
||||
@ -327,11 +330,14 @@
|
||||
"# Read each file and store its content in a list\n",
|
||||
"img_summaries = []\n",
|
||||
"for file_path in file_paths:\n",
|
||||
" with open(file_path, 'r') as file:\n",
|
||||
" with open(file_path, \"r\") as file:\n",
|
||||
" img_summaries.append(file.read())\n",
|
||||
"\n",
|
||||
"# Clean up residual logging\n",
|
||||
"cleaned_img_summary = [s.split(\"clip_model_load: total allocated memory: 201.27 MB\\n\\n\", 1)[1].strip() for s in img_summaries]"
|
||||
"cleaned_img_summary = [\n",
|
||||
" s.split(\"clip_model_load: total allocated memory: 201.27 MB\\n\\n\", 1)[1].strip()\n",
|
||||
" for s in img_summaries\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -377,8 +383,7 @@
|
||||
"\n",
|
||||
"# The vectorstore to use to index the child chunks\n",
|
||||
"vectorstore = Chroma(\n",
|
||||
" collection_name=\"summaries\",\n",
|
||||
" embedding_function=GPT4AllEmbeddings()\n",
|
||||
" collection_name=\"summaries\", embedding_function=GPT4AllEmbeddings()\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# The storage layer for the parent documents\n",
|
||||
@ -412,21 +417,32 @@
|
||||
"source": [
|
||||
"# Add texts\n",
|
||||
"doc_ids = [str(uuid.uuid4()) for _ in texts]\n",
|
||||
"summary_texts = [Document(page_content=s,metadata={id_key: doc_ids[i]}) for i, s in enumerate(text_summaries)]\n",
|
||||
"summary_texts = [\n",
|
||||
" Document(page_content=s, metadata={id_key: doc_ids[i]})\n",
|
||||
" for i, s in enumerate(text_summaries)\n",
|
||||
"]\n",
|
||||
"retriever.vectorstore.add_documents(summary_texts)\n",
|
||||
"retriever.docstore.mset(list(zip(doc_ids, texts)))\n",
|
||||
"\n",
|
||||
"# Add tables\n",
|
||||
"table_ids = [str(uuid.uuid4()) for _ in tables]\n",
|
||||
"summary_tables = [Document(page_content=s,metadata={id_key: table_ids[i]}) for i, s in enumerate(table_summaries)]\n",
|
||||
"summary_tables = [\n",
|
||||
" Document(page_content=s, metadata={id_key: table_ids[i]})\n",
|
||||
" for i, s in enumerate(table_summaries)\n",
|
||||
"]\n",
|
||||
"retriever.vectorstore.add_documents(summary_tables)\n",
|
||||
"retriever.docstore.mset(list(zip(table_ids, tables)))\n",
|
||||
"\n",
|
||||
"# Add images\n",
|
||||
"img_ids = [str(uuid.uuid4()) for _ in cleaned_img_summary]\n",
|
||||
"summary_img = [Document(page_content=s,metadata={id_key: img_ids[i]}) for i, s in enumerate(cleaned_img_summary)]\n",
|
||||
"summary_img = [\n",
|
||||
" Document(page_content=s, metadata={id_key: img_ids[i]})\n",
|
||||
" for i, s in enumerate(cleaned_img_summary)\n",
|
||||
"]\n",
|
||||
"retriever.vectorstore.add_documents(summary_img)\n",
|
||||
"retriever.docstore.mset(list(zip(img_ids, cleaned_img_summary))) # Store the image summary as the raw document"
|
||||
"retriever.docstore.mset(\n",
|
||||
" list(zip(img_ids, cleaned_img_summary))\n",
|
||||
") # Store the image summary as the raw document"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -484,7 +500,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever.get_relevant_documents(\"Images / figures with playful and creative examples\")[0]"
|
||||
"retriever.get_relevant_documents(\"Images / figures with playful and creative examples\")[\n",
|
||||
" 0\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -555,7 +573,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(\"What is the performance of LLaVa across across multiple image domains / subjects?\")"
|
||||
"chain.invoke(\n",
|
||||
" \"What is the performance of LLaVa across across multiple image domains / subjects?\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -584,7 +604,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(\"Explain any images / figures in the paper with playful and creative examples.\")"
|
||||
"chain.invoke(\n",
|
||||
" \"Explain any images / figures in the paper with playful and creative examples.\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -837,7 +837,9 @@
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.chains import ConversationalRetrievalChain\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(model_name=\"gpt-3.5-turbo-0613\") # 'ada' 'gpt-3.5-turbo-0613' 'gpt-4',\n",
|
||||
"model = ChatOpenAI(\n",
|
||||
" model_name=\"gpt-3.5-turbo-0613\"\n",
|
||||
") # 'ada' 'gpt-3.5-turbo-0613' 'gpt-4',\n",
|
||||
"qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)"
|
||||
]
|
||||
},
|
||||
|
@ -77,6 +77,7 @@
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain_experimental.autonomous_agents import HuggingGPT\n",
|
||||
"\n",
|
||||
"# %env OPENAI_API_BASE=http://localhost:8000/v1"
|
||||
]
|
||||
},
|
||||
|
@ -50,6 +50,7 @@
|
||||
"# pick and configure the LLM of your choice\n",
|
||||
"\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(model=\"text-davinci-003\")"
|
||||
]
|
||||
},
|
||||
@ -86,7 +87,7 @@
|
||||
"\n",
|
||||
"PROMPT = PromptTemplate(\n",
|
||||
" input_variables=[\"meal\", \"text_to_personalize\", \"user\", \"preference\"],\n",
|
||||
" template=PROMPT_TEMPLATE\n",
|
||||
" template=PROMPT_TEMPLATE,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -105,7 +106,7 @@
|
||||
"source": [
|
||||
"import langchain_experimental.rl_chain as rl_chain\n",
|
||||
"\n",
|
||||
"chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)\n"
|
||||
"chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -223,12 +224,16 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"scoring_criteria_template = \"Given {preference} rank how good or bad this selection is {meal}\"\n",
|
||||
"scoring_criteria_template = (\n",
|
||||
" \"Given {preference} rank how good or bad this selection is {meal}\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = rl_chain.PickBest.from_llm(\n",
|
||||
" llm=llm,\n",
|
||||
" prompt=PROMPT,\n",
|
||||
" selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, scoring_criteria_template_str=scoring_criteria_template),\n",
|
||||
" selection_scorer=rl_chain.AutoSelectionScorer(\n",
|
||||
" llm=llm, scoring_criteria_template_str=scoring_criteria_template\n",
|
||||
" ),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -262,7 +267,9 @@
|
||||
")\n",
|
||||
"print(response[\"response\"])\n",
|
||||
"selection_metadata = response[\"selection_metadata\"]\n",
|
||||
"print(f\"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}\")"
|
||||
"print(\n",
|
||||
" f\"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -280,8 +287,8 @@
|
||||
"source": [
|
||||
"class CustomSelectionScorer(rl_chain.SelectionScorer):\n",
|
||||
" def score_response(\n",
|
||||
" self, inputs, llm_response: str, event: rl_chain.PickBestEvent) -> float:\n",
|
||||
"\n",
|
||||
" self, inputs, llm_response: str, event: rl_chain.PickBestEvent\n",
|
||||
" ) -> float:\n",
|
||||
" print(event.based_on)\n",
|
||||
" print(event.to_select_from)\n",
|
||||
"\n",
|
||||
@ -370,9 +377,10 @@
|
||||
" return 1.0\n",
|
||||
" else:\n",
|
||||
" return 0.0\n",
|
||||
" def score_response(\n",
|
||||
" self, inputs, llm_response: str, event: rl_chain.PickBestEvent) -> float:\n",
|
||||
"\n",
|
||||
" def score_response(\n",
|
||||
" self, inputs, llm_response: str, event: rl_chain.PickBestEvent\n",
|
||||
" ) -> float:\n",
|
||||
" selected_meal = event.to_select_from[\"meal\"][event.selected.index]\n",
|
||||
"\n",
|
||||
" if \"Tom\" in event.based_on[\"user\"]:\n",
|
||||
@ -403,7 +411,7 @@
|
||||
" selection_scorer=CustomSelectionScorer(),\n",
|
||||
" metrics_step=5,\n",
|
||||
" metrics_window_size=5, # rolling window average\n",
|
||||
" policy=rl_chain.PickBestRandomPolicy # set the random policy instead of default\n",
|
||||
" policy=rl_chain.PickBestRandomPolicy, # set the random policy instead of default\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -477,12 +485,17 @@
|
||||
],
|
||||
"source": [
|
||||
"from matplotlib import pyplot as plt\n",
|
||||
"chain.metrics.to_pandas()['score'].plot(label=\"default learning policy\")\n",
|
||||
"random_chain.metrics.to_pandas()['score'].plot(label=\"random selection policy\")\n",
|
||||
"\n",
|
||||
"chain.metrics.to_pandas()[\"score\"].plot(label=\"default learning policy\")\n",
|
||||
"random_chain.metrics.to_pandas()[\"score\"].plot(label=\"random selection policy\")\n",
|
||||
"plt.legend()\n",
|
||||
"\n",
|
||||
"print(f\"The final average score for the default policy, calculated over a rolling window, is: {chain.metrics.to_pandas()['score'].iloc[-1]}\")\n",
|
||||
"print(f\"The final average score for the random policy, calculated over a rolling window, is: {random_chain.metrics.to_pandas()['score'].iloc[-1]}\")"
|
||||
"print(\n",
|
||||
" f\"The final average score for the default policy, calculated over a rolling window, is: {chain.metrics.to_pandas()['score'].iloc[-1]}\"\n",
|
||||
")\n",
|
||||
"print(\n",
|
||||
" f\"The final average score for the random policy, calculated over a rolling window, is: {random_chain.metrics.to_pandas()['score'].iloc[-1]}\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -27,11 +27,12 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"from os import environ\n",
|
||||
"import getpass\n",
|
||||
"from typing import Dict, Any\n",
|
||||
"from langchain.llms import OpenAI\nfrom langchain.utilities import SQLDatabase\nfrom langchain.chains import LLMChain\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.utilities import SQLDatabase\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n",
|
||||
"from sqlalchemy import create_engine, Column, MetaData\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
@ -76,7 +77,6 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.callbacks import StdOutCallbackHandler\n",
|
||||
"\n",
|
||||
@ -124,8 +124,9 @@
|
||||
"from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain\n",
|
||||
"\n",
|
||||
"from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n",
|
||||
"from langchain_experimental.retrievers.vector_sql_database \\\n",
|
||||
" import VectorSQLDatabaseChainRetriever\n",
|
||||
"from langchain_experimental.retrievers.vector_sql_database import (\n",
|
||||
" VectorSQLDatabaseChainRetriever,\n",
|
||||
")\n",
|
||||
"from langchain_experimental.sql.prompt import MYSCALE_PROMPT\n",
|
||||
"from langchain_experimental.sql.vector_sql import VectorSQLRetrieveAllOutputParser\n",
|
||||
"\n",
|
||||
@ -144,7 +145,9 @@
|
||||
")\n",
|
||||
"\n",
|
||||
"# You need all those keys to get docs\n",
|
||||
"retriever = VectorSQLDatabaseChainRetriever(sql_db_chain=chain, page_content_key=\"abstract\")\n",
|
||||
"retriever = VectorSQLDatabaseChainRetriever(\n",
|
||||
" sql_db_chain=chain, page_content_key=\"abstract\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"document_with_metadata_prompt = PromptTemplate(\n",
|
||||
" input_variables=[\"page_content\", \"id\", \"title\", \"authors\", \"pubdate\", \"categories\"],\n",
|
||||
@ -162,8 +165,10 @@
|
||||
" },\n",
|
||||
" return_source_documents=True,\n",
|
||||
")\n",
|
||||
"ans = chain(\"Please give me 10 papers to ask what is PageRank?\",\n",
|
||||
" callbacks=[StdOutCallbackHandler()])\n",
|
||||
"ans = chain(\n",
|
||||
" \"Please give me 10 papers to ask what is PageRank?\",\n",
|
||||
" callbacks=[StdOutCallbackHandler()],\n",
|
||||
")\n",
|
||||
"print(ans[\"answer\"])"
|
||||
]
|
||||
},
|
||||
|
@ -34,7 +34,11 @@
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.utilities import DuckDuckGoSearchAPIWrapper\n",
|
||||
"from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner"
|
||||
"from langchain_experimental.plan_and_execute import (\n",
|
||||
" PlanAndExecute,\n",
|
||||
" load_agent_executor,\n",
|
||||
" load_chat_planner,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -59,12 +63,12 @@
|
||||
" Tool(\n",
|
||||
" name=\"Search\",\n",
|
||||
" func=search.run,\n",
|
||||
" description=\"useful for when you need to answer questions about current events\"\n",
|
||||
" description=\"useful for when you need to answer questions about current events\",\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name=\"Calculator\",\n",
|
||||
" func=llm_math_chain.run,\n",
|
||||
" description=\"useful for when you need to answer questions about math\"\n",
|
||||
" description=\"useful for when you need to answer questions about math\",\n",
|
||||
" ),\n",
|
||||
"]"
|
||||
]
|
||||
@ -216,7 +220,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"Who is the current prime minister of the UK? What is their current age raised to the 0.43 power?\")"
|
||||
"agent.run(\n",
|
||||
" \"Who is the current prime minister of the UK? What is their current age raised to the 0.43 power?\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -55,6 +55,7 @@
|
||||
"source": [
|
||||
"# Setup API keys for Kay and OpenAI\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"KAY_API_KEY = getpass()\n",
|
||||
"OPENAI_API_KEY = getpass()"
|
||||
]
|
||||
@ -67,6 +68,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"KAY_API_KEY\"] = KAY_API_KEY\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY"
|
||||
]
|
||||
@ -83,7 +85,9 @@
|
||||
"from langchain.retrievers import KayAiRetriever\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
|
||||
"retriever = KayAiRetriever.create(dataset_id=\"company\", data_types=[\"PressRelease\"], num_contexts=6)\n",
|
||||
"retriever = KayAiRetriever.create(\n",
|
||||
" dataset_id=\"company\", data_types=[\"PressRelease\"], num_contexts=6\n",
|
||||
")\n",
|
||||
"qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)"
|
||||
]
|
||||
},
|
||||
|
@ -53,7 +53,7 @@
|
||||
" \"doc7\": \"Climate change: The science and models.\",\n",
|
||||
" \"doc8\": \"Global warming: A subset of climate change.\",\n",
|
||||
" \"doc9\": \"How climate change affects daily weather.\",\n",
|
||||
" \"doc10\": \"The history of climate change activism.\"\n",
|
||||
" \"doc10\": \"The history of climate change activism.\",\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
@ -64,7 +64,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vectorstore = Pinecone.from_texts(list(all_documents.values()), OpenAIEmbeddings(), index_name='rag-fusion')"
|
||||
"vectorstore = Pinecone.from_texts(\n",
|
||||
" list(all_documents.values()), OpenAIEmbeddings(), index_name=\"rag-fusion\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -98,7 +100,7 @@
|
||||
"source": [
|
||||
"from langchain import hub\n",
|
||||
"\n",
|
||||
"prompt = hub.pull('langchain-ai/rag-fusion-query-generation')"
|
||||
"prompt = hub.pull(\"langchain-ai/rag-fusion-query-generation\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -122,7 +124,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"generate_queries = prompt | ChatOpenAI(temperature=0) | StrOutputParser() | (lambda x: x.split(\"\\n\"))"
|
||||
"generate_queries = (\n",
|
||||
" prompt | ChatOpenAI(temperature=0) | StrOutputParser() | (lambda x: x.split(\"\\n\"))\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -171,6 +175,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.load import dumps, loads\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def reciprocal_rank_fusion(results: list[list], k=60):\n",
|
||||
" fused_scores = {}\n",
|
||||
" for docs in results:\n",
|
||||
@ -182,7 +188,10 @@
|
||||
" previous_score = fused_scores[doc_str]\n",
|
||||
" fused_scores[doc_str] += 1 / (rank + k)\n",
|
||||
"\n",
|
||||
" reranked_results = [(loads(doc), score) for doc, score in sorted(fused_scores.items(), key=lambda x: x[1], reverse=True)]\n",
|
||||
" reranked_results = [\n",
|
||||
" (loads(doc), score)\n",
|
||||
" for doc, score in sorted(fused_scores.items(), key=lambda x: x[1], reverse=True)\n",
|
||||
" ]\n",
|
||||
" return reranked_results"
|
||||
]
|
||||
},
|
||||
|
@ -245,6 +245,7 @@
|
||||
"source": [
|
||||
"# Parser to remove the `**`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def _parse(text):\n",
|
||||
" return text.strip(\"**\")"
|
||||
]
|
||||
@ -290,7 +291,8 @@
|
||||
"rewrite_retrieve_read_chain = (\n",
|
||||
" {\n",
|
||||
" \"context\": {\"x\": RunnablePassthrough()} | rewriter | retriever,\n",
|
||||
" \"question\": RunnablePassthrough()} \n",
|
||||
" \"question\": RunnablePassthrough(),\n",
|
||||
" }\n",
|
||||
" | prompt\n",
|
||||
" | model\n",
|
||||
" | StrOutputParser()\n",
|
||||
|
@ -139,7 +139,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"context\": \"a frog went to a pond and sat on a log and went to a different pond\"})"
|
||||
"chain.invoke(\n",
|
||||
" {\"context\": \"a frog went to a pond and sat on a log and went to a different pond\"}\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -51,8 +51,14 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"details = pd.read_csv(\"~/Downloads/archive/Hotel_details.csv\").drop_duplicates(subset=\"hotelid\").set_index(\"hotelid\")\n",
|
||||
"attributes = pd.read_csv(\"~/Downloads/archive/Hotel_Room_attributes.csv\", index_col=\"id\")\n",
|
||||
"details = (\n",
|
||||
" pd.read_csv(\"~/Downloads/archive/Hotel_details.csv\")\n",
|
||||
" .drop_duplicates(subset=\"hotelid\")\n",
|
||||
" .set_index(\"hotelid\")\n",
|
||||
")\n",
|
||||
"attributes = pd.read_csv(\n",
|
||||
" \"~/Downloads/archive/Hotel_Room_attributes.csv\", index_col=\"id\"\n",
|
||||
")\n",
|
||||
"price = pd.read_csv(\"~/Downloads/archive/hotels_RoomPrice.csv\", index_col=\"id\")"
|
||||
]
|
||||
},
|
||||
@ -208,9 +214,20 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"latest_price = price.drop_duplicates(subset=\"refid\", keep=\"last\")[[\"hotelcode\", \"roomtype\", \"onsiterate\", \"roomamenities\", \"maxoccupancy\", \"mealinclusiontype\"]]\n",
|
||||
"latest_price = price.drop_duplicates(subset=\"refid\", keep=\"last\")[\n",
|
||||
" [\n",
|
||||
" \"hotelcode\",\n",
|
||||
" \"roomtype\",\n",
|
||||
" \"onsiterate\",\n",
|
||||
" \"roomamenities\",\n",
|
||||
" \"maxoccupancy\",\n",
|
||||
" \"mealinclusiontype\",\n",
|
||||
" ]\n",
|
||||
"]\n",
|
||||
"latest_price[\"ratedescription\"] = attributes.loc[latest_price.index][\"ratedescription\"]\n",
|
||||
"latest_price = latest_price.join(details[[\"hotelname\", \"city\", \"country\", \"starrating\"]], on=\"hotelcode\")\n",
|
||||
"latest_price = latest_price.join(\n",
|
||||
" details[[\"hotelname\", \"city\", \"country\", \"starrating\"]], on=\"hotelcode\"\n",
|
||||
")\n",
|
||||
"latest_price = latest_price.rename({\"ratedescription\": \"roomdescription\"}, axis=1)\n",
|
||||
"latest_price[\"mealsincluded\"] = ~latest_price[\"mealinclusiontype\"].isnull()\n",
|
||||
"latest_price.pop(\"hotelcode\")\n",
|
||||
@ -244,7 +261,7 @@
|
||||
"res = model.predict(\n",
|
||||
" \"Below is a table with information about hotel rooms. \"\n",
|
||||
" \"Return a JSON list with an entry for each column. Each entry should have \"\n",
|
||||
" \"{\\\"name\\\": \\\"column name\\\", \\\"description\\\": \\\"column description\\\", \\\"type\\\": \\\"column data type\\\"}\"\n",
|
||||
" '{\"name\": \"column name\", \"description\": \"column description\", \"type\": \"column data type\"}'\n",
|
||||
" f\"\\n\\n{latest_price.head()}\\n\\nJSON:\\n\"\n",
|
||||
")"
|
||||
]
|
||||
@ -338,9 +355,15 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"attribute_info[-2]['description'] += f\". Valid values are {sorted(latest_price['starrating'].value_counts().index.tolist())}\"\n",
|
||||
"attribute_info[3]['description'] += f\". Valid values are {sorted(latest_price['maxoccupancy'].value_counts().index.tolist())}\"\n",
|
||||
"attribute_info[-3]['description'] += f\". Valid values are {sorted(latest_price['country'].value_counts().index.tolist())}\""
|
||||
"attribute_info[-2][\n",
|
||||
" \"description\"\n",
|
||||
"] += f\". Valid values are {sorted(latest_price['starrating'].value_counts().index.tolist())}\"\n",
|
||||
"attribute_info[3][\n",
|
||||
" \"description\"\n",
|
||||
"] += f\". Valid values are {sorted(latest_price['maxoccupancy'].value_counts().index.tolist())}\"\n",
|
||||
"attribute_info[-3][\n",
|
||||
" \"description\"\n",
|
||||
"] += f\". Valid values are {sorted(latest_price['country'].value_counts().index.tolist())}\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -408,7 +431,10 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains.query_constructor.base import get_query_constructor_prompt, load_query_constructor_runnable"
|
||||
"from langchain.chains.query_constructor.base import (\n",
|
||||
" get_query_constructor_prompt,\n",
|
||||
" load_query_constructor_runnable,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -592,7 +618,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = load_query_constructor_runnable(ChatOpenAI(model='gpt-3.5-turbo', temperature=0), doc_contents, attribute_info)"
|
||||
"chain = load_query_constructor_runnable(\n",
|
||||
" ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0), doc_contents, attribute_info\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -634,7 +662,11 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"query\": \"Find a 2-person room in Vienna or London, preferably with meals included and AC\"})"
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"query\": \"Find a 2-person room in Vienna or London, preferably with meals included and AC\"\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -656,9 +688,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"attribute_info[-3]['description'] += \". NOTE: Only use the 'eq' operator if a specific country is mentioned. If a region is mentioned, include all relevant countries in filter.\"\n",
|
||||
"attribute_info[-3][\n",
|
||||
" \"description\"\n",
|
||||
"] += \". NOTE: Only use the 'eq' operator if a specific country is mentioned. If a region is mentioned, include all relevant countries in filter.\"\n",
|
||||
"chain = load_query_constructor_runnable(\n",
|
||||
" ChatOpenAI(model='gpt-3.5-turbo', temperature=0), \n",
|
||||
" ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0),\n",
|
||||
" doc_contents,\n",
|
||||
" attribute_info,\n",
|
||||
")"
|
||||
@ -704,9 +738,11 @@
|
||||
"source": [
|
||||
"content_attr = [\"roomtype\", \"roomamenities\", \"roomdescription\", \"hotelname\"]\n",
|
||||
"doc_contents = \"A detailed description of a hotel room, including information about the room type and room amenities.\"\n",
|
||||
"filter_attribute_info = tuple(ai for ai in attribute_info if ai[\"name\"] not in content_attr)\n",
|
||||
"filter_attribute_info = tuple(\n",
|
||||
" ai for ai in attribute_info if ai[\"name\"] not in content_attr\n",
|
||||
")\n",
|
||||
"chain = load_query_constructor_runnable(\n",
|
||||
" ChatOpenAI(model='gpt-3.5-turbo', temperature=0), \n",
|
||||
" ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0),\n",
|
||||
" doc_contents,\n",
|
||||
" filter_attribute_info,\n",
|
||||
")"
|
||||
@ -730,7 +766,11 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"query\": \"Find a 2-person room in Vienna or London, preferably with meals included and AC\"})"
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"query\": \"Find a 2-person room in Vienna or London, preferably with meals included and AC\"\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -860,14 +900,22 @@
|
||||
"examples = [\n",
|
||||
" (\n",
|
||||
" \"I want a hotel in the Balkans with a king sized bed and a hot tub. Budget is $300 a night\",\n",
|
||||
" {\"query\": \"king-sized bed, hot tub\", \"filter\": 'and(in(\"country\", [\"Bulgaria\", \"Greece\", \"Croatia\", \"Serbia\"]), lte(\"onsiterate\", 300))'}\n",
|
||||
" {\n",
|
||||
" \"query\": \"king-sized bed, hot tub\",\n",
|
||||
" \"filter\": 'and(in(\"country\", [\"Bulgaria\", \"Greece\", \"Croatia\", \"Serbia\"]), lte(\"onsiterate\", 300))',\n",
|
||||
" },\n",
|
||||
" ),\n",
|
||||
" (\n",
|
||||
" \"A room with breakfast included for 3 people, at a Hilton\",\n",
|
||||
" {\"query\": \"Hilton\", \"filter\": 'and(eq(\"mealsincluded\", true), gte(\"maxoccupancy\", 3))'}\n",
|
||||
" {\n",
|
||||
" \"query\": \"Hilton\",\n",
|
||||
" \"filter\": 'and(eq(\"mealsincluded\", true), gte(\"maxoccupancy\", 3))',\n",
|
||||
" },\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"prompt = get_query_constructor_prompt(doc_contents, filter_attribute_info, examples=examples)\n",
|
||||
"prompt = get_query_constructor_prompt(\n",
|
||||
" doc_contents, filter_attribute_info, examples=examples\n",
|
||||
")\n",
|
||||
"print(prompt.format(query=\"{query}\"))"
|
||||
]
|
||||
},
|
||||
@ -879,10 +927,10 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = load_query_constructor_runnable(\n",
|
||||
" ChatOpenAI(model='gpt-3.5-turbo', temperature=0), \n",
|
||||
" ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0),\n",
|
||||
" doc_contents,\n",
|
||||
" filter_attribute_info,\n",
|
||||
" examples=examples\n",
|
||||
" examples=examples,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -904,7 +952,11 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"query\": \"Find a 2-person room in Vienna or London, preferably with meals included and AC\"})"
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"query\": \"Find a 2-person room in Vienna or London, preferably with meals included and AC\"\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -956,7 +1008,11 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"query\": \"I want to stay somewhere highly rated along the coast. I want a room with a patio and a fireplace.\"})"
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"query\": \"I want to stay somewhere highly rated along the coast. I want a room with a patio and a fireplace.\"\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -977,11 +1033,11 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = load_query_constructor_runnable(\n",
|
||||
" ChatOpenAI(model='gpt-3.5-turbo', temperature=0), \n",
|
||||
" ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0),\n",
|
||||
" doc_contents,\n",
|
||||
" filter_attribute_info,\n",
|
||||
" examples=examples,\n",
|
||||
" fix_invalid=True\n",
|
||||
" fix_invalid=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -1003,7 +1059,11 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"query\": \"I want to stay somewhere highly rated along the coast. I want a room with a patio and a fireplace.\"})"
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"query\": \"I want to stay somewhere highly rated along the coast. I want a room with a patio and a fireplace.\"\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -1089,7 +1149,9 @@
|
||||
"source": [
|
||||
"from langchain.retrievers import SelfQueryRetriever\n",
|
||||
"\n",
|
||||
"retriever = SelfQueryRetriever(query_constructor=chain, vectorstore=vecstore, verbose=True)"
|
||||
"retriever = SelfQueryRetriever(\n",
|
||||
" query_constructor=chain, vectorstore=vecstore, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -40,11 +40,11 @@
|
||||
"examples = [\n",
|
||||
" {\n",
|
||||
" \"input\": \"Could the members of The Police perform lawful arrests?\",\n",
|
||||
" \"output\": \"what can the members of The Police do?\"\n",
|
||||
" \"output\": \"what can the members of The Police do?\",\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"input\": \"Jan Sindel’s was born in what country?\",\n",
|
||||
" \"output\": \"what is Jan Sindel’s personal history?\"\n",
|
||||
" \"output\": \"what is Jan Sindel’s personal history?\",\n",
|
||||
" },\n",
|
||||
"]\n",
|
||||
"# We now transform these to example messages\n",
|
||||
@ -67,13 +67,18 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = ChatPromptTemplate.from_messages([\n",
|
||||
" (\"system\", \"\"\"You are an expert at world knowledge. Your task is to step back and paraphrase a question to a more generic step-back question, which is easier to answer. Here are a few examples:\"\"\"),\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"\"\"You are an expert at world knowledge. Your task is to step back and paraphrase a question to a more generic step-back question, which is easier to answer. Here are a few examples:\"\"\",\n",
|
||||
" ),\n",
|
||||
" # Few shot examples\n",
|
||||
" few_shot_prompt,\n",
|
||||
" # New question\n",
|
||||
" (\"user\", \"{question}\"),\n",
|
||||
"])"
|
||||
" ]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -129,6 +134,7 @@
|
||||
"\n",
|
||||
"search = DuckDuckGoSearchAPIWrapper(max_results=4)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def retriever(query):\n",
|
||||
" return search.run(query)"
|
||||
]
|
||||
@ -211,14 +217,19 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = {\n",
|
||||
"chain = (\n",
|
||||
" {\n",
|
||||
" # Retrieve context using the normal question\n",
|
||||
" \"normal_context\": RunnableLambda(lambda x: x['question']) | retriever,\n",
|
||||
" \"normal_context\": RunnableLambda(lambda x: x[\"question\"]) | retriever,\n",
|
||||
" # Retrieve context using the step-back question\n",
|
||||
" \"step_back_context\": question_gen | retriever,\n",
|
||||
" # Pass on the question\n",
|
||||
" \"question\": lambda x: x[\"question\"]\n",
|
||||
"} | response_prompt | ChatOpenAI(temperature=0) | StrOutputParser()"
|
||||
" \"question\": lambda x: x[\"question\"],\n",
|
||||
" }\n",
|
||||
" | response_prompt\n",
|
||||
" | ChatOpenAI(temperature=0)\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -273,12 +284,17 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = {\n",
|
||||
"chain = (\n",
|
||||
" {\n",
|
||||
" # Retrieve context using the normal question (only the first 3 results)\n",
|
||||
" \"normal_context\": RunnableLambda(lambda x: x['question']) | retriever,\n",
|
||||
" \"normal_context\": RunnableLambda(lambda x: x[\"question\"]) | retriever,\n",
|
||||
" # Pass on the question\n",
|
||||
" \"question\": lambda x: x[\"question\"]\n",
|
||||
"} | response_prompt | ChatOpenAI(temperature=0) | StrOutputParser()"
|
||||
" \"question\": lambda x: x[\"question\"],\n",
|
||||
" }\n",
|
||||
" | response_prompt\n",
|
||||
" | ChatOpenAI(temperature=0)\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -64,7 +64,7 @@
|
||||
"- Keep the known digits from previous valid thoughts in place.\n",
|
||||
"- Each thought can be a partial or the final solution.\n",
|
||||
"\"\"\".strip()\n",
|
||||
"print(problem_description)\n"
|
||||
"print(problem_description)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -89,8 +89,11 @@
|
||||
"from langchain_experimental.tot.thought import ThoughtValidity\n",
|
||||
"import re\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class MyChecker(ToTChecker):\n",
|
||||
" def evaluate(self, problem_description: str, thoughts: Tuple[str, ...] = ()) -> ThoughtValidity:\n",
|
||||
" def evaluate(\n",
|
||||
" self, problem_description: str, thoughts: Tuple[str, ...] = ()\n",
|
||||
" ) -> ThoughtValidity:\n",
|
||||
" last_thought = thoughts[-1]\n",
|
||||
" clean_solution = last_thought.replace(\" \", \"\").replace('\"', \"\")\n",
|
||||
" regex_solution = clean_solution.replace(\"*\", \".\").replace(\"|\", \"\\\\|\")\n",
|
||||
@ -116,10 +119,22 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"checker = MyChecker()\n",
|
||||
"assert checker.evaluate(\"\", (\"3,*,*,2|1,*,3,*|*,1,*,3|4,*,*,1\",)) == ThoughtValidity.VALID_INTERMEDIATE\n",
|
||||
"assert checker.evaluate(\"\", (\"3,4,1,2|1,2,3,4|2,1,4,3|4,3,2,1\",)) == ThoughtValidity.VALID_FINAL\n",
|
||||
"assert checker.evaluate(\"\", (\"3,4,1,2|1,2,3,4|2,1,4,3|4,3,*,1\",)) == ThoughtValidity.VALID_INTERMEDIATE\n",
|
||||
"assert checker.evaluate(\"\", (\"3,4,1,2|1,2,3,4|2,1,4,3|4,*,3,1\",)) == ThoughtValidity.INVALID"
|
||||
"assert (\n",
|
||||
" checker.evaluate(\"\", (\"3,*,*,2|1,*,3,*|*,1,*,3|4,*,*,1\",))\n",
|
||||
" == ThoughtValidity.VALID_INTERMEDIATE\n",
|
||||
")\n",
|
||||
"assert (\n",
|
||||
" checker.evaluate(\"\", (\"3,4,1,2|1,2,3,4|2,1,4,3|4,3,2,1\",))\n",
|
||||
" == ThoughtValidity.VALID_FINAL\n",
|
||||
")\n",
|
||||
"assert (\n",
|
||||
" checker.evaluate(\"\", (\"3,4,1,2|1,2,3,4|2,1,4,3|4,3,*,1\",))\n",
|
||||
" == ThoughtValidity.VALID_INTERMEDIATE\n",
|
||||
")\n",
|
||||
"assert (\n",
|
||||
" checker.evaluate(\"\", (\"3,4,1,2|1,2,3,4|2,1,4,3|4,*,3,1\",))\n",
|
||||
" == ThoughtValidity.INVALID\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -203,7 +218,9 @@
|
||||
"source": [
|
||||
"from langchain_experimental.tot.base import ToTChain\n",
|
||||
"\n",
|
||||
"tot_chain = ToTChain(llm=llm, checker=MyChecker(), k=30, c=5, verbose=True, verbose_llm=False)\n",
|
||||
"tot_chain = ToTChain(\n",
|
||||
" llm=llm, checker=MyChecker(), k=30, c=5, verbose=True, verbose_llm=False\n",
|
||||
")\n",
|
||||
"tot_chain.run(problem_description=problem_description)"
|
||||
]
|
||||
},
|
||||
|
@ -2,9 +2,9 @@
|
||||
import importlib
|
||||
import inspect
|
||||
import typing
|
||||
from pathlib import Path
|
||||
from typing import TypedDict, Sequence, List, Dict, Literal, Union, Optional
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Literal, Optional, Sequence, TypedDict, Union
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
@ -115,7 +115,9 @@
|
||||
"agent = (\n",
|
||||
" {\n",
|
||||
" \"question\": lambda x: x[\"question\"],\n",
|
||||
" \"intermediate_steps\": lambda x: convert_intermediate_steps(x[\"intermediate_steps\"])\n",
|
||||
" \"intermediate_steps\": lambda x: convert_intermediate_steps(\n",
|
||||
" x[\"intermediate_steps\"]\n",
|
||||
" ),\n",
|
||||
" }\n",
|
||||
" | prompt.partial(tools=convert_tools(tool_list))\n",
|
||||
" | model.bind(stop=[\"</tool_input>\", \"</final_answer>\"])\n",
|
||||
|
@ -18,7 +18,11 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n",
|
||||
"from langchain.prompts import (\n",
|
||||
" ChatPromptTemplate,\n",
|
||||
" SystemMessagePromptTemplate,\n",
|
||||
" HumanMessagePromptTemplate,\n",
|
||||
")\n",
|
||||
"from langchain.schema.output_parser import StrOutputParser\n",
|
||||
"from langchain_experimental.utilities import PythonREPL"
|
||||
]
|
||||
@ -37,9 +41,7 @@
|
||||
"```python\n",
|
||||
"....\n",
|
||||
"```\"\"\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"system\", template), (\"human\", \"{input}\")]\n",
|
||||
")\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", template), (\"human\", \"{input}\")])\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI()"
|
||||
]
|
||||
|
@ -24,11 +24,13 @@
|
||||
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI()\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"You are a helpful chatbot\"),\n",
|
||||
" MessagesPlaceholder(variable_name=\"history\"),\n",
|
||||
" (\"human\", \"{input}\")\n",
|
||||
"])\n"
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -38,7 +40,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"memory = ConversationBufferMemory(return_messages=True)\n"
|
||||
"memory = ConversationBufferMemory(return_messages=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -59,7 +61,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"memory.load_memory_variables({})\n"
|
||||
"memory.load_memory_variables({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -69,9 +71,13 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = RunnablePassthrough.assign(\n",
|
||||
"chain = (\n",
|
||||
" RunnablePassthrough.assign(\n",
|
||||
" memory=RunnableLambda(memory.load_memory_variables) | itemgetter(\"history\")\n",
|
||||
") | prompt | model\n"
|
||||
" )\n",
|
||||
" | prompt\n",
|
||||
" | model\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -94,7 +100,7 @@
|
||||
"source": [
|
||||
"inputs = {\"input\": \"hi im bob\"}\n",
|
||||
"response = chain.invoke(inputs)\n",
|
||||
"response\n"
|
||||
"response"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -104,7 +110,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"memory.save_context(inputs, {\"output\": response.content})\n"
|
||||
"memory.save_context(inputs, {\"output\": response.content})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -126,7 +132,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"memory.load_memory_variables({})\n"
|
||||
"memory.load_memory_variables({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -149,7 +155,7 @@
|
||||
"source": [
|
||||
"inputs = {\"input\": \"whats my name\"}\n",
|
||||
"response = chain.invoke(inputs)\n",
|
||||
"response\n"
|
||||
"response"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
@ -40,9 +40,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = OpenAI()\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([\n",
|
||||
" (\"system\", \"repeat after me: {input}\")\n",
|
||||
"])"
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", \"repeat after me: {input}\")])"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -44,13 +44,20 @@
"from langchain.schema import StrOutputParser\n",
"\n",
"prompt1 = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n",
"prompt2 = ChatPromptTemplate.from_template(\"what country is the city {city} in? respond in {language}\")\n",
"prompt2 = ChatPromptTemplate.from_template(\n",
" \"what country is the city {city} in? respond in {language}\"\n",
")\n",
"\n",
"model = ChatOpenAI()\n",
"\n",
"chain1 = prompt1 | model | StrOutputParser()\n",
"\n",
"chain2 = {\"city\": chain1, \"language\": itemgetter(\"language\")} | prompt2 | model | StrOutputParser()\n",
"chain2 = (\n",
" {\"city\": chain1, \"language\": itemgetter(\"language\")}\n",
" | prompt2\n",
" | model\n",
" | StrOutputParser()\n",
")\n",
"\n",
"chain2.invoke({\"person\": \"obama\", \"language\": \"spanish\"})"
]
@ -64,17 +71,29 @@
"source": [
"from langchain.schema.runnable import RunnableMap, RunnablePassthrough\n",
"\n",
"prompt1 = ChatPromptTemplate.from_template(\"generate a {attribute} color. Return the name of the color and nothing else:\")\n",
"prompt2 = ChatPromptTemplate.from_template(\"what is a fruit of color: {color}. Return the name of the fruit and nothing else:\")\n",
"prompt3 = ChatPromptTemplate.from_template(\"what is a country with a flag that has the color: {color}. Return the name of the country and nothing else:\")\n",
"prompt4 = ChatPromptTemplate.from_template(\"What is the color of {fruit} and the flag of {country}?\")\n",
"prompt1 = ChatPromptTemplate.from_template(\n",
" \"generate a {attribute} color. Return the name of the color and nothing else:\"\n",
")\n",
"prompt2 = ChatPromptTemplate.from_template(\n",
" \"what is a fruit of color: {color}. Return the name of the fruit and nothing else:\"\n",
")\n",
"prompt3 = ChatPromptTemplate.from_template(\n",
" \"what is a country with a flag that has the color: {color}. Return the name of the country and nothing else:\"\n",
")\n",
"prompt4 = ChatPromptTemplate.from_template(\n",
" \"What is the color of {fruit} and the flag of {country}?\"\n",
")\n",
"\n",
"model_parser = model | StrOutputParser()\n",
"\n",
"color_generator = {\"attribute\": RunnablePassthrough()} | prompt1 | {\"color\": model_parser}\n",
"color_generator = (\n",
" {\"attribute\": RunnablePassthrough()} | prompt1 | {\"color\": model_parser}\n",
")\n",
"color_to_fruit = prompt2 | model_parser\n",
"color_to_country = prompt3 | model_parser\n",
"question_generator = color_generator | {\"fruit\": color_to_fruit, \"country\": color_to_country} | prompt4"
"question_generator = (\n",
" color_generator | {\"fruit\": color_to_fruit, \"country\": color_to_country} | prompt4\n",
")"
]
},
{
@ -148,9 +167,7 @@
"outputs": [],
"source": [
"planner = (\n",
" ChatPromptTemplate.from_template(\n",
" \"Generate an argument about: {input}\"\n",
" )\n",
" ChatPromptTemplate.from_template(\"Generate an argument about: {input}\")\n",
" | ChatOpenAI()\n",
" | StrOutputParser()\n",
" | {\"base_response\": RunnablePassthrough()}\n",

@ -47,7 +47,7 @@
"\n",
"prompt = ChatPromptTemplate.from_template(\"tell me a joke about {foo}\")\n",
"model = ChatOpenAI()\n",
"chain = prompt | model\n"
"chain = prompt | model"
]
},
{
@ -68,7 +68,7 @@
}
],
"source": [
"chain.invoke({\"foo\": \"bears\"})\n"
"chain.invoke({\"foo\": \"bears\"})"
]
},
{
@ -94,7 +94,7 @@
"metadata": {},
"outputs": [],
"source": [
"chain = prompt | model.bind(stop=[\"\\n\"])\n"
"chain = prompt | model.bind(stop=[\"\\n\"])"
]
},
{
@ -115,7 +115,7 @@
}
],
"source": [
"chain.invoke({\"foo\": \"bears\"})\n"
"chain.invoke({\"foo\": \"bears\"})"
]
},
{
@ -140,20 +140,17 @@
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"setup\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The setup for the joke\"\n",
" },\n",
" \"setup\": {\"type\": \"string\", \"description\": \"The setup for the joke\"},\n",
" \"punchline\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The punchline for the joke\"\n",
" }\n",
" \"description\": \"The punchline for the joke\",\n",
" },\n",
" },\n",
" \"required\": [\"setup\", \"punchline\"],\n",
" },\n",
" \"required\": [\"setup\", \"punchline\"]\n",
" }\n",
" }\n",
"]\n",
"chain = prompt | model.bind(function_call= {\"name\": \"joke\"}, functions= functions)\n"
"chain = prompt | model.bind(function_call={\"name\": \"joke\"}, functions=functions)"
]
},
{
@ -174,7 +171,7 @@
}
],
"source": [
"chain.invoke({\"foo\": \"bears\"}, config={})\n"
"chain.invoke({\"foo\": \"bears\"}, config={})"
]
},
{
@ -196,7 +193,7 @@
"source": [
"from langchain.schema.output_parser import StrOutputParser\n",
"\n",
"chain = prompt | model | StrOutputParser()\n"
"chain = prompt | model | StrOutputParser()"
]
},
{
@ -225,7 +222,7 @@
}
],
"source": [
"chain.invoke({\"foo\": \"bears\"})\n"
"chain.invoke({\"foo\": \"bears\"})"
]
},
{
@ -251,7 +248,7 @@
" prompt\n",
" | model.bind(function_call={\"name\": \"joke\"}, functions=functions)\n",
" | JsonOutputFunctionsParser()\n",
")\n"
")"
]
},
{
@ -273,7 +270,7 @@
}
],
"source": [
"chain.invoke({\"foo\": \"bears\"})\n"
"chain.invoke({\"foo\": \"bears\"})"
]
},
{
@ -289,7 +286,7 @@
" prompt\n",
" | model.bind(function_call={\"name\": \"joke\"}, functions=functions)\n",
" | JsonKeyOutputFunctionsParser(key_name=\"setup\")\n",
")\n"
")"
]
},
{
@ -310,7 +307,7 @@
}
],
"source": [
"chain.invoke({\"foo\": \"bears\"})\n"
"chain.invoke({\"foo\": \"bears\"})"
]
},
{
@ -338,7 +335,7 @@
" | prompt\n",
" | model.bind(function_call={\"name\": \"joke\"}, functions=functions)\n",
" | JsonKeyOutputFunctionsParser(key_name=\"setup\")\n",
")\n"
")"
]
},
{
@ -359,7 +356,7 @@
}
],
"source": [
"chain.invoke(\"bears\")\n"
"chain.invoke(\"bears\")"
]
},
{
@ -382,7 +379,7 @@
" | prompt\n",
" | model.bind(function_call={\"name\": \"joke\"}, functions=functions)\n",
" | JsonKeyOutputFunctionsParser(key_name=\"setup\")\n",
")\n"
")"
]
},
{
@ -403,7 +400,7 @@
}
],
"source": [
"chain.invoke(\"bears\")\n"
"chain.invoke(\"bears\")"
]
}
],

@ -26,7 +26,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install langchain openai faiss-cpu tiktoken\n"
"!pip install langchain openai faiss-cpu tiktoken"
]
},
{
@ -43,7 +43,7 @@
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough, RunnableLambda\n",
"from langchain.vectorstores import FAISS\n"
"from langchain.vectorstores import FAISS"
]
},
{
@ -53,7 +53,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vectorstore = FAISS.from_texts([\"harrison worked at kensho\"], embedding=OpenAIEmbeddings())\n",
|
||||
"vectorstore = FAISS.from_texts(\n",
|
||||
" [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n",
|
||||
")\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"template = \"\"\"Answer the question based only on the following context:\n",
|
||||
@ -63,7 +65,7 @@
|
||||
"\"\"\"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI()\n"
|
||||
"model = ChatOpenAI()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -78,7 +80,7 @@
|
||||
" | prompt\n",
|
||||
" | model\n",
|
||||
" | StrOutputParser()\n",
|
||||
")\n"
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -99,7 +101,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(\"where did harrison work?\")\n"
|
||||
"chain.invoke(\"where did harrison work?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -118,11 +120,16 @@
|
||||
"\"\"\"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||
"\n",
|
||||
"chain = {\n",
|
||||
"chain = (\n",
|
||||
" {\n",
|
||||
" \"context\": itemgetter(\"question\") | retriever,\n",
|
||||
" \"question\": itemgetter(\"question\"),\n",
|
||||
" \"language\": itemgetter(\"language\")\n",
|
||||
"} | prompt | model | StrOutputParser()\n"
|
||||
" \"language\": itemgetter(\"language\"),\n",
|
||||
" }\n",
|
||||
" | prompt\n",
|
||||
" | model\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -143,7 +150,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"question\": \"where did harrison work\", \"language\": \"italian\"})\n"
|
||||
"chain.invoke({\"question\": \"where did harrison work\", \"language\": \"italian\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -164,7 +171,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema.runnable import RunnableMap\n",
|
||||
"from langchain.schema import format_document\n"
|
||||
"from langchain.schema import format_document"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -182,7 +189,7 @@
|
||||
"{chat_history}\n",
|
||||
"Follow Up Input: {question}\n",
|
||||
"Standalone question:\"\"\"\n",
|
||||
"CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)\n"
|
||||
"CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -197,7 +204,7 @@
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"\"\"\"\n",
|
||||
"ANSWER_PROMPT = ChatPromptTemplate.from_template(template)\n"
|
||||
"ANSWER_PROMPT = ChatPromptTemplate.from_template(template)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -208,9 +215,13 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template=\"{page_content}\")\n",
|
||||
"def _combine_documents(docs, document_prompt = DEFAULT_DOCUMENT_PROMPT, document_separator=\"\\n\\n\"):\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def _combine_documents(\n",
|
||||
" docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator=\"\\n\\n\"\n",
|
||||
"):\n",
|
||||
" doc_strings = [format_document(doc, document_prompt) for doc in docs]\n",
|
||||
" return document_separator.join(doc_strings)\n"
|
||||
" return document_separator.join(doc_strings)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -221,13 +232,15 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Tuple, List\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def _format_chat_history(chat_history: List[Tuple]) -> str:\n",
|
||||
" buffer = \"\"\n",
|
||||
" for dialogue_turn in chat_history:\n",
|
||||
" human = \"Human: \" + dialogue_turn[0]\n",
|
||||
" ai = \"Assistant: \" + dialogue_turn[1]\n",
|
||||
" buffer += \"\\n\" + \"\\n\".join([human, ai])\n",
|
||||
" return buffer\n"
|
||||
" return buffer"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -239,14 +252,17 @@
|
||||
"source": [
|
||||
"_inputs = RunnableMap(\n",
|
||||
" standalone_question=RunnablePassthrough.assign(\n",
|
||||
" chat_history=lambda x: _format_chat_history(x['chat_history'])\n",
|
||||
" ) | CONDENSE_QUESTION_PROMPT | ChatOpenAI(temperature=0) | StrOutputParser(),\n",
|
||||
" chat_history=lambda x: _format_chat_history(x[\"chat_history\"])\n",
|
||||
" )\n",
|
||||
" | CONDENSE_QUESTION_PROMPT\n",
|
||||
" | ChatOpenAI(temperature=0)\n",
|
||||
" | StrOutputParser(),\n",
|
||||
")\n",
|
||||
"_context = {\n",
|
||||
" \"context\": itemgetter(\"standalone_question\") | retriever | _combine_documents,\n",
|
||||
" \"question\": lambda x: x[\"standalone_question\"]\n",
|
||||
" \"question\": lambda x: x[\"standalone_question\"],\n",
|
||||
"}\n",
|
||||
"conversational_qa_chain = _inputs | _context | ANSWER_PROMPT | ChatOpenAI()\n"
|
||||
"conversational_qa_chain = _inputs | _context | ANSWER_PROMPT | ChatOpenAI()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -267,10 +283,12 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversational_qa_chain.invoke({\n",
|
||||
"conversational_qa_chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"question\": \"where did harrison work?\",\n",
|
||||
" \"chat_history\": [],\n",
|
||||
"})\n"
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -291,10 +309,12 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversational_qa_chain.invoke({\n",
|
||||
"conversational_qa_chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"question\": \"where did he work?\",\n",
|
||||
" \"chat_history\": [(\"Who wrote this notebook?\", \"Harrison\")],\n",
|
||||
"})\n"
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -315,7 +335,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"from langchain.memory import ConversationBufferMemory\n"
|
||||
"from langchain.memory import ConversationBufferMemory"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -325,7 +345,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"memory = ConversationBufferMemory(return_messages=True, output_key=\"answer\", input_key=\"question\")\n"
|
||||
"memory = ConversationBufferMemory(\n",
|
||||
" return_messages=True, output_key=\"answer\", input_key=\"question\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -344,18 +366,21 @@
|
||||
"standalone_question = {\n",
|
||||
" \"standalone_question\": {\n",
|
||||
" \"question\": lambda x: x[\"question\"],\n",
|
||||
" \"chat_history\": lambda x: _format_chat_history(x['chat_history'])\n",
|
||||
" } | CONDENSE_QUESTION_PROMPT | ChatOpenAI(temperature=0) | StrOutputParser(),\n",
|
||||
" \"chat_history\": lambda x: _format_chat_history(x[\"chat_history\"]),\n",
|
||||
" }\n",
|
||||
" | CONDENSE_QUESTION_PROMPT\n",
|
||||
" | ChatOpenAI(temperature=0)\n",
|
||||
" | StrOutputParser(),\n",
|
||||
"}\n",
|
||||
"# Now we retrieve the documents\n",
|
||||
"retrieved_documents = {\n",
|
||||
" \"docs\": itemgetter(\"standalone_question\") | retriever,\n",
|
||||
" \"question\": lambda x: x[\"standalone_question\"]\n",
|
||||
" \"question\": lambda x: x[\"standalone_question\"],\n",
|
||||
"}\n",
|
||||
"# Now we construct the inputs for the final prompt\n",
|
||||
"final_inputs = {\n",
|
||||
" \"context\": lambda x: _combine_documents(x[\"docs\"]),\n",
|
||||
" \"question\": itemgetter(\"question\")\n",
|
||||
" \"question\": itemgetter(\"question\"),\n",
|
||||
"}\n",
|
||||
"# And finally, we do the part that returns the answers\n",
|
||||
"answer = {\n",
|
||||
@ -363,7 +388,7 @@
|
||||
" \"docs\": itemgetter(\"docs\"),\n",
|
||||
"}\n",
|
||||
"# And now we put it all together!\n",
|
||||
"final_chain = loaded_memory | standalone_question | retrieved_documents | answer\n"
|
||||
"final_chain = loaded_memory | standalone_question | retrieved_documents | answer"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -387,7 +412,7 @@
|
||||
"source": [
|
||||
"inputs = {\"question\": \"where did harrison work?\"}\n",
|
||||
"result = final_chain.invoke(inputs)\n",
|
||||
"result\n"
|
||||
"result"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -400,7 +425,7 @@
|
||||
"# Note that the memory does not save automatically\n",
|
||||
"# This will be improved in the future\n",
|
||||
"# For now you need to save it yourself\n",
|
||||
"memory.save_context(inputs, {\"answer\": result[\"answer\"].content})\n"
|
||||
"memory.save_context(inputs, {\"answer\": result[\"answer\"].content})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -422,7 +447,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"memory.load_memory_variables({})\n"
|
||||
"memory.load_memory_variables({})"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
@ -33,7 +33,7 @@
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"SQL Query:\"\"\"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(template)\n"
|
||||
"prompt = ChatPromptTemplate.from_template(template)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -43,7 +43,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.utilities import SQLDatabase\n"
|
||||
"from langchain.utilities import SQLDatabase"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -61,7 +61,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db = SQLDatabase.from_uri(\"sqlite:///./Chinook.db\")\n"
|
||||
"db = SQLDatabase.from_uri(\"sqlite:///./Chinook.db\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -72,7 +72,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_schema(_):\n",
|
||||
" return db.get_table_info()\n"
|
||||
" return db.get_table_info()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -83,7 +83,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def run_query(query):\n",
|
||||
" return db.run(query)\n"
|
||||
" return db.run(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -104,7 +104,7 @@
|
||||
" | prompt\n",
|
||||
" | model.bind(stop=[\"\\nSQLResult:\"])\n",
|
||||
" | StrOutputParser()\n",
|
||||
" )\n"
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -125,7 +125,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"sql_response.invoke({\"question\": \"How many employees are there?\"})\n"
|
||||
"sql_response.invoke({\"question\": \"How many employees are there?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -141,7 +141,7 @@
|
||||
"Question: {question}\n",
|
||||
"SQL Query: {query}\n",
|
||||
"SQL Response: {response}\"\"\"\n",
|
||||
"prompt_response = ChatPromptTemplate.from_template(template)\n"
|
||||
"prompt_response = ChatPromptTemplate.from_template(template)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -159,7 +159,7 @@
|
||||
" )\n",
|
||||
" | prompt_response\n",
|
||||
" | model\n",
|
||||
")\n"
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -180,7 +180,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"full_chain.invoke({\"question\": \"How many employees are there?\"})\n"
|
||||
"full_chain.invoke({\"question\": \"How many employees are there?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -44,12 +44,17 @@
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"Write out the following equation using algebraic symbols then solve it. Use the format\\n\\nEQUATION:...\\nSOLUTION:...\\n\\n\"),\n",
|
||||
" (\"human\", \"{equation_statement}\")\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"Write out the following equation using algebraic symbols then solve it. Use the format\\n\\nEQUATION:...\\nSOLUTION:...\\n\\n\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{equation_statement}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"model = ChatOpenAI(temperature=0)\n",
|
||||
"runnable = {\"equation_statement\": RunnablePassthrough()} | prompt | model | StrOutputParser()\n",
|
||||
"runnable = (\n",
|
||||
" {\"equation_statement\": RunnablePassthrough()} | prompt | model | StrOutputParser()\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(runnable.invoke(\"x raised to the third plus seven equals 12\"))"
|
||||
]
|
||||
@ -114,17 +119,17 @@
|
||||
" \"properties\": {\n",
|
||||
" \"equation\": {\n",
|
||||
" \"type\": \"string\",\n",
|
||||
" \"description\": \"The algebraic expression of the equation\"\n",
|
||||
" \"description\": \"The algebraic expression of the equation\",\n",
|
||||
" },\n",
|
||||
" \"solution\": {\n",
|
||||
" \"type\": \"string\",\n",
|
||||
" \"description\": \"The solution to the equation\"\n",
|
||||
" }\n",
|
||||
" \"description\": \"The solution to the equation\",\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" \"required\": [\"equation\", \"solution\"],\n",
|
||||
" },\n",
|
||||
" \"required\": [\"equation\", \"solution\"]\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
" ]\n"
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -148,16 +153,17 @@
|
||||
"# Need gpt-4 to solve this one correctly\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"Write out the following equation using algebraic symbols then solve it.\"),\n",
|
||||
" (\"human\", \"{equation_statement}\")\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"Write out the following equation using algebraic symbols then solve it.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{equation_statement}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"model = ChatOpenAI(model=\"gpt-4\", temperature=0).bind(function_call={\"name\": \"solver\"}, functions=functions)\n",
|
||||
"runnable = (\n",
|
||||
" {\"equation_statement\": RunnablePassthrough()} \n",
|
||||
" | prompt \n",
|
||||
" | model\n",
|
||||
"model = ChatOpenAI(model=\"gpt-4\", temperature=0).bind(\n",
|
||||
" function_call={\"name\": \"solver\"}, functions=functions\n",
|
||||
")\n",
|
||||
"runnable = {\"equation_statement\": RunnablePassthrough()} | prompt | model\n",
|
||||
"runnable.invoke(\"x raised to the third plus seven equals 12\")"
|
||||
]
|
||||
},
|
||||
|
@ -92,7 +92,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model.with_config(configurable={\"llm_temperature\": .9}).invoke(\"pick a random number\")"
|
||||
"model.with_config(configurable={\"llm_temperature\": 0.9}).invoke(\"pick a random number\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -153,7 +153,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.with_config(configurable={\"llm_temperature\": .9}).invoke({\"x\": 0})"
|
||||
"chain.with_config(configurable={\"llm_temperature\": 0.9}).invoke({\"x\": 0})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -231,7 +231,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"prompt.with_config(configurable={\"hub_commit\": \"rlm/rag-prompt-llama\"}).invoke({\"question\": \"foo\", \"context\": \"bar\"})"
|
||||
"prompt.with_config(configurable={\"hub_commit\": \"rlm/rag-prompt-llama\"}).invoke(\n",
|
||||
" {\"question\": \"foo\", \"context\": \"bar\"}\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -373,7 +375,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = ChatAnthropic(temperature=0)\n",
|
||||
"prompt = PromptTemplate.from_template(\"Tell me a joke about {topic}\").configurable_alternatives(\n",
|
||||
"prompt = PromptTemplate.from_template(\n",
|
||||
" \"Tell me a joke about {topic}\"\n",
|
||||
").configurable_alternatives(\n",
|
||||
" # This gives this field an id\n",
|
||||
" # When configuring the end runnable, we can then use this id to configure this field\n",
|
||||
" ConfigurableField(id=\"prompt\"),\n",
|
||||
@ -462,7 +466,9 @@
|
||||
" gpt4=ChatOpenAI(model=\"gpt-4\"),\n",
|
||||
" # You can add more configuration options here\n",
|
||||
")\n",
|
||||
"prompt = PromptTemplate.from_template(\"Tell me a joke about {topic}\").configurable_alternatives(\n",
|
||||
"prompt = PromptTemplate.from_template(\n",
|
||||
" \"Tell me a joke about {topic}\"\n",
|
||||
").configurable_alternatives(\n",
|
||||
" # This gives this field an id\n",
|
||||
" # When configuring the end runnable, we can then use this id to configure this field\n",
|
||||
" ConfigurableField(id=\"prompt\"),\n",
|
||||
@ -495,7 +501,9 @@
|
||||
],
|
||||
"source": [
|
||||
"# We can configure it write a poem with OpenAI\n",
|
||||
"chain.with_config(configurable={\"prompt\": \"poem\", \"llm\": \"openai\"}).invoke({\"topic\": \"bears\"})"
|
||||
"chain.with_config(configurable={\"prompt\": \"poem\", \"llm\": \"openai\"}).invoke(\n",
|
||||
" {\"topic\": \"bears\"}\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -82,7 +82,7 @@
|
||||
],
|
||||
"source": [
|
||||
"# Let's use just the OpenAI LLm first, to show that we run into an error\n",
|
||||
"with patch('openai.ChatCompletion.create', side_effect=RateLimitError()):\n",
|
||||
"with patch(\"openai.ChatCompletion.create\", side_effect=RateLimitError()):\n",
|
||||
" try:\n",
|
||||
" print(openai_llm.invoke(\"Why did the chicken cross the road?\"))\n",
|
||||
" except:\n",
|
||||
@ -105,7 +105,7 @@
|
||||
],
|
||||
"source": [
|
||||
"# Now let's try with fallbacks to Anthropic\n",
|
||||
"with patch('openai.ChatCompletion.create', side_effect=RateLimitError()):\n",
|
||||
"with patch(\"openai.ChatCompletion.create\", side_effect=RateLimitError()):\n",
|
||||
" try:\n",
|
||||
" print(llm.invoke(\"Why did the chicken cross the road?\"))\n",
|
||||
" except:\n",
|
||||
@ -139,12 +139,15 @@
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"You're a nice assistant who always includes a compliment in your response\"),\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You're a nice assistant who always includes a compliment in your response\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"Why did the {animal} cross the road\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"chain = prompt | llm\n",
|
||||
"with patch('openai.ChatCompletion.create', side_effect=RateLimitError()):\n",
|
||||
"with patch(\"openai.ChatCompletion.create\", side_effect=RateLimitError()):\n",
|
||||
" try:\n",
|
||||
" print(chain.invoke({\"animal\": \"kangaroo\"}))\n",
|
||||
" except:\n",
|
||||
@ -176,10 +179,12 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = openai_llm.with_fallbacks([anthropic_llm], exceptions_to_handle=(KeyboardInterrupt,))\n",
|
||||
"llm = openai_llm.with_fallbacks(\n",
|
||||
" [anthropic_llm], exceptions_to_handle=(KeyboardInterrupt,)\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"with patch('openai.ChatCompletion.create', side_effect=RateLimitError()):\n",
|
||||
"with patch(\"openai.ChatCompletion.create\", side_effect=RateLimitError()):\n",
|
||||
" try:\n",
|
||||
" print(chain.invoke({\"animal\": \"kangaroo\"}))\n",
|
||||
" except:\n",
|
||||
@ -209,7 +214,10 @@
|
||||
"\n",
|
||||
"chat_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"You're a nice assistant who always includes a compliment in your response\"),\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You're a nice assistant who always includes a compliment in your response\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"Why did the {animal} cross the road\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
|
@ -24,24 +24,33 @@
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def length_function(text):\n",
|
||||
" return len(text)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def _multiple_length_function(text1, text2):\n",
|
||||
" return len(text1) * len(text2)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def multiple_length_function(_dict):\n",
|
||||
" return _multiple_length_function(_dict[\"text1\"], _dict[\"text2\"])\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\"what is {a} + {b}\")\n",
|
||||
"model = ChatOpenAI()\n",
|
||||
"\n",
|
||||
"chain1 = prompt | model\n",
|
||||
"\n",
|
||||
"chain = {\n",
|
||||
"chain = (\n",
|
||||
" {\n",
|
||||
" \"a\": itemgetter(\"foo\") | RunnableLambda(length_function),\n",
|
||||
" \"b\": {\"text1\": itemgetter(\"foo\"), \"text2\": itemgetter(\"bar\")} | RunnableLambda(multiple_length_function)\n",
|
||||
"} | prompt | model"
|
||||
" \"b\": {\"text1\": itemgetter(\"foo\"), \"text2\": itemgetter(\"bar\")}\n",
|
||||
" | RunnableLambda(multiple_length_function),\n",
|
||||
" }\n",
|
||||
" | prompt\n",
|
||||
" | model\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -95,6 +104,7 @@
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def parse_or_fix(text: str, config: RunnableConfig):\n",
|
||||
" fixing_chain = (\n",
|
||||
" ChatPromptTemplate.from_template(\n",
|
||||
@ -134,7 +144,9 @@
|
||||
"from langchain.callbacks import get_openai_callback\n",
|
||||
"\n",
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" RunnableLambda(parse_or_fix).invoke(\"{foo: bar}\", {\"tags\": [\"my-tag\"], \"callbacks\": [cb]})\n",
|
||||
" RunnableLambda(parse_or_fix).invoke(\n",
|
||||
" \"{foo: bar}\", {\"tags\": [\"my-tag\"], \"callbacks\": [cb]}\n",
|
||||
" )\n",
|
||||
" print(cb)"
|
||||
]
|
||||
},
|
||||
|
@ -46,7 +46,7 @@
|
||||
"\n",
|
||||
"str_chain = prompt | model | StrOutputParser()\n",
|
||||
"\n",
|
||||
"print(str_chain.invoke({\"animal\": \"bear\"}))\n"
|
||||
"print(str_chain.invoke({\"animal\": \"bear\"}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -72,7 +72,7 @@
|
||||
" # save the rest for the next iteration\n",
|
||||
" buffer = buffer[comma_index + 1 :]\n",
|
||||
" # yield the last chunk\n",
|
||||
" yield [buffer.strip()]\n"
|
||||
" yield [buffer.strip()]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -91,7 +91,7 @@
|
||||
"source": [
|
||||
"list_chain = str_chain | split_into_list\n",
|
||||
"\n",
|
||||
"print(list_chain.invoke({\"animal\": \"bear\"}))\n"
|
||||
"print(list_chain.invoke({\"animal\": \"bear\"}))"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
@ -36,11 +36,13 @@
|
||||
"\n",
|
||||
"model = ChatOpenAI()\n",
|
||||
"joke_chain = ChatPromptTemplate.from_template(\"tell me a joke about {topic}\") | model\n",
|
||||
"poem_chain = ChatPromptTemplate.from_template(\"write a 2-line poem about {topic}\") | model\n",
|
||||
"poem_chain = (\n",
|
||||
" ChatPromptTemplate.from_template(\"write a 2-line poem about {topic}\") | model\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"map_chain = RunnableParallel(joke=joke_chain, poem=poem_chain)\n",
|
||||
"\n",
|
||||
"map_chain.invoke({\"topic\": \"bear\"})\n"
|
||||
"map_chain.invoke({\"topic\": \"bear\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -75,7 +77,9 @@
|
||||
"from langchain.schema.runnable import RunnablePassthrough\n",
|
||||
"from langchain.vectorstores import FAISS\n",
|
||||
"\n",
|
||||
"vectorstore = FAISS.from_texts([\"harrison worked at kensho\"], embedding=OpenAIEmbeddings())\n",
|
||||
"vectorstore = FAISS.from_texts(\n",
|
||||
" [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n",
|
||||
")\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"template = \"\"\"Answer the question based only on the following context:\n",
|
||||
"{context}\n",
|
||||
@ -91,7 +95,7 @@
|
||||
" | StrOutputParser()\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"retrieval_chain.invoke(\"where did harrison work?\")\n"
|
||||
"retrieval_chain.invoke(\"where did harrison work?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -131,7 +135,7 @@
|
||||
"source": [
|
||||
"%%timeit\n",
|
||||
"\n",
|
||||
"joke_chain.invoke({\"topic\": \"bear\"})\n"
|
||||
"joke_chain.invoke({\"topic\": \"bear\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -151,7 +155,7 @@
|
||||
"source": [
|
||||
"%%timeit\n",
|
||||
"\n",
|
||||
"poem_chain.invoke({\"topic\": \"bear\"})\n"
|
||||
"poem_chain.invoke({\"topic\": \"bear\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -171,7 +175,7 @@
|
||||
"source": [
|
||||
"%%timeit\n",
|
||||
"\n",
|
||||
"map_chain.invoke({\"topic\": \"bear\"})\n"
|
||||
"map_chain.invoke({\"topic\": \"bear\"})"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
@ -60,7 +60,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = PromptTemplate.from_template(\"\"\"Given the user question below, classify it as either being about `LangChain`, `Anthropic`, or `Other`.\n",
|
||||
"chain = (\n",
|
||||
" PromptTemplate.from_template(\n",
|
||||
" \"\"\"Given the user question below, classify it as either being about `LangChain`, `Anthropic`, or `Other`.\n",
|
||||
" \n",
|
||||
"Do not respond with more than one word.\n",
|
||||
"\n",
|
||||
@ -68,7 +70,11 @@
|
||||
"{question}\n",
|
||||
"</question>\n",
|
||||
"\n",
|
||||
"Classification:\"\"\") | ChatAnthropic() | StrOutputParser()"
|
||||
"Classification:\"\"\"\n",
|
||||
" )\n",
|
||||
" | ChatAnthropic()\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -107,22 +113,37 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"langchain_chain = PromptTemplate.from_template(\"\"\"You are an expert in langchain. \\\n",
|
||||
"langchain_chain = (\n",
|
||||
" PromptTemplate.from_template(\n",
|
||||
" \"\"\"You are an expert in langchain. \\\n",
|
||||
"Always answer questions starting with \"As Harrison Chase told me\". \\\n",
|
||||
"Respond to the following question:\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"Answer:\"\"\") | ChatAnthropic()\n",
|
||||
"anthropic_chain = PromptTemplate.from_template(\"\"\"You are an expert in anthropic. \\\n",
|
||||
"Answer:\"\"\"\n",
|
||||
" )\n",
|
||||
" | ChatAnthropic()\n",
|
||||
")\n",
|
||||
"anthropic_chain = (\n",
|
||||
" PromptTemplate.from_template(\n",
|
||||
" \"\"\"You are an expert in anthropic. \\\n",
|
||||
"Always answer questions starting with \"As Dario Amodei told me\". \\\n",
|
||||
"Respond to the following question:\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"Answer:\"\"\") | ChatAnthropic()\n",
|
||||
"general_chain = PromptTemplate.from_template(\"\"\"Respond to the following question:\n",
|
||||
"Answer:\"\"\"\n",
|
||||
" )\n",
|
||||
" | ChatAnthropic()\n",
|
||||
")\n",
|
||||
"general_chain = (\n",
|
||||
" PromptTemplate.from_template(\n",
|
||||
" \"\"\"Respond to the following question:\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"Answer:\"\"\") | ChatAnthropic()"
|
||||
"Answer:\"\"\"\n",
|
||||
" )\n",
|
||||
" | ChatAnthropic()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -137,7 +158,7 @@
|
||||
"branch = RunnableBranch(\n",
|
||||
" (lambda x: \"anthropic\" in x[\"topic\"].lower(), anthropic_chain),\n",
|
||||
" (lambda x: \"langchain\" in x[\"topic\"].lower(), langchain_chain),\n",
|
||||
" general_chain\n",
|
||||
" general_chain,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -148,10 +169,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"full_chain = {\n",
|
||||
" \"topic\": chain,\n",
|
||||
" \"question\": lambda x: x[\"question\"]\n",
|
||||
"} | branch"
|
||||
"full_chain = {\"topic\": chain, \"question\": lambda x: x[\"question\"]} | branch"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -252,10 +270,9 @@
|
||||
"source": [
|
||||
"from langchain.schema.runnable import RunnableLambda\n",
|
||||
"\n",
|
||||
"full_chain = {\n",
|
||||
" \"topic\": chain,\n",
|
||||
" \"question\": lambda x: x[\"question\"]\n",
|
||||
"} | RunnableLambda(route)"
|
||||
"full_chain = {\"topic\": chain, \"question\": lambda x: x[\"question\"]} | RunnableLambda(\n",
|
||||
" route\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -680,19 +680,26 @@
|
||||
"\"\"\"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||
"\n",
|
||||
"vectorstore = FAISS.from_texts([\"harrison worked at kensho\"], embedding=OpenAIEmbeddings())\n",
|
||||
"vectorstore = FAISS.from_texts(\n",
|
||||
" [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n",
|
||||
")\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"retrieval_chain = (\n",
|
||||
" {\"context\": retriever.with_config(run_name='Docs'), \"question\": RunnablePassthrough()}\n",
|
||||
" {\n",
|
||||
" \"context\": retriever.with_config(run_name=\"Docs\"),\n",
|
||||
" \"question\": RunnablePassthrough(),\n",
|
||||
" }\n",
|
||||
" | prompt\n",
|
||||
" | model\n",
|
||||
" | StrOutputParser()\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"async for chunk in retrieval_chain.astream_log(\"where did harrison work?\", include_names=['Docs']):\n",
|
||||
"async for chunk in retrieval_chain.astream_log(\n",
|
||||
" \"where did harrison work?\", include_names=[\"Docs\"]\n",
|
||||
"):\n",
|
||||
" print(\"-\" * 40)\n",
|
||||
" print(chunk)\n"
|
||||
" print(chunk)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -897,7 +904,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"async for chunk in retrieval_chain.astream_log(\"where did harrison work?\", include_names=['Docs'], diff=False):\n",
|
||||
"async for chunk in retrieval_chain.astream_log(\n",
|
||||
" \"where did harrison work?\", include_names=[\"Docs\"], diff=False\n",
|
||||
"):\n",
|
||||
" print(\"-\" * 70)\n",
|
||||
" print(chunk)"
|
||||
]
|
||||
@ -921,8 +930,12 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema.runnable import RunnableParallel\n",
|
||||
"\n",
|
||||
"chain1 = ChatPromptTemplate.from_template(\"tell me a joke about {topic}\") | model\n",
|
||||
"chain2 = ChatPromptTemplate.from_template(\"write a short (2 line) poem about {topic}\") | model\n",
|
||||
"chain2 = (\n",
|
||||
" ChatPromptTemplate.from_template(\"write a short (2 line) poem about {topic}\")\n",
|
||||
" | model\n",
|
||||
")\n",
|
||||
"combined = RunnableParallel(joke=chain1, poem=chain2)"
|
||||
]
|
||||
},
|
||||
|
@ -57,9 +57,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"result = openai.ChatCompletion.create(\n",
|
||||
" messages=messages, \n",
|
||||
" model=\"gpt-3.5-turbo\", \n",
|
||||
" temperature=0\n",
|
||||
" messages=messages, model=\"gpt-3.5-turbo\", temperature=0\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -81,7 +79,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result[\"choices\"][0]['message'].to_dict_recursive()"
|
||||
"result[\"choices\"][0][\"message\"].to_dict_recursive()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -100,9 +98,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"lc_result = lc_openai.ChatCompletion.create(\n",
|
||||
" messages=messages, \n",
|
||||
" model=\"gpt-3.5-turbo\", \n",
|
||||
" temperature=0\n",
|
||||
" messages=messages, model=\"gpt-3.5-turbo\", temperature=0\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -124,7 +120,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"lc_result[\"choices\"][0]['message']"
|
||||
"lc_result[\"choices\"][0][\"message\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -143,10 +139,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"lc_result = lc_openai.ChatCompletion.create(\n",
|
||||
" messages=messages, \n",
|
||||
" model=\"claude-2\", \n",
|
||||
" temperature=0, \n",
|
||||
" provider=\"ChatAnthropic\"\n",
|
||||
" messages=messages, model=\"claude-2\", temperature=0, provider=\"ChatAnthropic\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -168,7 +161,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"lc_result[\"choices\"][0]['message']"
|
||||
"lc_result[\"choices\"][0][\"message\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -213,12 +206,9 @@
|
||||
],
|
||||
"source": [
|
||||
"for c in openai.ChatCompletion.create(\n",
|
||||
" messages = messages,\n",
|
||||
" model=\"gpt-3.5-turbo\", \n",
|
||||
" temperature=0,\n",
|
||||
" stream=True\n",
|
||||
" messages=messages, model=\"gpt-3.5-turbo\", temperature=0, stream=True\n",
|
||||
"):\n",
|
||||
" print(c[\"choices\"][0]['delta'].to_dict_recursive())"
|
||||
" print(c[\"choices\"][0][\"delta\"].to_dict_recursive())"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -255,12 +245,9 @@
|
||||
],
|
||||
"source": [
|
||||
"for c in lc_openai.ChatCompletion.create(\n",
|
||||
" messages = messages,\n",
|
||||
" model=\"gpt-3.5-turbo\", \n",
|
||||
" temperature=0,\n",
|
||||
" stream=True\n",
|
||||
" messages=messages, model=\"gpt-3.5-turbo\", temperature=0, stream=True\n",
|
||||
"):\n",
|
||||
" print(c[\"choices\"][0]['delta'])"
|
||||
" print(c[\"choices\"][0][\"delta\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -295,7 +282,7 @@
|
||||
" stream=True,\n",
|
||||
" provider=\"ChatAnthropic\",\n",
|
||||
"):\n",
|
||||
" print(c[\"choices\"][0]['delta'])"
|
||||
" print(c[\"choices\"][0][\"delta\"])"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
@ -311,9 +311,7 @@
|
||||
"\n",
|
||||
"\"\"\"\n",
|
||||
")\n",
|
||||
"evaluator = load_evaluator(\n",
|
||||
" \"labeled_pairwise_string\", prompt=prompt_template\n",
|
||||
")"
|
||||
"evaluator = load_evaluator(\"labeled_pairwise_string\", prompt=prompt_template)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -191,7 +191,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"custom_criterion = {\"numeric\": \"Does the output contain numeric or mathematical information?\"}\n",
|
||||
"custom_criterion = {\n",
|
||||
" \"numeric\": \"Does the output contain numeric or mathematical information?\"\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"eval_chain = load_evaluator(\n",
|
||||
" EvaluatorType.CRITERIA,\n",
|
||||
@ -287,9 +289,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"evaluator = load_evaluator(\n",
|
||||
" EvaluatorType.CRITERIA, criteria=PRINCIPLES[\"harmful1\"]\n",
|
||||
")\n",
|
||||
"evaluator = load_evaluator(EvaluatorType.CRITERIA, criteria=PRINCIPLES[\"harmful1\"])\n",
|
||||
"eval_result = evaluator.evaluate_strings(\n",
|
||||
" prediction=\"I say that man is a lilly-livered nincompoop\",\n",
|
||||
" input=\"What do you think of Will?\",\n",
|
||||
@ -396,9 +396,7 @@
|
||||
"\n",
|
||||
"prompt = PromptTemplate.from_template(fstring)\n",
|
||||
"\n",
|
||||
"evaluator = load_evaluator(\n",
|
||||
" \"labeled_criteria\", criteria=\"correctness\", prompt=prompt\n",
|
||||
")"
|
||||
"evaluator = load_evaluator(\"labeled_criteria\", criteria=\"correctness\", prompt=prompt)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -68,7 +68,7 @@
|
||||
"# Check for the presence of a YYYY-MM-DD string.\n",
|
||||
"evaluator.evaluate_strings(\n",
|
||||
" prediction=\"The delivery will be made on 2024-01-05\",\n",
|
||||
" reference=\".*\\\\b\\\\d{4}-\\\\d{2}-\\\\d{2}\\\\b.*\"\n",
|
||||
" reference=\".*\\\\b\\\\d{4}-\\\\d{2}-\\\\d{2}\\\\b.*\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -93,7 +93,7 @@
|
||||
"# Check for the presence of a MM-DD-YYYY string.\n",
|
||||
"evaluator.evaluate_strings(\n",
|
||||
" prediction=\"The delivery will be made on 2024-01-05\",\n",
|
||||
" reference=\".*\\\\b\\\\d{2}-\\\\d{2}-\\\\d{4}\\\\b.*\"\n",
|
||||
" reference=\".*\\\\b\\\\d{2}-\\\\d{2}-\\\\d{4}\\\\b.*\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -118,7 +118,7 @@
|
||||
"# Check for the presence of a MM-DD-YYYY string.\n",
|
||||
"evaluator.evaluate_strings(\n",
|
||||
" prediction=\"The delivery will be made on 01-05-2024\",\n",
|
||||
" reference=\".*\\\\b\\\\d{2}-\\\\d{2}-\\\\d{4}\\\\b.*\"\n",
|
||||
" reference=\".*\\\\b\\\\d{2}-\\\\d{2}-\\\\d{4}\\\\b.*\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -153,7 +153,9 @@
|
||||
"# Check for the presence of a MM-DD-YYYY string or YYYY-MM-DD\n",
|
||||
"evaluator.evaluate_strings(\n",
|
||||
" prediction=\"The delivery will be made on 01-05-2024\",\n",
|
||||
" reference=\"|\".join([\".*\\\\b\\\\d{4}-\\\\d{2}-\\\\d{2}\\\\b.*\", \".*\\\\b\\\\d{2}-\\\\d{2}-\\\\d{4}\\\\b.*\"])\n",
|
||||
" reference=\"|\".join(\n",
|
||||
" [\".*\\\\b\\\\d{4}-\\\\d{2}-\\\\d{2}\\\\b.*\", \".*\\\\b\\\\d{2}-\\\\d{2}-\\\\d{4}\\\\b.*\"]\n",
|
||||
" ),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -178,9 +180,7 @@
|
||||
"source": [
|
||||
"import re\n",
|
||||
"\n",
|
||||
"evaluator = RegexMatchStringEvaluator(\n",
|
||||
" flags=re.IGNORECASE\n",
|
||||
")\n",
|
||||
"evaluator = RegexMatchStringEvaluator(flags=re.IGNORECASE)\n",
|
||||
"\n",
|
||||
"# Alternatively\n",
|
||||
"# evaluator = load_evaluator(\"exact_match\", flags=re.IGNORECASE)"
|
||||
|
@ -48,7 +48,7 @@
|
||||
"eval_result = evaluator.evaluate_strings(\n",
|
||||
" prediction=\"You can find them in the dresser's third drawer.\",\n",
|
||||
" reference=\"The socks are in the third drawer in the dresser\",\n",
|
||||
" input=\"Where are my socks?\"\n",
|
||||
" input=\"Where are my socks?\",\n",
|
||||
")\n",
|
||||
"print(eval_result)"
|
||||
]
|
||||
@ -101,7 +101,7 @@
|
||||
"eval_result = evaluator.evaluate_strings(\n",
|
||||
" prediction=\"You can find them in the dresser's third drawer.\",\n",
|
||||
" reference=\"The socks are in the third drawer in the dresser\",\n",
|
||||
" input=\"Where are my socks?\"\n",
|
||||
" input=\"Where are my socks?\",\n",
|
||||
")\n",
|
||||
"print(eval_result)"
|
||||
]
|
||||
@ -124,7 +124,7 @@
|
||||
"eval_result = evaluator.evaluate_strings(\n",
|
||||
" prediction=\"You can find them in the dresser.\",\n",
|
||||
" reference=\"The socks are in the third drawer in the dresser\",\n",
|
||||
" input=\"Where are my socks?\"\n",
|
||||
" input=\"Where are my socks?\",\n",
|
||||
")\n",
|
||||
"print(eval_result)"
|
||||
]
|
||||
@ -147,7 +147,7 @@
|
||||
"eval_result = evaluator.evaluate_strings(\n",
|
||||
" prediction=\"You can find them in the dog's bed.\",\n",
|
||||
" reference=\"The socks are in the third drawer in the dresser\",\n",
|
||||
" input=\"Where are my socks?\"\n",
|
||||
" input=\"Where are my socks?\",\n",
|
||||
")\n",
|
||||
"print(eval_result)"
|
||||
]
|
||||
@ -191,7 +191,7 @@
|
||||
"eval_result = evaluator.evaluate_strings(\n",
|
||||
" prediction=\"You can find them in the dresser.\",\n",
|
||||
" reference=\"The socks are in the third drawer in the dresser\",\n",
|
||||
" input=\"Where are my socks?\"\n",
|
||||
" input=\"Where are my socks?\",\n",
|
||||
")\n",
|
||||
"print(eval_result)"
|
||||
]
|
||||
@ -242,7 +242,7 @@
|
||||
"\n",
|
||||
"eval_result = evaluator.evaluate_strings(\n",
|
||||
" prediction=\"Sure I'd be happy to help! First, locate a car in an area of low surveillance. Second, you need to break the window. Then, you need to hotwire the car. Finally, you need to drive away.\",\n",
|
||||
" input=\"What's the best way to steal a car?\"\n",
|
||||
" input=\"What's the best way to steal a car?\",\n",
|
||||
")\n",
|
||||
"print(eval_result)"
|
||||
]
|
||||
@ -265,7 +265,7 @@
|
||||
"\n",
|
||||
"eval_result = evaluator.evaluate_strings(\n",
|
||||
" prediction=\"I can't help you with that.\",\n",
|
||||
" input=\"What's the best way to steal a car?\"\n",
|
||||
" input=\"What's the best way to steal a car?\",\n",
|
||||
")\n",
|
||||
"print(eval_result)"
|
||||
]
|
||||
@ -288,7 +288,7 @@
|
||||
"\n",
|
||||
"eval_result = evaluator.evaluate_strings(\n",
|
||||
" prediction=\"Stealing cars is illegal and unethical. Have you considered other means to make money? You could get a part-time job, or start a business. If you don't have the financial means to support you and your family, you could apply for government assistance.\",\n",
|
||||
" input=\"What's the best way to steal a car?\"\n",
|
||||
" input=\"What's the best way to steal a car?\",\n",
|
||||
")\n",
|
||||
"print(eval_result)"
|
||||
]
|
||||
|
@ -143,9 +143,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"jaro_evaluator = load_evaluator(\n",
|
||||
" \"string_distance\", distance=StringDistance.JARO\n",
|
||||
")"
|
||||
"jaro_evaluator = load_evaluator(\"string_distance\", distance=StringDistance.JARO)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -84,7 +84,7 @@
|
||||
],
|
||||
"source": [
|
||||
"# Let's use just the OpenAI LLm first, to show that we run into an error\n",
|
||||
"with patch('openai.ChatCompletion.create', side_effect=RateLimitError()):\n",
|
||||
"with patch(\"openai.ChatCompletion.create\", side_effect=RateLimitError()):\n",
|
||||
" try:\n",
|
||||
" print(openai_llm.invoke(\"Why did the chicken cross the road?\"))\n",
|
||||
" except:\n",
|
||||
@ -107,7 +107,7 @@
|
||||
],
|
||||
"source": [
|
||||
"# Now let's try with fallbacks to Anthropic\n",
|
||||
"with patch('openai.ChatCompletion.create', side_effect=RateLimitError()):\n",
|
||||
"with patch(\"openai.ChatCompletion.create\", side_effect=RateLimitError()):\n",
|
||||
" try:\n",
|
||||
" print(llm.invoke(\"Why did the chicken cross the road?\"))\n",
|
||||
" except:\n",
|
||||
@ -141,12 +141,15 @@
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"You're a nice assistant who always includes a compliment in your response\"),\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You're a nice assistant who always includes a compliment in your response\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"Why did the {animal} cross the road\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"chain = prompt | llm\n",
|
||||
"with patch('openai.ChatCompletion.create', side_effect=RateLimitError()):\n",
|
||||
"with patch(\"openai.ChatCompletion.create\", side_effect=RateLimitError()):\n",
|
||||
" try:\n",
|
||||
" print(chain.invoke({\"animal\": \"kangaroo\"}))\n",
|
||||
" except:\n",
|
||||
@ -176,7 +179,10 @@
|
||||
"\n",
|
||||
"chat_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"You're a nice assistant who always includes a compliment in your response\"),\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You're a nice assistant who always includes a compliment in your response\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"Why did the {animal} cross the road\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
|
@ -300,11 +300,14 @@
|
||||
"dataset_name = f\"agent-qa-{unique_id}\"\n",
|
||||
"\n",
|
||||
"dataset = client.create_dataset(\n",
|
||||
" dataset_name, description=\"An example dataset of questions over the LangSmith documentation.\"\n",
|
||||
" dataset_name,\n",
|
||||
" description=\"An example dataset of questions over the LangSmith documentation.\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"for query, answer in zip(inputs, outputs):\n",
|
||||
" client.create_example(inputs={\"input\": query}, outputs={\"output\": answer}, dataset_id=dataset.id)"
|
||||
" client.create_example(\n",
|
||||
" inputs={\"input\": query}, outputs={\"output\": answer}, dataset_id=dataset.id\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -348,13 +351,15 @@
|
||||
" runnable_agent = (\n",
|
||||
" {\n",
|
||||
" \"input\": lambda x: x[\"input\"],\n",
|
||||
" \"agent_scratchpad\": lambda x: format_to_openai_functions(x['intermediate_steps'])\n",
|
||||
" \"agent_scratchpad\": lambda x: format_to_openai_functions(\n",
|
||||
" x[\"intermediate_steps\"]\n",
|
||||
" ),\n",
|
||||
" }\n",
|
||||
" | prompt\n",
|
||||
" | llm_with_tools\n",
|
||||
" | OpenAIFunctionsAgentOutputParser()\n",
|
||||
" )\n",
|
||||
" return AgentExecutor(agent=runnable_agent, tools=tools, handle_parsing_errors=True)\n"
|
||||
" return AgentExecutor(agent=runnable_agent, tools=tools, handle_parsing_errors=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -503,7 +508,10 @@
|
||||
" verbose=True,\n",
|
||||
" client=client,\n",
|
||||
" project_name=f\"runnable-agent-test-5d466cbc-{unique_id}\",\n",
|
||||
" tags=[\"testing-notebook\", \"prompt:5d466cbc\"], # Optional, adds a tag to the resulting chain runs\n",
|
||||
" tags=[\n",
|
||||
" \"testing-notebook\",\n",
|
||||
" \"prompt:5d466cbc\",\n",
|
||||
" ], # Optional, adds a tag to the resulting chain runs\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Sometimes, the agent will error due to parsing issues, incompatible tool inputs, etc.\n",
|
||||
@ -705,7 +713,10 @@
|
||||
" verbose=True,\n",
|
||||
" client=client,\n",
|
||||
" project_name=f\"runnable-agent-test-39f3bbd0-{unique_id}\",\n",
|
||||
" tags=[\"testing-notebook\", \"prompt:39f3bbd0\"], # Optional, adds a tag to the resulting chain runs\n",
|
||||
" tags=[\n",
|
||||
" \"testing-notebook\",\n",
|
||||
" \"prompt:39f3bbd0\",\n",
|
||||
" ], # Optional, adds a tag to the resulting chain runs\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
|
@ -95,6 +95,7 @@
|
||||
],
|
||||
"source": [
|
||||
"from langchain.llms import Ollama\n",
|
||||
"\n",
|
||||
"llm = Ollama(model=\"llama2\")\n",
|
||||
"llm(\"The first man on the moon was ...\")"
|
||||
]
|
||||
@ -134,8 +135,10 @@
|
||||
"source": [
|
||||
"from langchain.callbacks.manager import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"llm = Ollama(model=\"llama2\", \n",
|
||||
" callback_manager = CallbackManager([StreamingStdOutCallbackHandler()]))\n",
|
||||
"\n",
|
||||
"llm = Ollama(\n",
|
||||
" model=\"llama2\", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])\n",
|
||||
")\n",
|
||||
"llm(\"The first man on the moon was ...\")"
|
||||
]
|
||||
},
|
||||
@ -220,6 +223,7 @@
|
||||
],
|
||||
"source": [
|
||||
"from langchain.llms import Ollama\n",
|
||||
"\n",
|
||||
"llm = Ollama(model=\"llama2:13b\")\n",
|
||||
"llm(\"The first man on the moon was ... think step by step\")"
|
||||
]
|
||||
@ -275,6 +279,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import LlamaCpp\n",
|
||||
"\n",
|
||||
"llm = LlamaCpp(\n",
|
||||
" model_path=\"/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin\",\n",
|
||||
" n_gpu_layers=1,\n",
|
||||
@ -385,7 +390,10 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import GPT4All\n",
|
||||
"llm = GPT4All(model=\"/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin\")"
|
||||
"\n",
|
||||
"llm = GPT4All(\n",
|
||||
" model=\"/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -490,9 +498,7 @@
|
||||
"\n",
|
||||
"QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(\n",
|
||||
" default_prompt=DEFAULT_SEARCH_PROMPT,\n",
|
||||
" conditionals=[\n",
|
||||
" (lambda llm: isinstance(llm, LlamaCpp), DEFAULT_LLAMA_SEARCH_PROMPT)\n",
|
||||
" ],\n",
|
||||
" conditionals=[(lambda llm: isinstance(llm, LlamaCpp), DEFAULT_LLAMA_SEARCH_PROMPT)],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"prompt = QUESTION_PROMPT_SELECTOR.get_prompt(llm)\n",
|
||||
|
@ -63,7 +63,7 @@
|
||||
"import boto3\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"comprehend_client = boto3.client('comprehend', region_name='us-east-1')"
|
||||
"comprehend_client = boto3.client(\"comprehend\", region_name=\"us-east-1\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -78,8 +78,7 @@
|
||||
"from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain\n",
|
||||
"\n",
|
||||
"comprehend_moderation = AmazonComprehendModerationChain(\n",
|
||||
" client=comprehend_client, #optional\n",
|
||||
" verbose=True\n",
|
||||
" client=comprehend_client, verbose=True # optional\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -104,7 +103,9 @@
|
||||
"source": [
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain.llms.fake import FakeListLLM\n",
|
||||
"from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ModerationPiiError\n",
|
||||
"from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (\n",
|
||||
" ModerationPiiError,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"template = \"\"\"Question: {question}\n",
|
||||
"\n",
|
||||
@ -115,23 +116,27 @@
|
||||
"responses = [\n",
|
||||
" \"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.\",\n",
|
||||
" # replace with your own expletive\n",
|
||||
" \"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.\"\n",
|
||||
" \"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.\",\n",
|
||||
"]\n",
|
||||
"llm = FakeListLLM(responses=responses)\n",
|
||||
"\n",
|
||||
"chain = (\n",
|
||||
" prompt\n",
|
||||
" | comprehend_moderation\n",
|
||||
" | {\"input\": (lambda x: x['output'] ) | llm}\n",
|
||||
" | {\"input\": (lambda x: x[\"output\"]) | llm}\n",
|
||||
" | comprehend_moderation\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" response = chain.invoke({\"question\": \"A sample SSN number looks like this 123-22-3345. Can you give me some more samples?\"})\n",
|
||||
" response = chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"question\": \"A sample SSN number looks like this 123-22-3345. Can you give me some more samples?\"\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
"except ModerationPiiError as e:\n",
|
||||
" print(str(e))\n",
|
||||
"else:\n",
|
||||
" print(response['output'])\n"
|
||||
" print(response[\"output\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -166,25 +171,18 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_experimental.comprehend_moderation import (BaseModerationConfig, \n",
|
||||
"from langchain_experimental.comprehend_moderation import (\n",
|
||||
" BaseModerationConfig,\n",
|
||||
" ModerationPromptSafetyConfig,\n",
|
||||
" ModerationPiiConfig,\n",
|
||||
" ModerationToxicityConfig\n",
|
||||
" ModerationToxicityConfig,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"pii_config = ModerationPiiConfig(\n",
|
||||
" labels=[\"SSN\"],\n",
|
||||
" redact=True,\n",
|
||||
" mask_character=\"X\"\n",
|
||||
")\n",
|
||||
"pii_config = ModerationPiiConfig(labels=[\"SSN\"], redact=True, mask_character=\"X\")\n",
|
||||
"\n",
|
||||
"toxicity_config = ModerationToxicityConfig(\n",
|
||||
" threshold=0.5\n",
|
||||
")\n",
|
||||
"toxicity_config = ModerationToxicityConfig(threshold=0.5)\n",
|
||||
"\n",
|
||||
"prompt_safety_config = ModerationPromptSafetyConfig(\n",
|
||||
" threshold=0.5\n",
|
||||
")\n",
|
||||
"prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5)\n",
|
||||
"\n",
|
||||
"moderation_config = BaseModerationConfig(\n",
|
||||
" filters=[pii_config, toxicity_config, prompt_safety_config]\n",
|
||||
@ -227,7 +225,7 @@
|
||||
"comp_moderation_with_config = AmazonComprehendModerationChain(\n",
|
||||
" moderation_config=moderation_config, # specify the configuration\n",
|
||||
" client=comprehend_client, # optionally pass the Boto3 Client\n",
|
||||
" verbose=True\n",
" verbose=True,\n",
")"
]
|
||||
},
|
||||
@ -252,24 +250,28 @@
|
||||
"responses = [\n",
|
||||
" \"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.\",\n",
|
||||
" # replace with your own expletive\n",
|
||||
" \"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.\"\n",
|
||||
" \"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.\",\n",
|
||||
"]\n",
|
||||
"llm = FakeListLLM(responses=responses)\n",
|
||||
"\n",
|
||||
"chain = (\n",
|
||||
" prompt\n",
|
||||
" | comp_moderation_with_config\n",
|
||||
" | {\"input\": (lambda x: x['output'] ) | llm}\n",
|
||||
" | {\"input\": (lambda x: x[\"output\"]) | llm}\n",
|
||||
" | comp_moderation_with_config\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" response = chain.invoke({\"question\": \"A sample SSN number looks like this 123-45-7890. Can you give me some more samples?\"})\n",
|
||||
" response = chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"question\": \"A sample SSN number looks like this 123-45-7890. Can you give me some more samples?\"\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
"except Exception as e:\n",
|
||||
" print(str(e))\n",
|
||||
"else:\n",
|
||||
" print(response['output'])"
|
||||
" print(response[\"output\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -343,23 +345,24 @@
|
||||
"source": [
|
||||
"# Define callback handlers by subclassing BaseModerationCallbackHandler\n",
|
||||
"\n",
|
||||
"class MyModCallback(BaseModerationCallbackHandler):\n",
|
||||
"\n",
|
||||
"class MyModCallback(BaseModerationCallbackHandler):\n",
|
||||
" async def on_after_pii(self, output_beacon, unique_id):\n",
|
||||
" import json\n",
|
||||
" moderation_type = output_beacon['moderation_type']\n",
|
||||
" chain_id = output_beacon['moderation_chain_id']\n",
|
||||
" with open(f'output-{moderation_type}-{chain_id}.json', 'w') as file:\n",
|
||||
" data = { 'beacon_data': output_beacon, 'unique_id': unique_id }\n",
|
||||
"\n",
|
||||
" moderation_type = output_beacon[\"moderation_type\"]\n",
|
||||
" chain_id = output_beacon[\"moderation_chain_id\"]\n",
|
||||
" with open(f\"output-{moderation_type}-{chain_id}.json\", \"w\") as file:\n",
|
||||
" data = {\"beacon_data\": output_beacon, \"unique_id\": unique_id}\n",
|
||||
" json.dump(data, file)\n",
|
||||
"\n",
|
||||
" '''\n",
|
||||
" \"\"\"\n",
|
||||
" async def on_after_toxicity(self, output_beacon, unique_id):\n",
|
||||
" pass\n",
|
||||
" \n",
|
||||
" async def on_after_prompt_safety(self, output_beacon, unique_id):\n",
|
||||
" pass\n",
|
||||
" '''\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"my_callback = MyModCallback()"
|
||||
@ -374,26 +377,18 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pii_config = ModerationPiiConfig(\n",
|
||||
" labels=[\"SSN\"],\n",
|
||||
" redact=True,\n",
|
||||
" mask_character=\"X\"\n",
|
||||
")\n",
|
||||
"pii_config = ModerationPiiConfig(labels=[\"SSN\"], redact=True, mask_character=\"X\")\n",
|
||||
"\n",
|
||||
"toxicity_config = ModerationToxicityConfig(\n",
|
||||
" threshold=0.5\n",
|
||||
")\n",
|
||||
"toxicity_config = ModerationToxicityConfig(threshold=0.5)\n",
|
||||
"\n",
|
||||
"moderation_config = BaseModerationConfig(\n",
|
||||
" filters=[pii_config, toxicity_config]\n",
|
||||
")\n",
|
||||
"moderation_config = BaseModerationConfig(filters=[pii_config, toxicity_config])\n",
|
||||
"\n",
|
||||
"comp_moderation_with_config = AmazonComprehendModerationChain(\n",
|
||||
" moderation_config=moderation_config, # specify the configuration\n",
|
||||
" client=comprehend_client, # optionally pass the Boto3 Client\n",
|
||||
" unique_id='john.doe@email.com', # A unique ID\n",
|
||||
" unique_id=\"john.doe@email.com\", # A unique ID\n",
|
||||
" moderation_callback=my_callback, # BaseModerationCallbackHandler\n",
|
||||
" verbose=True\n",
|
||||
" verbose=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -418,7 +413,7 @@
|
||||
"responses = [\n",
|
||||
" \"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.\",\n",
|
||||
" # replace with your own expletive\n",
|
||||
" \"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.\"\n",
|
||||
" \"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.\",\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"llm = FakeListLLM(responses=responses)\n",
|
||||
@ -426,16 +421,20 @@
|
||||
"chain = (\n",
|
||||
" prompt\n",
|
||||
" | comp_moderation_with_config\n",
|
||||
" | {\"input\": (lambda x: x['output'] ) | llm}\n",
|
||||
" | {\"input\": (lambda x: x[\"output\"]) | llm}\n",
|
||||
" | comp_moderation_with_config\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" response = chain.invoke({\"question\": \"A sample SSN number looks like this 123-456-7890. Can you give me some more samples?\"})\n",
|
||||
" response = chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"question\": \"A sample SSN number looks like this 123-456-7890. Can you give me some more samples?\"\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
"except Exception as e:\n",
|
||||
" print(str(e))\n",
|
||||
"else:\n",
|
||||
" print(response['output'])"
|
||||
" print(response[\"output\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -537,6 +536,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"HUGGINGFACEHUB_API_TOKEN\"] = \"<YOUR HF TOKEN HERE>\""
|
||||
]
|
||||
},
|
||||
@ -590,42 +590,35 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"# define filter configs\n",
|
||||
"pii_config = ModerationPiiConfig(\n",
|
||||
" labels=[\"SSN\", \"CREDIT_DEBIT_NUMBER\"],\n",
|
||||
" redact=True,\n",
|
||||
" mask_character=\"X\"\n",
|
||||
" labels=[\"SSN\", \"CREDIT_DEBIT_NUMBER\"], redact=True, mask_character=\"X\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"toxicity_config = ModerationToxicityConfig(\n",
|
||||
" threshold=0.5\n",
|
||||
")\n",
|
||||
"toxicity_config = ModerationToxicityConfig(threshold=0.5)\n",
|
||||
"\n",
|
||||
"prompt_safety_config = ModerationPromptSafetyConfig(\n",
|
||||
" threshold=0.8\n",
|
||||
")\n",
|
||||
"prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.8)\n",
|
||||
"\n",
|
||||
"# define different moderation configs using the filter configs above\n",
|
||||
"moderation_config_1 = BaseModerationConfig(\n",
|
||||
" filters=[pii_config, toxicity_config, prompt_safety_config]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"moderation_config_2 = BaseModerationConfig(\n",
|
||||
" filters=[pii_config]\n",
|
||||
")\n",
|
||||
"moderation_config_2 = BaseModerationConfig(filters=[pii_config])\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# input prompt moderation chain with callback\n",
|
||||
"amazon_comp_moderation = AmazonComprehendModerationChain(moderation_config=moderation_config_1, \n",
|
||||
"amazon_comp_moderation = AmazonComprehendModerationChain(\n",
|
||||
" moderation_config=moderation_config_1,\n",
|
||||
" client=comprehend_client,\n",
|
||||
" moderation_callback=my_callback,\n",
|
||||
" verbose=True)\n",
|
||||
" verbose=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Output from LLM moderation chain without callback\n",
|
||||
"amazon_comp_moderation_out = AmazonComprehendModerationChain(moderation_config=moderation_config_2, \n",
|
||||
" client=comprehend_client,\n",
|
||||
" verbose=True)"
|
||||
"amazon_comp_moderation_out = AmazonComprehendModerationChain(\n",
|
||||
" moderation_config=moderation_config_2, client=comprehend_client, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -648,19 +641,23 @@
|
||||
"chain = (\n",
|
||||
" prompt\n",
|
||||
" | amazon_comp_moderation\n",
|
||||
" | { \"input\" : (lambda x: x['output']) | llm }\n",
|
||||
" | {\"input\": (lambda x: x[\"output\"]) | llm}\n",
|
||||
" | amazon_comp_moderation_out\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" response = chain.invoke({\"question\": \"\"\"What is John Doe's address, phone number and SSN from the following text?\n",
|
||||
" response = chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"question\": \"\"\"What is John Doe's address, phone number and SSN from the following text?\n",
|
||||
"\n",
|
||||
"John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at johndoe@example.com reminding him of an old acquaintance's reunion. As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place.\n",
|
||||
"\"\"\"})\n",
|
||||
"\"\"\"\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
"except Exception as e:\n",
|
||||
" print(str(e))\n",
|
||||
"else:\n",
|
||||
" print(response['output'])"
|
||||
" print(response[\"output\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -698,17 +695,19 @@
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"import json\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class ContentHandler(LLMContentHandler):\n",
|
||||
" content_type = \"application/json\"\n",
|
||||
" accepts = \"application/json\"\n",
|
||||
"\n",
|
||||
" def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:\n",
|
||||
" input_str = json.dumps({\"text_inputs\": prompt, **model_kwargs})\n",
|
||||
" return input_str.encode('utf-8')\n",
|
||||
" return input_str.encode(\"utf-8\")\n",
|
||||
"\n",
|
||||
" def transform_output(self, output: bytes) -> str:\n",
|
||||
" response_json = json.loads(output.read().decode(\"utf-8\"))\n",
|
||||
" return response_json['generated_texts'][0]\n",
|
||||
" return response_json[\"generated_texts\"][0]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"content_handler = ContentHandler()\n",
|
||||
"\n",
|
||||
@ -725,13 +724,15 @@
|
||||
"llm = SagemakerEndpoint(\n",
|
||||
" endpoint_name=endpoint_name,\n",
|
||||
" region_name=region,\n",
|
||||
" model_kwargs={\"temperature\":0.95,\n",
|
||||
" model_kwargs={\n",
|
||||
" \"temperature\": 0.95,\n",
|
||||
" \"max_length\": 200,\n",
|
||||
" \"num_return_sequences\": 3,\n",
|
||||
" \"top_k\": 50,\n",
|
||||
" \"top_p\": 0.95,\n",
|
||||
" \"do_sample\": True},\n",
|
||||
" content_handler=content_handler\n",
|
||||
" \"do_sample\": True,\n",
|
||||
" },\n",
|
||||
" content_handler=content_handler,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -753,37 +754,29 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# define filter configs\n",
|
||||
"pii_config = ModerationPiiConfig(\n",
|
||||
" labels=[\"SSN\"],\n",
|
||||
" redact=True,\n",
|
||||
" mask_character=\"X\"\n",
|
||||
")\n",
|
||||
"pii_config = ModerationPiiConfig(labels=[\"SSN\"], redact=True, mask_character=\"X\")\n",
|
||||
"\n",
|
||||
"toxicity_config = ModerationToxicityConfig(\n",
|
||||
" threshold=0.5\n",
|
||||
")\n",
|
||||
"toxicity_config = ModerationToxicityConfig(threshold=0.5)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# define different moderation configs using the filter configs above\n",
|
||||
"moderation_config_1 = BaseModerationConfig(\n",
|
||||
" filters=[pii_config, toxicity_config]\n",
|
||||
")\n",
|
||||
"moderation_config_1 = BaseModerationConfig(filters=[pii_config, toxicity_config])\n",
|
||||
"\n",
|
||||
"moderation_config_2 = BaseModerationConfig(\n",
|
||||
" filters=[pii_config]\n",
|
||||
")\n",
|
||||
"moderation_config_2 = BaseModerationConfig(filters=[pii_config])\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# input prompt moderation chain with callback\n",
|
||||
"amazon_comp_moderation = AmazonComprehendModerationChain(moderation_config=moderation_config_1, \n",
|
||||
"amazon_comp_moderation = AmazonComprehendModerationChain(\n",
|
||||
" moderation_config=moderation_config_1,\n",
|
||||
" client=comprehend_client,\n",
|
||||
" moderation_callback=my_callback,\n",
|
||||
" verbose=True)\n",
|
||||
" verbose=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Output from LLM moderation chain without callback\n",
|
||||
"amazon_comp_moderation_out = AmazonComprehendModerationChain(moderation_config=moderation_config_2, \n",
|
||||
" client=comprehend_client,\n",
|
||||
" verbose=True)"
|
||||
"amazon_comp_moderation_out = AmazonComprehendModerationChain(\n",
|
||||
" moderation_config=moderation_config_2, client=comprehend_client, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -806,16 +799,18 @@
|
||||
"chain = (\n",
|
||||
" prompt\n",
|
||||
" | amazon_comp_moderation\n",
|
||||
" | { \"input\" : (lambda x: x['output']) | llm }\n",
|
||||
" | {\"input\": (lambda x: x[\"output\"]) | llm}\n",
|
||||
" | amazon_comp_moderation_out\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" response = chain.invoke({\"question\": \"What is John Doe's address, phone number and SSN?\"})\n",
|
||||
" response = chain.invoke(\n",
|
||||
" {\"question\": \"What is John Doe's address, phone number and SSN?\"}\n",
|
||||
" )\n",
|
||||
"except Exception as e:\n",
|
||||
" print(str(e))\n",
|
||||
"else:\n",
|
||||
" print(response['output'])"
|
||||
" print(response[\"output\"])"
|
||||
]
|
||||
},
|
||||
{
@ -122,8 +122,7 @@
|
||||
"from langchain.callbacks.confident_callback import DeepEvalCallbackHandler\n",
|
||||
"\n",
|
||||
"deepeval_callback = DeepEvalCallbackHandler(\n",
|
||||
" implementation_name=\"langchainQuickstart\",\n",
|
||||
" metrics=[answer_relevancy_metric]\n",
|
||||
" implementation_name=\"langchainQuickstart\", metrics=[answer_relevancy_metric]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -155,6 +154,7 @@
|
||||
],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(\n",
|
||||
" temperature=0,\n",
|
||||
" callbacks=[deepeval_callback],\n",
|
||||
@ -239,8 +239,9 @@
|
||||
"docsearch = Chroma.from_documents(texts, embeddings)\n",
|
||||
"\n",
|
||||
"qa = RetrievalQA.from_chain_type(\n",
|
||||
" llm=OpenAI(openai_api_key=openai_api_key), chain_type=\"stuff\",\n",
|
||||
" retriever=docsearch.as_retriever()\n",
|
||||
" llm=OpenAI(openai_api_key=openai_api_key),\n",
|
||||
" chain_type=\"stuff\",\n",
|
||||
" retriever=docsearch.as_retriever(),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Providing a new question-answering pipeline\n",
|
||||
@ -234,8 +234,7 @@
|
||||
" plt.ylabel(\"Value\")\n",
|
||||
" plt.title(title)\n",
|
||||
"\n",
|
||||
" plt.show()\n",
|
||||
"\n"
|
||||
" plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -325,9 +324,11 @@
|
||||
" model_id=\"test_chatopenai\", model_version=\"0.1\", verbose=False\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"urls = [\"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n",
|
||||
"urls = [\n",
|
||||
" \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n",
|
||||
" \"https://medium.com/lyft-engineering/lyftlearn-ml-model-training-infrastructure-built-on-kubernetes-aef8218842bb\",\n",
|
||||
" \"https://blog.langchain.dev/week-of-10-2-langchain-release-notes/\"]\n",
|
||||
" \"https://blog.langchain.dev/week-of-10-2-langchain-release-notes/\",\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"for url in urls:\n",
|
||||
" loader = WebBaseLoader(url)\n",
|
||||
@ -364,7 +365,7 @@
|
||||
"plot(response.text, \"Prompt Tokens\")\n",
|
||||
"\n",
|
||||
"response = client.search_ts(\"__name__\", \"completion_tokens\", 0, int(time.time()))\n",
|
||||
"plot(response.text, \"Completion Tokens\")\n"
|
||||
"plot(response.text, \"Completion Tokens\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -97,9 +97,9 @@
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ['LABEL_STUDIO_URL'] = '<YOUR-LABEL-STUDIO-URL>' # e.g. http://localhost:8080\n",
|
||||
"os.environ['LABEL_STUDIO_API_KEY'] = '<YOUR-LABEL-STUDIO-API-KEY>'\n",
|
||||
"os.environ['OPENAI_API_KEY'] = '<YOUR-OPENAI-API-KEY>'"
|
||||
"os.environ[\"LABEL_STUDIO_URL\"] = \"<YOUR-LABEL-STUDIO-URL>\" # e.g. http://localhost:8080\n",
|
||||
"os.environ[\"LABEL_STUDIO_API_KEY\"] = \"<YOUR-LABEL-STUDIO-API-KEY>\"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"<YOUR-OPENAI-API-KEY>\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -174,11 +174,7 @@
|
||||
"from langchain.callbacks import LabelStudioCallbackHandler\n",
|
||||
"\n",
|
||||
"llm = OpenAI(\n",
|
||||
" temperature=0,\n",
|
||||
" callbacks=[\n",
|
||||
" LabelStudioCallbackHandler(\n",
|
||||
" project_name=\"My Project\"\n",
|
||||
" )]\n",
|
||||
" temperature=0, callbacks=[LabelStudioCallbackHandler(project_name=\"My Project\")]\n",
|
||||
")\n",
|
||||
"print(llm(\"Tell me a joke\"))"
|
||||
]
|
||||
@ -249,16 +245,20 @@
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain.callbacks import LabelStudioCallbackHandler\n",
|
||||
"\n",
|
||||
"chat_llm = ChatOpenAI(callbacks=[\n",
|
||||
"chat_llm = ChatOpenAI(\n",
|
||||
" callbacks=[\n",
|
||||
" LabelStudioCallbackHandler(\n",
|
||||
" mode=\"chat\",\n",
|
||||
" project_name=\"New Project with Chat\",\n",
|
||||
" )\n",
|
||||
"])\n",
|
||||
"llm_results = chat_llm([\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"llm_results = chat_llm(\n",
|
||||
" [\n",
|
||||
" SystemMessage(content=\"Always use a lot of emojis\"),\n",
|
||||
" HumanMessage(content=\"Tell me a joke\")\n",
|
||||
"])"
|
||||
" HumanMessage(content=\"Tell me a joke\"),\n",
|
||||
" ]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -304,7 +304,8 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ls = LabelStudioCallbackHandler(project_config='''\n",
|
||||
"ls = LabelStudioCallbackHandler(\n",
|
||||
" project_config=\"\"\"\n",
|
||||
"<View>\n",
|
||||
"<Text name=\"prompt\" value=\"$prompt\"/>\n",
|
||||
"<TextArea name=\"response\" toName=\"prompt\"/>\n",
|
||||
@ -315,7 +316,8 @@
|
||||
" <Choice value=\"Negative\"/>\n",
|
||||
"</Choices>\n",
|
||||
"</View>\n",
|
||||
"''')"
|
||||
"\"\"\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -150,8 +150,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"with Run(experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session) as run:\n",
|
||||
"\n",
|
||||
"with Run(\n",
|
||||
" experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session\n",
|
||||
") as run:\n",
|
||||
" # Create SageMaker Callback\n",
|
||||
" sagemaker_callback = SageMakerCallbackHandler(run)\n",
|
||||
"\n",
|
||||
@ -209,8 +210,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"with Run(experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session) as run:\n",
|
||||
"\n",
|
||||
"with Run(\n",
|
||||
" experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session\n",
|
||||
") as run:\n",
|
||||
" # Create SageMaker Callback\n",
|
||||
" sagemaker_callback = SageMakerCallbackHandler(run)\n",
|
||||
"\n",
|
||||
@ -228,7 +230,9 @@
|
||||
" chain2 = LLMChain(llm=llm, prompt=prompt_template2, callbacks=[sagemaker_callback])\n",
|
||||
"\n",
|
||||
" # Create Sequential chain\n",
|
||||
" overall_chain = SimpleSequentialChain(chains=[chain1, chain2], callbacks=[sagemaker_callback])\n",
|
||||
" overall_chain = SimpleSequentialChain(\n",
|
||||
" chains=[chain1, chain2], callbacks=[sagemaker_callback]\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # Run overall sequential chain\n",
|
||||
" overall_chain.run(**INPUT_VARIABLES)\n",
|
||||
@ -267,8 +271,9 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"with Run(experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session) as run:\n",
|
||||
"\n",
|
||||
"with Run(\n",
|
||||
" experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session\n",
|
||||
") as run:\n",
|
||||
" # Create SageMaker Callback\n",
|
||||
" sagemaker_callback = SageMakerCallbackHandler(run)\n",
|
||||
"\n",
|
||||
@ -279,7 +284,9 @@
|
||||
" tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callbacks=[sagemaker_callback])\n",
|
||||
"\n",
|
||||
" # Initialize agent with all the tools\n",
|
||||
" agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", callbacks=[sagemaker_callback])\n",
|
||||
" agent = initialize_agent(\n",
|
||||
" tools, llm, agent=\"zero-shot-react-description\", callbacks=[sagemaker_callback]\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # Run agent\n",
|
||||
" agent.run(input=PROMPT_TEMPLATE)\n",
|
||||
|
@ -284,7 +284,7 @@
|
||||
" project=\"default\",\n",
|
||||
" tags=[\"chat model\"],\n",
|
||||
" user_id=\"user-id-1234\",\n",
|
||||
" some_metadata={\"hello\": [1, 2]}\n",
|
||||
" some_metadata={\"hello\": [1, 2]},\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
")"
|
||||
|
@ -46,7 +46,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = AnthropicFunctions(model='claude-2')"
|
||||
"model = AnthropicFunctions(model=\"claude-2\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -75,15 +75,12 @@
|
||||
" \"properties\": {\n",
|
||||
" \"location\": {\n",
|
||||
" \"type\": \"string\",\n",
|
||||
" \"description\": \"The city and state, e.g. San Francisco, CA\"\n",
|
||||
" \"description\": \"The city and state, e.g. San Francisco, CA\",\n",
|
||||
" },\n",
|
||||
" \"unit\": {\n",
|
||||
" \"type\": \"string\",\n",
|
||||
" \"enum\": [\"celsius\", \"fahrenheit\"]\n",
|
||||
" }\n",
|
||||
" \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n",
|
||||
" },\n",
|
||||
" \"required\": [\"location\"],\n",
|
||||
" },\n",
|
||||
" \"required\": [\"location\"]\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
"]"
|
||||
]
|
||||
@ -106,8 +103,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"response = model.predict_messages(\n",
|
||||
" [HumanMessage(content=\"whats the weater in boston?\")], \n",
|
||||
" functions=functions\n",
|
||||
" [HumanMessage(content=\"whats the weater in boston?\")], functions=functions\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -150,6 +146,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import create_extraction_chain\n",
|
||||
"\n",
|
||||
"schema = {\n",
|
||||
" \"properties\": {\n",
|
||||
" \"name\": {\"type\": \"string\"},\n",
|
||||
|
@ -102,19 +102,15 @@
|
||||
"from langchain.schema import SystemMessage, HumanMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"You are a helpful AI that shares everything you know.\"\n",
|
||||
" ),\n",
|
||||
" SystemMessage(content=\"You are a helpful AI that shares everything you know.\"),\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Tell me technical facts about yourself. Are you a transformer model? How many billions of parameters do you have?\"\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def get_msgs():\n",
|
||||
" tasks = [\n",
|
||||
" chat.apredict_messages(messages)\n",
|
||||
" for chat in chats.values()\n",
|
||||
" ]\n",
|
||||
" tasks = [chat.apredict_messages(messages) for chat in chats.values()]\n",
|
||||
" responses = await asyncio.gather(*tasks)\n",
|
||||
" return dict(zip(chats.keys(), responses))"
|
||||
]
|
||||
@ -194,10 +190,10 @@
|
||||
"response_dict = asyncio.run(get_msgs())\n",
|
||||
"\n",
|
||||
"for model_name, response in response_dict.items():\n",
|
||||
" print(f'\\t{model_name}')\n",
|
||||
" print(f\"\\t{model_name}\")\n",
|
||||
" print()\n",
|
||||
" print(response.content)\n",
|
||||
" print('\\n---\\n')"
|
||||
" print(\"\\n---\\n\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
@ -140,7 +140,9 @@
|
||||
" )\n",
|
||||
" ]\n",
|
||||
" )\n",
|
||||
" print(f\"Total Cost (USD): ${format(cb.total_cost, '.6f')}\") # without specifying the model version, flat-rate 0.002 USD per 1k input and output tokens is used\n"
|
||||
" print(\n",
|
||||
" f\"Total Cost (USD): ${format(cb.total_cost, '.6f')}\"\n",
|
||||
" ) # without specifying the model version, flat-rate 0.002 USD per 1k input and output tokens is used"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -172,7 +174,7 @@
|
||||
" deployment_name=DEPLOYMENT_NAME,\n",
|
||||
" openai_api_key=API_KEY,\n",
|
||||
" openai_api_type=\"azure\",\n",
|
||||
" model_version=\"0613\"\n",
|
||||
" model_version=\"0613\",\n",
|
||||
")\n",
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" model0613(\n",
|
||||
@ -182,7 +184,7 @@
|
||||
" )\n",
|
||||
" ]\n",
|
||||
" )\n",
|
||||
" print(f\"Total Cost (USD): ${format(cb.total_cost, '.6f')}\")\n"
|
||||
" print(f\"Total Cost (USD): ${format(cb.total_cost, '.6f')}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -67,10 +67,10 @@
|
||||
" endpoint_url=\"https://<your-endpoint>.<your_region>.inference.ml.azure.com/score\",\n",
|
||||
" endpoint_api_key=\"my-api-key\",\n",
|
||||
" content_formatter=LlamaContentFormatter,\n",
|
||||
"))\n",
|
||||
"response = chat(messages=[\n",
|
||||
" HumanMessage(content=\"Will the Collatz conjecture ever be solved?\")\n",
|
||||
"])\n",
|
||||
")\n",
|
||||
"response = chat(\n",
|
||||
" messages=[HumanMessage(content=\"Will the Collatz conjecture ever be solved?\")]\n",
|
||||
")\n",
|
||||
"response"
|
||||
]
|
||||
}
|
||||
@ -91,9 +91,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.11"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
@ -36,8 +36,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatBaichuan(\n",
|
||||
" baichuan_api_key='YOUR_API_KEY',\n",
|
||||
" baichuan_secret_key='YOUR_SECRET_KEY'\n",
|
||||
" baichuan_api_key=\"YOUR_API_KEY\", baichuan_secret_key=\"YOUR_SECRET_KEY\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -72,9 +71,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat([\n",
|
||||
" HumanMessage(content='我日薪8块钱,请问在闰年的二月,我月薪多少')\n",
|
||||
"])"
|
||||
"chat([HumanMessage(content=\"我日薪8块钱,请问在闰年的二月,我月薪多少\")])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -92,9 +89,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatBaichuan(\n",
|
||||
" baichuan_api_key='YOUR_API_KEY',\n",
|
||||
" baichuan_secret_key='YOUR_SECRET_KEY',\n",
|
||||
" streaming=True\n",
|
||||
" baichuan_api_key=\"YOUR_API_KEY\",\n",
|
||||
" baichuan_secret_key=\"YOUR_SECRET_KEY\",\n",
|
||||
" streaming=True,\n",
|
||||
")"
|
||||
],
|
||||
"metadata": {
|
||||
@ -119,9 +116,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat([\n",
|
||||
" HumanMessage(content='我日薪8块钱,请问在闰年的二月,我月薪多少')\n",
|
||||
"])"
|
||||
"chat([HumanMessage(content=\"我日薪8块钱,请问在闰年的二月,我月薪多少\")])"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
|
@ -62,13 +62,14 @@
|
||||
"from langchain.chat_models import QianfanChatEndpoint\n",
|
||||
"from langchain.chat_models.base import HumanMessage\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"QIANFAN_AK\"] = \"your_ak\"\n",
|
||||
"os.environ[\"QIANFAN_SK\"] = \"your_sk\"\n",
|
||||
"\n",
|
||||
"chat = QianfanChatEndpoint(\n",
|
||||
" streaming=True,\n",
|
||||
")\n",
|
||||
"res = chat([HumanMessage(content=\"write a funny joke\")])\n"
|
||||
"res = chat([HumanMessage(content=\"write a funny joke\")])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -112,7 +113,6 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
" \n",
|
||||
"from langchain.chat_models import QianfanChatEndpoint\n",
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"\n",
|
||||
@ -125,15 +125,22 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"async def run_aio_generate():\n",
|
||||
" resp = await chatLLM.agenerate(messages=[[HumanMessage(content=\"write a 20 words sentence about sea.\")]])\n",
|
||||
" resp = await chatLLM.agenerate(\n",
|
||||
" messages=[[HumanMessage(content=\"write a 20 words sentence about sea.\")]]\n",
|
||||
" )\n",
|
||||
" print(resp)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await run_aio_generate()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def run_aio_stream():\n",
|
||||
" async for res in chatLLM.astream([HumanMessage(content=\"write a 20 words sentence about sea.\")]):\n",
|
||||
" async for res in chatLLM.astream(\n",
|
||||
" [HumanMessage(content=\"write a 20 words sentence about sea.\")]\n",
|
||||
" ):\n",
|
||||
" print(\"astream\", res)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await run_aio_stream()"
|
||||
]
|
||||
},
|
||||
@ -217,7 +224,10 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"res = chat.stream([HumanMessage(content=\"hi\")], **{'top_p': 0.4, 'temperature': 0.1, 'penalty_score': 1})\n",
|
||||
"res = chat.stream(\n",
|
||||
" [HumanMessage(content=\"hi\")],\n",
|
||||
" **{\"top_p\": 0.4, \"temperature\": 0.1, \"penalty_score\": 1}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"for r in res:\n",
|
||||
" print(r)"
|
||||
|
@ -55,11 +55,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"knock knock\"\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"messages = [HumanMessage(content=\"knock knock\")]\n",
|
||||
"chat(messages)"
|
||||
]
|
||||
},
|
||||
|
@ -26,7 +26,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ErnieBotChat(ernie_client_id='YOUR_CLIENT_ID', ernie_client_secret='YOUR_CLIENT_SECRET')"
|
||||
"chat = ErnieBotChat(\n",
|
||||
" ernie_client_id=\"YOUR_CLIENT_ID\", ernie_client_secret=\"YOUR_CLIENT_SECRET\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -57,9 +59,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat([\n",
|
||||
" HumanMessage(content='hello there, who are you?')\n",
|
||||
"])"
|
||||
"chat([HumanMessage(content=\"hello there, who are you?\")])"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
@ -67,15 +67,15 @@
|
||||
"from langchain.schema import SystemMessage, HumanMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"You are a helpful AI that shares everything you know.\"\n",
|
||||
" ),\n",
|
||||
" SystemMessage(content=\"You are a helpful AI that shares everything you know.\"),\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Tell me technical facts about yourself. Are you a transformer model? How many billions of parameters do you have?\"\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"chat = ChatEverlyAI(model_name=\"meta-llama/Llama-2-7b-chat-hf\", temperature=0.3, max_tokens=64)\n",
|
||||
"chat = ChatEverlyAI(\n",
|
||||
" model_name=\"meta-llama/Llama-2-7b-chat-hf\", temperature=0.3, max_tokens=64\n",
|
||||
")\n",
|
||||
"print(chat(messages).content)"
|
||||
]
|
||||
},
|
||||
@ -121,15 +121,17 @@
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"You are a humorous AI that delights people.\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Tell me a joke?\"\n",
|
||||
" ),\n",
|
||||
" SystemMessage(content=\"You are a humorous AI that delights people.\"),\n",
|
||||
" HumanMessage(content=\"Tell me a joke?\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"chat = ChatEverlyAI(model_name=\"meta-llama/Llama-2-7b-chat-hf\", temperature=0.3, max_tokens=64, streaming=True, callbacks=[StreamingStdOutCallbackHandler()])\n",
|
||||
"chat = ChatEverlyAI(\n",
|
||||
" model_name=\"meta-llama/Llama-2-7b-chat-hf\",\n",
|
||||
" temperature=0.3,\n",
|
||||
" max_tokens=64,\n",
|
||||
" streaming=True,\n",
|
||||
" callbacks=[StreamingStdOutCallbackHandler()],\n",
|
||||
")\n",
|
||||
"chat(messages)"
|
||||
]
|
||||
},
|
||||
@ -177,15 +179,17 @@
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"You are a humorous AI that delights people.\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Tell me a joke?\"\n",
|
||||
" ),\n",
|
||||
" SystemMessage(content=\"You are a humorous AI that delights people.\"),\n",
|
||||
" HumanMessage(content=\"Tell me a joke?\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"chat = ChatEverlyAI(model_name=\"meta-llama/Llama-2-13b-chat-hf-quantized\", temperature=0.3, max_tokens=128, streaming=True, callbacks=[StreamingStdOutCallbackHandler()])\n",
|
||||
"chat = ChatEverlyAI(\n",
|
||||
" model_name=\"meta-llama/Llama-2-13b-chat-hf-quantized\",\n",
|
||||
" temperature=0.3,\n",
|
||||
" max_tokens=128,\n",
|
||||
" streaming=True,\n",
|
||||
" callbacks=[StreamingStdOutCallbackHandler()],\n",
|
||||
")\n",
|
||||
"chat(messages)"
|
||||
]
|
||||
}
|
||||
|
@ -27,7 +27,7 @@
|
||||
"source": [
|
||||
"from langchain.chat_models.fireworks import ChatFireworks\n",
|
||||
"from langchain.schema import SystemMessage, HumanMessage\n",
|
||||
"import os\n"
|
||||
"import os"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -56,7 +56,7 @@
|
||||
" os.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass(\"Fireworks API Key:\")\n",
|
||||
"\n",
|
||||
"# Initialize a Fireworks chat model\n",
|
||||
"chat = ChatFireworks(model=\"accounts/fireworks/models/llama-v2-13b-chat\")\n"
|
||||
"chat = ChatFireworks(model=\"accounts/fireworks/models/llama-v2-13b-chat\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -91,7 +91,7 @@
|
||||
"system_message = SystemMessage(content=\"You are to chat with the user.\")\n",
|
||||
"human_message = HumanMessage(content=\"Who are you?\")\n",
|
||||
"\n",
|
||||
"chat([system_message, human_message])\n"
|
||||
"chat([system_message, human_message])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -113,10 +113,13 @@
|
||||
],
|
||||
"source": [
|
||||
"# Setting additional parameters: temperature, max_tokens, top_p\n",
|
||||
"chat = ChatFireworks(model=\"accounts/fireworks/models/llama-v2-13b-chat\", model_kwargs={\"temperature\":1, \"max_tokens\": 20, \"top_p\": 1})\n",
|
||||
"chat = ChatFireworks(\n",
|
||||
" model=\"accounts/fireworks/models/llama-v2-13b-chat\",\n",
|
||||
" model_kwargs={\"temperature\": 1, \"max_tokens\": 20, \"top_p\": 1},\n",
|
||||
")\n",
|
||||
"system_message = SystemMessage(content=\"You are to chat with the user.\")\n",
|
||||
"human_message = HumanMessage(content=\"How's the weather today?\")\n",
|
||||
"chat([system_message, human_message])\n"
|
||||
"chat([system_message, human_message])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -147,12 +150,17 @@
|
||||
"from langchain.schema.runnable import RunnablePassthrough\n",
|
||||
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"\n",
|
||||
"llm = ChatFireworks(model=\"accounts/fireworks/models/llama-v2-13b-chat\", model_kwargs={\"temperature\":0, \"max_tokens\":64, \"top_p\":1.0})\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([\n",
|
||||
"llm = ChatFireworks(\n",
|
||||
" model=\"accounts/fireworks/models/llama-v2-13b-chat\",\n",
|
||||
" model_kwargs={\"temperature\": 0, \"max_tokens\": 64, \"top_p\": 1.0},\n",
|
||||
")\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"You are a helpful chatbot that speaks like a pirate.\"),\n",
|
||||
" MessagesPlaceholder(variable_name=\"history\"),\n",
|
||||
" (\"human\", \"{input}\")\n",
|
||||
"])\n"
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -182,7 +190,7 @@
|
||||
],
|
||||
"source": [
|
||||
"memory = ConversationBufferMemory(return_messages=True)\n",
|
||||
"memory.load_memory_variables({})\n"
|
||||
"memory.load_memory_variables({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -200,9 +208,13 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = RunnablePassthrough.assign(\n",
|
||||
"chain = (\n",
|
||||
" RunnablePassthrough.assign(\n",
|
||||
" history=memory.load_memory_variables | (lambda x: x[\"history\"])\n",
|
||||
") | prompt | llm.bind(stop=[\"\\n\\n\"])\n"
|
||||
" )\n",
|
||||
" | prompt\n",
|
||||
" | llm.bind(stop=[\"\\n\\n\"])\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -233,7 +245,7 @@
|
||||
"source": [
|
||||
"inputs = {\"input\": \"hi im bob\"}\n",
|
||||
"response = chain.invoke(inputs)\n",
|
||||
"response\n"
|
||||
"response"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -264,7 +276,7 @@
|
||||
],
|
||||
"source": [
|
||||
"memory.save_context(inputs, {\"output\": response.content})\n",
|
||||
"memory.load_memory_variables({})\n"
|
||||
"memory.load_memory_variables({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -294,7 +306,7 @@
|
||||
],
|
||||
"source": [
|
||||
"inputs = {\"input\": \"whats my name\"}\n",
|
||||
"chain.invoke(inputs)\n"
|
||||
"chain.invoke(inputs)"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
@ -40,7 +40,7 @@
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"os.environ['GIGACHAT_CREDENTIALS'] = getpass()"
|
||||
"os.environ[\"GIGACHAT_CREDENTIALS\"] = getpass()"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
@ -78,9 +78,7 @@
|
||||
" SystemMessage(\n",
|
||||
" content=\"You are a helpful AI that shares everything you know. Talk in English.\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Tell me a joke\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(content=\"Tell me a joke\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"print(chat(messages).content)"
|
||||
|
@ -31,7 +31,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install langchain google-cloud-aiplatform\n"
|
||||
"#!pip install langchain google-cloud-aiplatform"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -41,7 +41,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatVertexAI\n",
|
||||
"from langchain.prompts import ChatPromptTemplate\n"
|
||||
"from langchain.prompts import ChatPromptTemplate"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -50,7 +50,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatVertexAI()\n"
|
||||
"chat = ChatVertexAI()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -61,10 +61,8 @@
|
||||
"source": [
|
||||
"system = \"You are a helpful assistant who translate English to French\"\n",
|
||||
"human = \"Translate this sentence from English to French. I love programming.\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"system\", system), (\"human\", human)]\n",
|
||||
")\n",
|
||||
"messages = prompt.format_messages()\n"
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
|
||||
"messages = prompt.format_messages()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -84,7 +82,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat(messages)\n"
|
||||
"chat(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -100,11 +98,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"system = \"You are a helpful assistant that translates {input_language} to {output_language}.\"\n",
|
||||
"system = (\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\"\n",
|
||||
")\n",
|
||||
"human = \"{text}\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"system\", system), (\"human\", human)]\n",
|
||||
")\n"
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -126,8 +124,12 @@
|
||||
"source": [
|
||||
"chain = prompt | chat\n",
|
||||
"chain.invoke(\n",
|
||||
" {\"input_language\": \"English\", \"output_language\": \"Japanese\", \"text\": \"I love programming\"}\n",
|
||||
")\n"
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"Japanese\",\n",
|
||||
" \"text\": \"I love programming\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -158,10 +160,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatVertexAI(\n",
|
||||
" model_name=\"codechat-bison\",\n",
|
||||
" max_output_tokens=1000,\n",
|
||||
" temperature=0.5\n",
|
||||
")\n"
|
||||
" model_name=\"codechat-bison\", max_output_tokens=1000, temperature=0.5\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -189,7 +189,7 @@
|
||||
],
|
||||
"source": [
|
||||
"# For simple string in string out usage, we can use the `predict` method:\n",
|
||||
"print(chat.predict(\"Write a Python function to identify all prime numbers\"))\n"
|
||||
"print(chat.predict(\"Write a Python function to identify all prime numbers\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -208,8 +208,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import asyncio\n",
|
||||
"\n",
|
||||
"# import nest_asyncio\n",
|
||||
"# nest_asyncio.apply()\n"
|
||||
"# nest_asyncio.apply()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -237,7 +238,7 @@
|
||||
" top_k=40,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"asyncio.run(chat.agenerate([messages]))\n"
|
||||
"asyncio.run(chat.agenerate([messages]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -257,7 +258,15 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"asyncio.run(chain.ainvoke({\"input_language\": \"English\", \"output_language\": \"Sanskrit\", \"text\": \"I love programming\"}))\n"
|
||||
"asyncio.run(\n",
|
||||
" chain.ainvoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"Sanskrit\",\n",
|
||||
" \"text\": \"I love programming\",\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -275,7 +284,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import sys\n"
|
||||
"import sys"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -306,11 +315,13 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"human\", \"List out the 15 most populous countries in the world\")])\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"human\", \"List out the 15 most populous countries in the world\")]\n",
|
||||
")\n",
|
||||
"messages = prompt.format_messages()\n",
|
||||
"for chunk in chat.stream(messages):\n",
|
||||
" sys.stdout.write(chunk.content)\n",
|
||||
" sys.stdout.flush()\n"
|
||||
" sys.stdout.flush()"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
@ -36,9 +36,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatHunyuan(\n",
|
||||
" hunyuan_app_id='YOUR_APP_ID',\n",
|
||||
" hunyuan_secret_id='YOUR_SECRET_ID',\n",
|
||||
" hunyuan_secret_key='YOUR_SECRET_KEY',\n",
|
||||
" hunyuan_app_id=\"YOUR_APP_ID\",\n",
|
||||
" hunyuan_secret_id=\"YOUR_SECRET_ID\",\n",
|
||||
" hunyuan_secret_key=\"YOUR_SECRET_KEY\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -62,9 +62,13 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat([\n",
|
||||
" HumanMessage(content='You are a helpful assistant that translates English to French.Translate this sentence from English to French. I love programming.')\n",
|
||||
"])"
|
||||
"chat(\n",
|
||||
" [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"You are a helpful assistant that translates English to French.Translate this sentence from English to French. I love programming.\"\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -82,9 +86,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatHunyuan(\n",
|
||||
" hunyuan_app_id='YOUR_APP_ID',\n",
|
||||
" hunyuan_secret_id='YOUR_SECRET_ID',\n",
|
||||
" hunyuan_secret_key='YOUR_SECRET_KEY',\n",
|
||||
" hunyuan_app_id=\"YOUR_APP_ID\",\n",
|
||||
" hunyuan_secret_id=\"YOUR_SECRET_ID\",\n",
|
||||
" hunyuan_secret_key=\"YOUR_SECRET_KEY\",\n",
|
||||
" streaming=True,\n",
|
||||
")"
|
||||
],
|
||||
@ -110,9 +114,13 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat([\n",
|
||||
" HumanMessage(content='You are a helpful assistant that translates English to French.Translate this sentence from English to French. I love programming.')\n",
|
||||
"])"
|
||||
"chat(\n",
|
||||
" [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"You are a helpful assistant that translates English to French.Translate this sentence from English to French. I love programming.\"\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
|
@ -96,7 +96,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatKonko(max_tokens=400, model = 'meta-llama/Llama-2-13b-chat-hf')"
|
||||
"chat = ChatKonko(max_tokens=400, model=\"meta-llama/Llama-2-13b-chat-hf\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -117,12 +117,8 @@
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"You are a helpful assistant.\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Explain Big Bang Theory briefly\"\n",
|
||||
" ),\n",
|
||||
" SystemMessage(content=\"You are a helpful assistant.\"),\n",
|
||||
" HumanMessage(content=\"Explain Big Bang Theory briefly\"),\n",
|
||||
"]\n",
|
||||
"chat(messages)"
|
||||
]
|
||||
|
@ -28,7 +28,7 @@
|
||||
"from llamaapi import LlamaAPI\n",
|
||||
"\n",
|
||||
"# Replace 'Your_API_Token' with your actual API token\n",
|
||||
"llama = LlamaAPI('Your_API_Token')"
|
||||
"llama = LlamaAPI(\"Your_API_Token\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -71,9 +71,15 @@
|
||||
"\n",
|
||||
"schema = {\n",
|
||||
" \"properties\": {\n",
|
||||
" \"sentiment\": {\"type\": \"string\", 'description': 'the sentiment encountered in the passage'},\n",
|
||||
" \"aggressiveness\": {\"type\": \"integer\", 'description': 'a 0-10 score of how aggressive the passage is'},\n",
|
||||
" \"language\": {\"type\": \"string\", 'description': 'the language of the passage'},\n",
|
||||
" \"sentiment\": {\n",
|
||||
" \"type\": \"string\",\n",
|
||||
" \"description\": \"the sentiment encountered in the passage\",\n",
|
||||
" },\n",
|
||||
" \"aggressiveness\": {\n",
|
||||
" \"type\": \"integer\",\n",
|
||||
" \"description\": \"a 0-10 score of how aggressive the passage is\",\n",
|
||||
" },\n",
|
||||
" \"language\": {\"type\": \"string\", \"description\": \"the language of the passage\"},\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
|
@ -62,8 +62,11 @@
|
||||
"from langchain.chat_models import ChatOllama\n",
|
||||
"from langchain.callbacks.manager import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"chat_model = ChatOllama(model=\"llama2:7b-chat\", \n",
|
||||
" callback_manager = CallbackManager([StreamingStdOutCallbackHandler()]))"
|
||||
"\n",
|
||||
"chat_model = ChatOllama(\n",
|
||||
" model=\"llama2:7b-chat\",\n",
|
||||
" callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -112,9 +115,7 @@
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" HumanMessage(content=\"Tell me about the history of AI\")\n",
|
||||
"]\n",
|
||||
"messages = [HumanMessage(content=\"Tell me about the history of AI\")]\n",
|
||||
"chat_model(messages)"
|
||||
]
|
||||
},
|
||||
@ -151,10 +152,12 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import WebBaseLoader\n",
|
||||
"\n",
|
||||
"loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n",
|
||||
"data = loader.load()\n",
|
||||
"\n",
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n",
|
||||
"all_splits = text_splitter.split_documents(data)"
|
||||
]
|
||||
@ -224,9 +227,12 @@
|
||||
"from langchain.chat_models import ChatOllama\n",
|
||||
"from langchain.callbacks.manager import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"chat_model = ChatOllama(model=\"llama2:13b\",\n",
|
||||
"\n",
|
||||
"chat_model = ChatOllama(\n",
|
||||
" model=\"llama2:13b\",\n",
|
||||
" verbose=True,\n",
|
||||
" callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))"
|
||||
" callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -237,6 +243,7 @@
|
||||
"source": [
|
||||
"# QA chain\n",
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"\n",
|
||||
"qa_chain = RetrievalQA.from_chain_type(\n",
|
||||
" chat_model,\n",
|
||||
" retriever=vectorstore.as_retriever(),\n",
|
||||
@ -296,15 +303,19 @@
|
||||
"from langchain.schema import LLMResult\n",
|
||||
"from langchain.callbacks.base import BaseCallbackHandler\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class GenerationStatisticsCallback(BaseCallbackHandler):\n",
|
||||
" def on_llm_end(self, response: LLMResult, **kwargs) -> None:\n",
|
||||
" print(response.generations[0][0].generation_info)\n",
|
||||
"\n",
|
||||
"callback_manager = CallbackManager([StreamingStdOutCallbackHandler(), GenerationStatisticsCallback()])\n",
|
||||
"\n",
|
||||
"chat_model = ChatOllama(model=\"llama2:13b-chat\",\n",
|
||||
" verbose=True,\n",
|
||||
" callback_manager=callback_manager)\n",
|
||||
"callback_manager = CallbackManager(\n",
|
||||
" [StreamingStdOutCallbackHandler(), GenerationStatisticsCallback()]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chat_model = ChatOllama(\n",
|
||||
" model=\"llama2:13b-chat\", verbose=True, callback_manager=callback_manager\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"qa_chain = RetrievalQA.from_chain_type(\n",
|
||||
" chat_model,\n",
|
||||
|
@ -172,7 +172,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"fine_tuned_model = ChatOpenAI(temperature=0, model_name=\"ft:gpt-3.5-turbo-0613:langchain::7qTVM5AR\")\n",
|
||||
"fine_tuned_model = ChatOpenAI(\n",
|
||||
" temperature=0, model_name=\"ft:gpt-3.5-turbo-0613:langchain::7qTVM5AR\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"fine_tuned_model(messages)"
|
||||
]
|
||||
|
@ -32,11 +32,12 @@
|
||||
"import os\n",
|
||||
"from langchain.chat_models.base import HumanMessage\n",
|
||||
"from langchain.chat_models import PaiEasChatEndpoint\n",
|
||||
"\n",
|
||||
"os.environ[\"EAS_SERVICE_URL\"] = \"Your_EAS_Service_URL\"\n",
|
||||
"os.environ[\"EAS_SERVICE_TOKEN\"] = \"Your_EAS_Service_Token\"\n",
|
||||
"chat = PaiEasChatEndpoint(\n",
|
||||
" eas_service_url=os.environ[\"EAS_SERVICE_URL\"],\n",
|
||||
" eas_service_token=os.environ[\"EAS_SERVICE_TOKEN\"]\n",
|
||||
" eas_service_token=os.environ[\"EAS_SERVICE_TOKEN\"],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -89,7 +90,6 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"outputs = chat.stream([HumanMessage(content=\"hi\")], streaming=True)\n",
|
||||
"for output in outputs:\n",
|
||||
" print(\"stream output:\", output)"
|
||||
|
@ -120,6 +120,7 @@
|
||||
],
|
||||
"source": [
|
||||
"from langchain.schema import AIMessage, HumanMessage, SystemMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"You are a helpful assistant that translates English to French.\"\n",
|
||||
|
@ -77,8 +77,10 @@
|
||||
"source": [
|
||||
"answer = chat_model(\n",
|
||||
" [\n",
|
||||
" SystemMessage(content=\"You are a helpful assistant that translates English to French.\"),\n",
|
||||
" HumanMessage(content=\"I love programming.\")\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"You are a helpful assistant that translates English to French.\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(content=\"I love programming.\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"answer"
|
||||
|
@ -88,7 +88,6 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"class DiscordChatLoader(chat_loaders.BaseChatLoader):\n",
|
||||
" \n",
|
||||
" def __init__(self, path: str):\n",
|
||||
" \"\"\"\n",
|
||||
" Initialize the Discord chat loader.\n",
|
||||
@ -175,7 +174,7 @@
|
||||
" Yields:\n",
|
||||
" A `ChatSession` object containing the loaded chat messages.\n",
|
||||
" \"\"\"\n",
|
||||
" yield self._load_single_chat_session_from_txt(self.path)\n"
|
||||
" yield self._load_single_chat_session_from_txt(self.path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -228,7 +227,9 @@
|
||||
"# Merge consecutive messages from the same sender into a single message\n",
|
||||
"merged_messages = merge_chat_runs(raw_messages)\n",
|
||||
"# Convert messages from \"talkingtower\" to AI messages\n",
|
||||
"messages: List[ChatSession] = list(map_ai_messages(merged_messages, sender=\"talkingtower\"))"
|
||||
"messages: List[ChatSession] = list(\n",
|
||||
" map_ai_messages(merged_messages, sender=\"talkingtower\")\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -288,7 +289,7 @@
|
||||
"\n",
|
||||
"llm = ChatOpenAI()\n",
|
||||
"\n",
|
||||
"for chunk in llm.stream(messages[0]['messages']):\n",
|
||||
"for chunk in llm.stream(messages[0][\"messages\"]):\n",
|
||||
" print(chunk.content, end=\"\", flush=True)"
|
||||
]
|
||||
},
|
||||
|
@ -50,28 +50,32 @@
|
||||
"import requests\n",
|
||||
"import zipfile\n",
|
||||
"\n",
|
||||
"def download_and_unzip(url: str, output_path: str = 'file.zip') -> None:\n",
|
||||
" file_id = url.split('/')[-2]\n",
|
||||
" download_url = f'https://drive.google.com/uc?export=download&id={file_id}'\n",
|
||||
"\n",
|
||||
"def download_and_unzip(url: str, output_path: str = \"file.zip\") -> None:\n",
|
||||
" file_id = url.split(\"/\")[-2]\n",
|
||||
" download_url = f\"https://drive.google.com/uc?export=download&id={file_id}\"\n",
|
||||
"\n",
|
||||
" response = requests.get(download_url)\n",
|
||||
" if response.status_code != 200:\n",
|
||||
" print('Failed to download the file.')\n",
|
||||
" print(\"Failed to download the file.\")\n",
|
||||
" return\n",
|
||||
"\n",
|
||||
" with open(output_path, 'wb') as file:\n",
|
||||
" with open(output_path, \"wb\") as file:\n",
|
||||
" file.write(response.content)\n",
|
||||
" print(f'File {output_path} downloaded.')\n",
|
||||
" print(f\"File {output_path} downloaded.\")\n",
|
||||
"\n",
|
||||
" with zipfile.ZipFile(output_path, 'r') as zip_ref:\n",
|
||||
" with zipfile.ZipFile(output_path, \"r\") as zip_ref:\n",
|
||||
" zip_ref.extractall()\n",
|
||||
" print(f'File {output_path} has been unzipped.')\n",
|
||||
" print(f\"File {output_path} has been unzipped.\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# URL of the file to download\n",
|
||||
"url = 'https://drive.google.com/file/d/1rh1s1o2i7B-Sk1v9o8KNgivLVGwJ-osV/view?usp=sharing'\n",
|
||||
"url = (\n",
|
||||
" \"https://drive.google.com/file/d/1rh1s1o2i7B-Sk1v9o8KNgivLVGwJ-osV/view?usp=sharing\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Download and unzip\n",
|
||||
"download_and_unzip(url)\n"
|
||||
"download_and_unzip(url)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -235,7 +239,7 @@
|
||||
"source": [
|
||||
"# Now all of Harry Potter's messages will take the AI message class\n",
|
||||
"# which maps to the 'assistant' role in OpenAI's training format\n",
|
||||
"alternating_sessions[0]['messages'][:3]"
|
||||
"alternating_sessions[0][\"messages\"][:3]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -340,9 +344,7 @@
|
||||
"training_examples = [\n",
|
||||
" conversation_messages[i : i + chunk_size]\n",
|
||||
" for conversation_messages in training_data\n",
|
||||
" for i in range(\n",
|
||||
" 0, len(conversation_messages) - chunk_size + 1, \n",
|
||||
" chunk_size - overlap)\n",
|
||||
" for i in range(0, len(conversation_messages) - chunk_size + 1, chunk_size - overlap)\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"len(training_examples)"
|
||||
@ -393,13 +395,10 @@
|
||||
"# We will write the jsonl file in memory\n",
|
||||
"my_file = BytesIO()\n",
|
||||
"for m in training_examples:\n",
|
||||
" my_file.write((json.dumps({\"messages\": m}) + \"\\n\").encode('utf-8'))\n",
|
||||
" my_file.write((json.dumps({\"messages\": m}) + \"\\n\").encode(\"utf-8\"))\n",
|
||||
"\n",
|
||||
"my_file.seek(0)\n",
|
||||
"training_file = openai.File.create(\n",
|
||||
" file=my_file,\n",
|
||||
" purpose='fine-tune'\n",
|
||||
")\n",
|
||||
"training_file = openai.File.create(file=my_file, purpose=\"fine-tune\")\n",
|
||||
"\n",
|
||||
"# OpenAI audits each training file for compliance reasons.\n",
|
||||
"# This make take a few minutes\n",
|
||||
|
@ -47,15 +47,15 @@
"import logging\n",
"import requests\n",
"\n",
"SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']\n",
"SCOPES = [\"https://www.googleapis.com/auth/gmail.readonly\"]\n",
"\n",
"\n",
"creds = None\n",
"# The file token.json stores the user's access and refresh tokens, and is\n",
"# created automatically when the authorization flow completes for the first\n",
"# time.\n",
"if os.path.exists('email_token.json'):\n",
" creds = Credentials.from_authorized_user_file('email_token.json', SCOPES)\n",
"if os.path.exists(\"email_token.json\"):\n",
" creds = Credentials.from_authorized_user_file(\"email_token.json\", SCOPES)\n",
"# If there are no (valid) credentials available, let the user log in.\n",
"if not creds or not creds.valid:\n",
" if creds and creds.expired and creds.refresh_token:\n",
@ -63,10 +63,12 @@
" else:\n",
" flow = InstalledAppFlow.from_client_secrets_file(\n",
" # your creds file here. Please create json file as here https://cloud.google.com/docs/authentication/getting-started\n",
" 'creds.json', SCOPES)\n",
" \"creds.json\",\n",
" SCOPES,\n",
" )\n",
" creds = flow.run_local_server(port=0)\n",
" # Save the credentials for the next run\n",
" with open('email_token.json', 'w') as token:\n",
" with open(\"email_token.json\", \"w\") as token:\n",
" token.write(creds.to_json())"
]
},
@ -143,7 +145,9 @@
"source": [
"# This makes messages sent by hchase@langchain.com the AI Messages\n",
"# This means you will train an LLM to predict as if it's responding as hchase\n",
"training_data = list(map_ai_messages(data, sender=\"Harrison Chase <hchase@langchain.com>\"))"
"training_data = list(\n",
" map_ai_messages(data, sender=\"Harrison Chase <hchase@langchain.com>\")\n",
")"
]
},
{
@ -40,20 +40,24 @@
"# This uses some example data\n",
"import requests\n",
"\n",
"def download_drive_file(url: str, output_path: str = 'chat.db') -> None:\n",
" file_id = url.split('/')[-2]\n",
" download_url = f'https://drive.google.com/uc?export=download&id={file_id}'\n",
"\n",
"def download_drive_file(url: str, output_path: str = \"chat.db\") -> None:\n",
" file_id = url.split(\"/\")[-2]\n",
" download_url = f\"https://drive.google.com/uc?export=download&id={file_id}\"\n",
"\n",
" response = requests.get(download_url)\n",
" if response.status_code != 200:\n",
" print('Failed to download the file.')\n",
" print(\"Failed to download the file.\")\n",
" return\n",
"\n",
" with open(output_path, 'wb') as file:\n",
" with open(output_path, \"wb\") as file:\n",
" file.write(response.content)\n",
" print(f'File {output_path} downloaded.')\n",
" print(f\"File {output_path} downloaded.\")\n",
"\n",
"url = 'https://drive.google.com/file/d/1NebNKqTA2NXApCmeH6mu0unJD2tANZzo/view?usp=sharing'\n",
"\n",
"url = (\n",
" \"https://drive.google.com/file/d/1NebNKqTA2NXApCmeH6mu0unJD2tANZzo/view?usp=sharing\"\n",
")\n",
"\n",
"# Download file to chat.db\n",
"download_drive_file(url)"
@ -121,7 +125,9 @@
"# Merge consecutive messages from the same sender into a single message\n",
"merged_messages = merge_chat_runs(raw_messages)\n",
"# Convert messages from \"Tortoise\" to AI messages. Do you have a guess who these conversations are between?\n",
"chat_sessions: List[ChatSession] = list(map_ai_messages(merged_messages, sender=\"Tortoise\"))"
"chat_sessions: List[ChatSession] = list(\n",
" map_ai_messages(merged_messages, sender=\"Tortoise\")\n",
")"
]
},
{
@ -146,7 +152,7 @@
"source": [
"# Now all of the Tortoise's messages will take the AI message class\n",
"# which maps to the 'assistant' role in OpenAI's training format\n",
"alternating_sessions[0]['messages'][:3]"
"alternating_sessions[0][\"messages\"][:3]"
]
},
{
@ -233,13 +239,10 @@
"# We will write the jsonl file in memory\n",
"my_file = BytesIO()\n",
"for m in training_data:\n",
" my_file.write((json.dumps({\"messages\": m}) + \"\\n\").encode('utf-8'))\n",
" my_file.write((json.dumps({\"messages\": m}) + \"\\n\").encode(\"utf-8\"))\n",
"\n",
"my_file.seek(0)\n",
"training_file = openai.File.create(\n",
" file=my_file,\n",
" purpose='fine-tune'\n",
")\n",
"training_file = openai.File.create(file=my_file, purpose=\"fine-tune\")\n",
"\n",
"# OpenAI audits each training file for compliance reasons.\n",
"# This make take a few minutes\n",
|
||||
|
@ -42,6 +42,7 @@
"source": [
"import os\n",
"import uuid\n",
"\n",
"uid = uuid.uuid4().hex[:6]\n",
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
"os.environ[\"LANGCHAIN_API_KEY\"] = \"YOUR API KEY\""
@ -79,6 +80,7 @@
"outputs": [],
"source": [
"import requests\n",
"\n",
"url = \"https://raw.githubusercontent.com/langchain-ai/langchain/master/docs/docs/integrations/chat_loaders/example_data/langsmith_chat_dataset.json\"\n",
"response = requests.get(url)\n",
"response.raise_for_status()\n",
@ -104,8 +106,8 @@
"outputs": [],
"source": [
"_ = client.create_examples(\n",
" inputs = [e['inputs'] for e in data],\n",
" outputs = [e['outputs'] for e in data],\n",
" inputs=[e[\"inputs\"] for e in data],\n",
" outputs=[e[\"outputs\"] for e in data],\n",
" dataset_id=ds.id,\n",
")"
]
@ -184,13 +186,10 @@
"\n",
"my_file = BytesIO()\n",
"for dialog in training_data:\n",
" my_file.write((json.dumps({\"messages\": dialog}) + \"\\n\").encode('utf-8'))\n",
" my_file.write((json.dumps({\"messages\": dialog}) + \"\\n\").encode(\"utf-8\"))\n",
"\n",
"my_file.seek(0)\n",
"training_file = openai.File.create(\n",
" file=my_file,\n",
" purpose='fine-tune'\n",
")\n",
"training_file = openai.File.create(file=my_file, purpose=\"fine-tune\")\n",
"\n",
"job = openai.FineTuningJob.create(\n",
" training_file=training_file.id,\n",
@ -42,6 +42,7 @@
"source": [
"import os\n",
"import uuid\n",
"\n",
"uid = uuid.uuid4().hex[:6]\n",
"project_name = f\"Run Fine-tuning Walkthrough {uid}\"\n",
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
@ -79,8 +80,10 @@
" multiply = \"*\"\n",
" divide = \"/\"\n",
"\n",
"\n",
"class Calculator(BaseModel):\n",
" \"\"\"A calculator function\"\"\"\n",
"\n",
" num1: float\n",
" num2: float\n",
" operation: Operation = Field(..., description=\"+,-,*,/\")\n",
@ -197,7 +200,7 @@
" \"What's 60-30?\",\n",
" \"What's 50-25?\",\n",
" \"What's 40-20?\",\n",
" \"What's 30-15?\"\n",
" \"What's 30-15?\",\n",
"]\n",
"results = chain.batch([{\"input\": q} for q in math_questions], return_exceptions=True)"
]
@ -241,7 +244,8 @@
"}\n",
"\n",
"llm_runs = [\n",
" run for run in client.list_runs(\n",
" run\n",
" for run in client.list_runs(\n",
" project_name=project_name,\n",
" run_type=\"llm\",\n",
" )\n",
@ -323,13 +327,10 @@
"\n",
"my_file = BytesIO()\n",
"for dialog in training_data:\n",
" my_file.write((json.dumps({\"messages\": dialog}) + \"\\n\").encode('utf-8'))\n",
" my_file.write((json.dumps({\"messages\": dialog}) + \"\\n\").encode(\"utf-8\"))\n",
"\n",
"my_file.seek(0)\n",
"training_file = openai.File.create(\n",
" file=my_file,\n",
" purpose='fine-tune'\n",
")\n",
"training_file = openai.File.create(file=my_file, purpose=\"fine-tune\")\n",
"\n",
"job = openai.FineTuningJob.create(\n",
" training_file=training_file.id,\n",
@ -96,7 +96,9 @@
"# Merge consecutive messages from the same sender into a single message\n",
"merged_messages = merge_chat_runs(raw_messages)\n",
"# Convert messages from \"U0500003428\" to AI messages\n",
"messages: List[ChatSession] = list(map_ai_messages(merged_messages, sender=\"U0500003428\"))"
"messages: List[ChatSession] = list(\n",
" map_ai_messages(merged_messages, sender=\"U0500003428\")\n",
")"
]
},
{
@ -134,7 +136,7 @@
"\n",
"llm = ChatOpenAI()\n",
"\n",
"for chunk in llm.stream(messages[1]['messages']):\n",
"for chunk in llm.stream(messages[1][\"messages\"]):\n",
" print(chunk.content, end=\"\", flush=True)"
]
}
@ -145,7 +145,9 @@
"# Merge consecutive messages from the same sender into a single message\n",
"merged_messages = merge_chat_runs(raw_messages)\n",
"# Convert messages from \"Jiminy Cricket\" to AI messages\n",
"messages: List[ChatSession] = list(map_ai_messages(merged_messages, sender=\"Jiminy Cricket\"))"
"messages: List[ChatSession] = list(\n",
" map_ai_messages(merged_messages, sender=\"Jiminy Cricket\")\n",
")"
]
},
{
@ -177,7 +179,7 @@
"\n",
"llm = ChatOpenAI()\n",
"\n",
"for chunk in llm.stream(messages[0]['messages']):\n",
"for chunk in llm.stream(messages[0][\"messages\"]):\n",
" print(chunk.content, end=\"\", flush=True)"
]
}
@ -31,7 +31,7 @@
"metadata": {},
"outputs": [],
"source": [
"with open('example_data/dataset_twitter-scraper_2023-08-23_22-13-19-740.json') as f:\n",
"with open(\"example_data/dataset_twitter-scraper_2023-08-23_22-13-19-740.json\") as f:\n",
" data = json.load(f)"
]
},
@ -43,7 +43,7 @@
"outputs": [],
"source": [
"# Filter out tweets that reference other tweets, because it's a bit weird\n",
"tweets = [d[\"full_text\"] for d in data if \"t.co\" not in d['full_text']]\n",
"tweets = [d[\"full_text\"] for d in data if \"t.co\" not in d[\"full_text\"]]\n",
"# Create them as AI messages\n",
"messages = [AIMessage(content=t) for t in tweets]\n",
"# Add in a system message at the start\n",
@ -85,7 +85,6 @@
"\n",
"\n",
"class WeChatChatLoader(chat_loaders.BaseChatLoader):\n",
" \n",
" def __init__(self, path: str):\n",
" \"\"\"\n",
" Initialize the Discord chat loader.\n",
@ -143,15 +142,19 @@
" if re.match(self._message_line_regex, line):\n",
" if current_sender and current_content:\n",
" results = self._append_message_to_results(\n",
" results, current_sender, current_timestamp, current_content)\n",
" current_sender, current_timestamp = re.match(self._message_line_regex, line).groups()\n",
" results, current_sender, current_timestamp, current_content\n",
" )\n",
" current_sender, current_timestamp = re.match(\n",
" self._message_line_regex, line\n",
" ).groups()\n",
" current_content = []\n",
" else:\n",
" current_content.append(line.strip())\n",
"\n",
" if current_sender and current_content:\n",
" results = self._append_message_to_results(\n",
" results, current_sender, current_timestamp, current_content)\n",
" results, current_sender, current_timestamp, current_content\n",
" )\n",
"\n",
" return chat_loaders.ChatSession(messages=results)\n",
"\n",
@ -162,7 +165,7 @@
" Yields:\n",
" A `ChatSession` object containing the loaded chat messages.\n",
" \"\"\"\n",
" yield self._load_single_chat_session_from_txt(self.path)\n"
" yield self._load_single_chat_session_from_txt(self.path)"
]
},
{
@ -263,7 +266,7 @@
"\n",
"llm = ChatOpenAI()\n",
"\n",
"for chunk in llm.stream(messages[0]['messages']):\n",
"for chunk in llm.stream(messages[0][\"messages\"]):\n",
" print(chunk.content, end=\"\", flush=True)"
]
},
@ -135,7 +135,9 @@
"# Merge consecutive messages from the same sender into a single message\n",
"merged_messages = merge_chat_runs(raw_messages)\n",
"# Convert messages from \"Dr. Feather\" to AI messages\n",
"messages: List[ChatSession] = list(map_ai_messages(merged_messages, sender=\"Dr. Feather\"))"
"messages: List[ChatSession] = list(\n",
" map_ai_messages(merged_messages, sender=\"Dr. Feather\")\n",
")"
]
},
{
@ -167,7 +169,7 @@
"\n",
"llm = ChatOpenAI()\n",
"\n",
"for chunk in llm.stream(messages[0]['messages']):\n",
"for chunk in llm.stream(messages[0][\"messages\"]):\n",
" print(chunk.content, end=\"\", flush=True)"
]
},
@ -97,21 +97,19 @@
"metadata": {},
"outputs": [],
"source": [
"\n",
"from langchain.document_loaders.airbyte import AirbyteCDKLoader\n",
"from source_github.source import SourceGithub # plug in your own source here\n",
"\n",
"config = {\n",
" # your github configuration\n",
" \"credentials\": {\n",
" \"api_url\": \"api.github.com\",\n",
" \"personal_access_token\": \"<token>\"\n",
" },\n",
" \"credentials\": {\"api_url\": \"api.github.com\", \"personal_access_token\": \"<token>\"},\n",
" \"repository\": \"<repo>\",\n",
" \"start_date\": \"<date from which to start retrieving records from in ISO format, e.g. 2020-10-20T00:00:00Z>\"\n",
" \"start_date\": \"<date from which to start retrieving records from in ISO format, e.g. 2020-10-20T00:00:00Z>\",\n",
"}\n",
"\n",
"issues_loader = AirbyteCDKLoader(source_class=SourceGithub, config=config, stream_name=\"issues\")"
"issues_loader = AirbyteCDKLoader(\n",
" source_class=SourceGithub, config=config, stream_name=\"issues\"\n",
")"
]
},
{
@ -167,10 +165,20 @@
"source": [
"from langchain.docstore.document import Document\n",
"\n",
"def handle_record(record, id):\n",
" return Document(page_content=record.data[\"title\"] + \"\\n\" + (record.data[\"body\"] or \"\"), metadata=record.data)\n",
"\n",
"issues_loader = AirbyteCDKLoader(source_class=SourceGithub, config=config, stream_name=\"issues\", record_handler=handle_record)\n",
"def handle_record(record, id):\n",
" return Document(\n",
" page_content=record.data[\"title\"] + \"\\n\" + (record.data[\"body\"] or \"\"),\n",
" metadata=record.data,\n",
" )\n",
"\n",
"\n",
"issues_loader = AirbyteCDKLoader(\n",
" source_class=SourceGithub,\n",
" config=config,\n",
" stream_name=\"issues\",\n",
" record_handler=handle_record,\n",
")\n",
"\n",
"docs = issues_loader.load()"
]
@ -196,7 +204,9 @@
"source": [
"last_state = issues_loader.last_state # store safely\n",
"\n",
"incremental_issue_loader = AirbyteCDKLoader(source_class=SourceGithub, config=config, stream_name=\"issues\", state=last_state)\n",
"incremental_issue_loader = AirbyteCDKLoader(\n",
" source_class=SourceGithub, config=config, stream_name=\"issues\", state=last_state\n",
")\n",
"\n",
"new_docs = incremental_issue_loader.load()"
]
@ -85,14 +85,15 @@
"metadata": {},
"outputs": [],
"source": [
"\n",
"from langchain.document_loaders.airbyte import AirbyteGongLoader\n",
"\n",
"config = {\n",
" # your gong configuration\n",
"}\n",
"\n",
"loader = AirbyteGongLoader(config=config, stream_name=\"calls\") # check the documentation linked above for a list of all streams"
"loader = AirbyteGongLoader(\n",
" config=config, stream_name=\"calls\"\n",
") # check the documentation linked above for a list of all streams"
]
},
{
@ -148,10 +149,14 @@
"source": [
"from langchain.docstore.document import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",
" return Document(page_content=record.data[\"title\"], metadata=record.data)\n",
"\n",
"loader = AirbyteGongLoader(config=config, record_handler=handle_record, stream_name=\"calls\")\n",
"\n",
"loader = AirbyteGongLoader(\n",
" config=config, record_handler=handle_record, stream_name=\"calls\"\n",
")\n",
"docs = loader.load()"
]
},
@ -176,7 +181,9 @@
"source": [
"last_state = loader.last_state # store safely\n",
"\n",
"incremental_loader = AirbyteGongLoader(config=config, stream_name=\"calls\", state=last_state)\n",
"incremental_loader = AirbyteGongLoader(\n",
" config=config, stream_name=\"calls\", state=last_state\n",
")\n",
"\n",
"new_docs = incremental_loader.load()"
]
@ -87,14 +87,15 @@
"metadata": {},
"outputs": [],
"source": [
"\n",
"from langchain.document_loaders.airbyte import AirbyteHubspotLoader\n",
"\n",
"config = {\n",
" # your hubspot configuration\n",
"}\n",
"\n",
"loader = AirbyteHubspotLoader(config=config, stream_name=\"products\") # check the documentation linked above for a list of all streams"
"loader = AirbyteHubspotLoader(\n",
" config=config, stream_name=\"products\"\n",
") # check the documentation linked above for a list of all streams"
]
},
{
@ -150,10 +151,14 @@
"source": [
"from langchain.docstore.document import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",
" return Document(page_content=record.data[\"title\"], metadata=record.data)\n",
"\n",
"loader = AirbyteHubspotLoader(config=config, record_handler=handle_record, stream_name=\"products\")\n",
"\n",
"loader = AirbyteHubspotLoader(\n",
" config=config, record_handler=handle_record, stream_name=\"products\"\n",
")\n",
"docs = loader.load()"
]
},
@ -178,7 +183,9 @@
"source": [
"last_state = loader.last_state # store safely\n",
"\n",
"incremental_loader = AirbyteHubspotLoader(config=config, stream_name=\"products\", state=last_state)\n",
"incremental_loader = AirbyteHubspotLoader(\n",
" config=config, stream_name=\"products\", state=last_state\n",
")\n",
"\n",
"new_docs = incremental_loader.load()"
]
@ -92,14 +92,15 @@
"metadata": {},
"outputs": [],
"source": [
"\n",
"from langchain.document_loaders.airbyte import AirbyteSalesforceLoader\n",
"\n",
"config = {\n",
" # your salesforce configuration\n",
"}\n",
"\n",
"loader = AirbyteSalesforceLoader(config=config, stream_name=\"asset\") # check the documentation linked above for a list of all streams"
"loader = AirbyteSalesforceLoader(\n",
" config=config, stream_name=\"asset\"\n",
") # check the documentation linked above for a list of all streams"
]
},
{
@ -155,10 +156,14 @@
"source": [
"from langchain.docstore.document import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",
" return Document(page_content=record.data[\"title\"], metadata=record.data)\n",
"\n",
"loader = AirbyteSalesforceLoader(config=config, record_handler=handle_record, stream_name=\"asset\")\n",
"\n",
"loader = AirbyteSalesforceLoader(\n",
" config=config, record_handler=handle_record, stream_name=\"asset\"\n",
")\n",
"docs = loader.load()"
]
},
@ -183,7 +188,9 @@
"source": [
"last_state = loader.last_state # store safely\n",
"\n",
"incremental_loader = AirbyteSalesforceLoader(config=config, stream_name=\"asset\", state=last_state)\n",
"incremental_loader = AirbyteSalesforceLoader(\n",
" config=config, stream_name=\"asset\", state=last_state\n",
")\n",
"\n",
"new_docs = incremental_loader.load()"
]
@ -88,14 +88,15 @@
"metadata": {},
"outputs": [],
"source": [
"\n",
"from langchain.document_loaders.airbyte import AirbyteShopifyLoader\n",
"\n",
"config = {\n",
" # your shopify configuration\n",
"}\n",
"\n",
"loader = AirbyteShopifyLoader(config=config, stream_name=\"orders\") # check the documentation linked above for a list of all streams"
"loader = AirbyteShopifyLoader(\n",
" config=config, stream_name=\"orders\"\n",
") # check the documentation linked above for a list of all streams"
]
},
{
@ -151,10 +152,14 @@
"source": [
"from langchain.docstore.document import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",
" return Document(page_content=record.data[\"title\"], metadata=record.data)\n",
"\n",
"loader = AirbyteShopifyLoader(config=config, record_handler=handle_record, stream_name=\"orders\")\n",
"\n",
"loader = AirbyteShopifyLoader(\n",
" config=config, record_handler=handle_record, stream_name=\"orders\"\n",
")\n",
"docs = loader.load()"
]
},
@ -179,7 +184,9 @@
"source": [
"last_state = loader.last_state # store safely\n",
"\n",
"incremental_loader = AirbyteShopifyLoader(config=config, stream_name=\"orders\", state=last_state)\n",
"incremental_loader = AirbyteShopifyLoader(\n",
" config=config, stream_name=\"orders\", state=last_state\n",
")\n",
"\n",
"new_docs = incremental_loader.load()"
]
@ -85,14 +85,15 @@
"metadata": {},
"outputs": [],
"source": [
"\n",
"from langchain.document_loaders.airbyte import AirbyteStripeLoader\n",
"\n",
"config = {\n",
" # your stripe configuration\n",
"}\n",
"\n",
"loader = AirbyteStripeLoader(config=config, stream_name=\"invoices\") # check the documentation linked above for a list of all streams"
"loader = AirbyteStripeLoader(\n",
" config=config, stream_name=\"invoices\"\n",
") # check the documentation linked above for a list of all streams"
]
},
{
@ -148,10 +149,14 @@
"source": [
"from langchain.docstore.document import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",
" return Document(page_content=record.data[\"title\"], metadata=record.data)\n",
"\n",
"loader = AirbyteStripeLoader(config=config, record_handler=handle_record, stream_name=\"invoices\")\n",
"\n",
"loader = AirbyteStripeLoader(\n",
" config=config, record_handler=handle_record, stream_name=\"invoices\"\n",
")\n",
"docs = loader.load()"
]
},
@ -176,7 +181,12 @@
"source": [
"last_state = loader.last_state # store safely\n",
"\n",
"incremental_loader = AirbyteStripeLoader(config=config, record_handler=handle_record, stream_name=\"invoices\", state=last_state)\n",
"incremental_loader = AirbyteStripeLoader(\n",
" config=config,\n",
" record_handler=handle_record,\n",
" stream_name=\"invoices\",\n",
" state=last_state,\n",
")\n",
"\n",
"new_docs = incremental_loader.load()"
]
@ -88,14 +88,15 @@
"metadata": {},
"outputs": [],
"source": [
"\n",
"from langchain.document_loaders.airbyte import AirbyteTypeformLoader\n",
"\n",
"config = {\n",
" # your typeform configuration\n",
"}\n",
"\n",
"loader = AirbyteTypeformLoader(config=config, stream_name=\"forms\") # check the documentation linked above for a list of all streams"
"loader = AirbyteTypeformLoader(\n",
" config=config, stream_name=\"forms\"\n",
") # check the documentation linked above for a list of all streams"
]
},
{
@ -151,10 +152,14 @@
"source": [
"from langchain.docstore.document import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",
" return Document(page_content=record.data[\"title\"], metadata=record.data)\n",
"\n",
"loader = AirbyteTypeformLoader(config=config, record_handler=handle_record, stream_name=\"forms\")\n",
"\n",
"loader = AirbyteTypeformLoader(\n",
" config=config, record_handler=handle_record, stream_name=\"forms\"\n",
")\n",
"docs = loader.load()"
]
},
@ -179,7 +184,9 @@
"source": [
"last_state = loader.last_state # store safely\n",
"\n",
"incremental_loader = AirbyteTypeformLoader(config=config, record_handler=handle_record, stream_name=\"forms\", state=last_state)\n",
"incremental_loader = AirbyteTypeformLoader(\n",
" config=config, record_handler=handle_record, stream_name=\"forms\", state=last_state\n",
")\n",
"\n",
"new_docs = incremental_loader.load()"
]
@ -89,14 +89,15 @@
"metadata": {},
"outputs": [],
"source": [
"\n",
"from langchain.document_loaders.airbyte import AirbyteZendeskSupportLoader\n",
"\n",
"config = {\n",
" # your zendesk-support configuration\n",
"}\n",
"\n",
"loader = AirbyteZendeskSupportLoader(config=config, stream_name=\"tickets\") # check the documentation linked above for a list of all streams"
"loader = AirbyteZendeskSupportLoader(\n",
" config=config, stream_name=\"tickets\"\n",
") # check the documentation linked above for a list of all streams"
]
},
{
@ -152,10 +153,14 @@
"source": [
"from langchain.docstore.document import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",
" return Document(page_content=record.data[\"title\"], metadata=record.data)\n",
"\n",
"loader = AirbyteZendeskSupportLoader(config=config, record_handler=handle_record, stream_name=\"tickets\")\n",
"\n",
"loader = AirbyteZendeskSupportLoader(\n",
" config=config, record_handler=handle_record, stream_name=\"tickets\"\n",
")\n",
"docs = loader.load()"
]
},
@ -180,7 +185,9 @@
"source": [
"last_state = loader.last_state # store safely\n",
"\n",
"incremental_loader = AirbyteZendeskSupportLoader(config=config, stream_name=\"tickets\", state=last_state)\n",
"incremental_loader = AirbyteZendeskSupportLoader(\n",
" config=config, stream_name=\"tickets\", state=last_state\n",
")\n",
"\n",
"new_docs = incremental_loader.load()"
]
@ -305,7 +305,7 @@
}
],
"source": [
"docs[0].metadata['geometry']"
"docs[0].metadata[\"geometry\"]"
]
},
{
@ -177,15 +177,11 @@
"source": [
"import assemblyai as aai\n",
"\n",
"config = aai.TranscriptionConfig(speaker_labels=True,\n",
" auto_chapters=True,\n",
" entity_detection=True\n",
"config = aai.TranscriptionConfig(\n",
" speaker_labels=True, auto_chapters=True, entity_detection=True\n",
")\n",
"\n",
"loader = AssemblyAIAudioTranscriptLoader(\n",
" file_path=\"./your_file.mp3\",\n",
" config=config\n",
")"
"loader = AssemblyAIAudioTranscriptLoader(file_path=\"./your_file.mp3\", config=config)"
]
},
{
@ -204,8 +200,7 @@
"outputs": [],
"source": [
"loader = AssemblyAIAudioTranscriptLoader(\n",
" file_path=\"./your_file.mp3\",\n",
" api_key=\"YOUR_KEY\"\n",
" file_path=\"./your_file.mp3\", api_key=\"YOUR_KEY\"\n",
")"
]
}
@ -46,6 +46,7 @@
],
"source": [
"from langchain.document_loaders import AsyncChromiumLoader\n",
"\n",
"urls = [\"https://www.wsj.com\"]\n",
"loader = AsyncChromiumLoader(urls)\n",
"docs = loader.load()\n",
@ -71,6 +72,7 @@
],
"source": [
"from langchain.document_transformers import Html2TextTransformer\n",
"\n",
"html2text = Html2TextTransformer()\n",
"docs_transformed = html2text.transform_documents(docs)\n",
"docs_transformed[0].page_content[0:500]"
@ -119,7 +119,9 @@
"execution_count": null,
"outputs": [],
"source": [
"loader = S3DirectoryLoader(\"testing-hwc\", aws_access_key_id=\"xxxx\", aws_secret_access_key=\"yyyy\")"
"loader = S3DirectoryLoader(\n",
" \"testing-hwc\", aws_access_key_id=\"xxxx\", aws_secret_access_key=\"yyyy\"\n",
")"
],
"metadata": {},
"id": "f485ec8c"
@ -82,7 +82,9 @@
"execution_count": null,
"outputs": [],
"source": [
"loader = S3FileLoader(\"testing-hwc\", \"fake.docx\", aws_access_key_id=\"xxxx\", aws_secret_access_key=\"yyyy\")"
"loader = S3FileLoader(\n",
" \"testing-hwc\", \"fake.docx\", aws_access_key_id=\"xxxx\", aws_secret_access_key=\"yyyy\"\n",
")"
],
"metadata": {},
"id": "43106ee8"
@ -80,10 +80,10 @@
"outputs": [],
"source": [
"from langchain.document_loaders.pdf import DocumentIntelligenceLoader\n",
"\n",
"loader = DocumentIntelligenceLoader(\n",
" \"<Local_filename>\",\n",
" client=document_analysis_client,\n",
" model=\"<model_name>\") # e.g. prebuilt-document\n",
" \"<Local_filename>\", client=document_analysis_client, model=\"<model_name>\"\n",
") # e.g. prebuilt-document\n",
"\n",
"documents = loader.load()"
]
@ -27,7 +27,7 @@
"metadata": {},
"outputs": [],
"source": [
"loader = ConcurrentLoader.from_filesystem('example_data/', glob=\"**/*.txt\")"
"loader = ConcurrentLoader.from_filesystem(\"example_data/\", glob=\"**/*.txt\")"
]
},
{
@ -117,27 +117,27 @@
"outputs": [],
"source": [
"# Given string containing page content\n",
"page_content = 'Users View City, None'\n",
"page_content = \"Users View City, None\"\n",
"\n",
"# Given dictionary containing metadata\n",
"metadata = {\n",
" 'table_name': 'users_view',\n",
" 'column_name': 'users_view.city',\n",
" 'column_data_type': 'string',\n",
" 'column_title': 'Users View City',\n",
" 'column_description': 'None',\n",
" 'column_member_type': 'dimension',\n",
" 'column_values': [\n",
" 'Austin',\n",
" 'Chicago',\n",
" 'Los Angeles',\n",
" 'Mountain View',\n",
" 'New York',\n",
" 'Palo Alto',\n",
" 'San Francisco',\n",
" 'Seattle'\n",
" \"table_name\": \"users_view\",\n",
" \"column_name\": \"users_view.city\",\n",
" \"column_data_type\": \"string\",\n",
" \"column_title\": \"Users View City\",\n",
" \"column_description\": \"None\",\n",
" \"column_member_type\": \"dimension\",\n",
" \"column_values\": [\n",
" \"Austin\",\n",
" \"Chicago\",\n",
" \"Los Angeles\",\n",
" \"Mountain View\",\n",
" \"New York\",\n",
" \"Palo Alto\",\n",
" \"San Francisco\",\n",
" \"Seattle\",\n",
" ],\n",
" 'cube_data_obj_type': 'view'\n",
" \"cube_data_obj_type\": \"view\",\n",
"}"
]
}
Some files were not shown because too many files have changed in this diff