Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-12 12:11:34 +00:00)

Compare commits: 238 commits, eugene/pyd ... langchain-
@@ -10,7 +10,7 @@ You can use the dev container configuration in this folder to build and run the
 You may use the button above, or follow these steps to open this repo in a Codespace:
 1. Click the **Code** drop-down menu at the top of https://github.com/langchain-ai/langchain.
 1. Click on the **Codespaces** tab.
-1. Click **Create codespace on master** .
+1. Click **Create codespace on master**.
 
 For more info, check out the [GitHub documentation](https://docs.github.com/en/free-pro-team@latest/github/developing-online-with-codespaces/creating-a-codespace#creating-a-codespace).
.github/scripts/check_diff.py (vendored, 42 changed lines)
@@ -1,7 +1,11 @@
 import json
 import sys
 import os
-from typing import Dict
+from typing import Dict, List, Set
 
 import tomllib
+from collections import defaultdict
+import glob
 
 LANGCHAIN_DIRS = [
     "libs/core",
@@ -11,6 +15,34 @@ LANGCHAIN_DIRS = [
     "libs/experimental",
 ]
 
+
+def dependents_graph() -> dict:
+    dependents = defaultdict(set)
+
+    for path in glob.glob("./libs/**/pyproject.toml", recursive=True):
+        if "template" in path:
+            continue
+        with open(path, "rb") as f:
+            pyproject = tomllib.load(f)['tool']['poetry']
+        pkg_dir = "libs" + "/".join(path.split("libs")[1].split("/")[:-1])
+        for dep in pyproject['dependencies']:
+            if "langchain" in dep:
+                dependents[dep].add(pkg_dir)
+    return dependents
+
+
+def add_dependents(dirs_to_eval: Set[str], dependents: dict) -> List[str]:
+    updated = set()
+    for dir_ in dirs_to_eval:
+        # handle core manually because it has so many dependents
+        if "core" in dir_:
+            updated.add(dir_)
+            continue
+        pkg = "langchain-" + dir_.split("/")[-1]
+        updated.update(dependents[pkg])
+        updated.add(dir_)
+    return list(updated)
+
+
 if __name__ == "__main__":
     files = sys.argv[1:]
 
@@ -81,11 +113,13 @@ if __name__ == "__main__":
             docs_edited = True
         dirs_to_run["lint"].add(".")
 
+    dependents = dependents_graph()
+
     outputs = {
-        "dirs-to-lint": list(
-            dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"]
+        "dirs-to-lint": add_dependents(
+            dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"], dependents
         ),
-        "dirs-to-test": list(dirs_to_run["test"] | dirs_to_run["extended-test"]),
+        "dirs-to-test": add_dependents(dirs_to_run["test"] | dirs_to_run["extended-test"], dependents),
         "dirs-to-extended-test": list(dirs_to_run["extended-test"]),
         "docs-edited": "true" if docs_edited else "",
     }
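The new helpers above map each `langchain-*` package to the directories that depend on it and then widen the set of directories to lint and test accordingly. A minimal, self-contained sketch of that expansion step (the package names and the mapping below are hypothetical, for illustration only):

```python
from typing import Dict, List, Set


def add_dependents(dirs_to_eval: Set[str], dependents: Dict[str, Set[str]]) -> List[str]:
    """Widen a set of changed directories with the directories that depend on them."""
    updated = set()
    for dir_ in dirs_to_eval:
        # core is special-cased upstream because nearly everything depends on it
        if "core" in dir_:
            updated.add(dir_)
            continue
        pkg = "langchain-" + dir_.split("/")[-1]
        updated.update(dependents.get(pkg, set()))
        updated.add(dir_)
    return list(updated)


# Hypothetical dependents graph: packages that declare a dependency on langchain-openai.
dependents = {"langchain-openai": {"libs/langchain", "libs/partners/together"}}

print(sorted(add_dependents({"libs/partners/openai"}, dependents)))
# -> ['libs/langchain', 'libs/partners/openai', 'libs/partners/together']
```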
@@ -24,6 +24,7 @@ jobs:
           - "3.9"
           - "3.10"
           - "3.11"
+          - "3.12"
     name: "poetry run pytest -m compile tests/integration_tests #${{ matrix.python-version }}"
     steps:
       - uses: actions/checkout@v4
.github/workflows/_dependencies.yml (vendored, 1 changed line)

@@ -28,6 +28,7 @@ jobs:
           - "3.9"
           - "3.10"
           - "3.11"
+          - "3.12"
     name: dependency checks ${{ matrix.python-version }}
     steps:
       - uses: actions/checkout@v4
.github/workflows/_integration_test.yml (vendored, 8 changed lines)

@@ -12,7 +12,6 @@ env:
 
 jobs:
   build:
-    environment: Scheduled testing
     defaults:
       run:
         working-directory: ${{ inputs.working-directory }}
@@ -53,8 +52,15 @@ jobs:
         shell: bash
         env:
           AI21_API_KEY: ${{ secrets.AI21_API_KEY }}
           FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
+          GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
           ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+          AZURE_OPENAI_API_VERSION: ${{ secrets.AZURE_OPENAI_API_VERSION }}
+          AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }}
+          AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
+          AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_CHAT_DEPLOYMENT_NAME }}
+          AZURE_OPENAI_LLM_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LLM_DEPLOYMENT_NAME }}
+          AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
           MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
           TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
.github/workflows/_lint.yml (vendored, 2 changed lines)

@@ -34,7 +34,7 @@ jobs:
        # so linting on fewer versions makes CI faster.
        python-version:
          - "3.8"
          - "3.11"
          - "3.12"
    steps:
      - uses: actions/checkout@v4
.github/workflows/_test.yml (vendored, 1 changed line)

@@ -28,6 +28,7 @@ jobs:
           - "3.9"
           - "3.10"
           - "3.11"
+          - "3.12"
     name: "make test #${{ matrix.python-version }}"
     steps:
       - uses: actions/checkout@v4
.github/workflows/_test_doc_imports.yml (vendored, 2 changed lines)

@@ -12,7 +12,7 @@ jobs:
     strategy:
       matrix:
         python-version:
           - "3.11"
           - "3.12"
     name: "check doc imports #${{ matrix.python-version }}"
     steps:
       - uses: actions/checkout@v4
.github/workflows/check-broken-links.yml (vendored, 1 changed line)

@@ -7,6 +7,7 @@ on:
 
 jobs:
   check-links:
+    if: github.repository_owner == 'langchain-ai'
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
.github/workflows/check_diffs.yml (vendored, 7 changed lines)

@@ -26,7 +26,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions/setup-python@v5
         with:
-          python-version: '3.10'
+          python-version: '3.11'
       - id: files
         uses: Ana06/get-changed-files@v2.2.0
       - id: set-matrix
@@ -104,6 +104,7 @@ jobs:
           - "3.9"
           - "3.10"
           - "3.11"
+          - "3.12"
     runs-on: ubuntu-latest
     defaults:
       run:
@@ -123,7 +124,9 @@ jobs:
         shell: bash
         run: |
           echo "Running extended tests, installing dependencies with poetry..."
-          poetry install -E extended_testing --with test
+          poetry install --with test
+          poetry run pip install uv
+          poetry run uv pip install -r extended_testing_deps.txt
 
       - name: Run extended tests
         run: make extended_tests
.github/workflows/check_new_docs.yml (vendored, new file, 31 lines)

@@ -0,0 +1,31 @@
+---
+name: Integration docs lint
+
+on:
+  push:
+    branches: [master]
+  pull_request:
+
+# If another push to the same PR or branch happens while this workflow is still running,
+# cancel the earlier run in favor of the next run.
+#
+# There's no point in testing an outdated version of the code. GitHub only allows
+# a limited number of job runners to be active at the same time, so it's better to cancel
+# pointless jobs early so that more useful jobs can run sooner.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: '3.10'
+      - id: files
+        uses: Ana06/get-changed-files@v2.2.0
+      - name: Check new docs
+        run: |
+          python docs/scripts/check_templates.py ${{ steps.files.outputs.added }}
.github/workflows/scheduled_test.yml (vendored, 9 changed lines)

@@ -10,6 +10,7 @@ env:
 
 jobs:
   build:
+    if: github.repository_owner == 'langchain-ai'
     name: Python ${{ matrix.python-version }} - ${{ matrix.working-directory }}
     runs-on: ubuntu-latest
     strategy:
@@ -30,7 +31,6 @@ jobs:
           - "libs/partners/google-vertexai"
           - "libs/partners/google-genai"
           - "libs/partners/aws"
-          - "libs/partners/nvidia-ai-endpoints"
 
     steps:
       - uses: actions/checkout@v4
@@ -40,10 +40,6 @@ jobs:
         with:
           repository: langchain-ai/langchain-google
           path: langchain-google
-      - uses: actions/checkout@v4
-        with:
-          repository: langchain-ai/langchain-nvidia
-          path: langchain-nvidia
       - uses: actions/checkout@v4
         with:
           repository: langchain-ai/langchain-cohere
@@ -58,11 +54,9 @@ jobs:
           rm -rf \
             langchain/libs/partners/google-genai \
             langchain/libs/partners/google-vertexai \
-            langchain/libs/partners/nvidia-ai-endpoints \
             langchain/libs/partners/cohere
           mv langchain-google/libs/genai langchain/libs/partners/google-genai
           mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
-          mv langchain-nvidia/libs/ai-endpoints langchain/libs/partners/nvidia-ai-endpoints
           mv langchain-cohere/libs/cohere langchain/libs/partners/cohere
           mv langchain-aws/libs/aws langchain/libs/partners/aws
@@ -122,7 +116,6 @@ jobs:
           rm -rf \
             langchain/libs/partners/google-genai \
             langchain/libs/partners/google-vertexai \
-            langchain/libs/partners/nvidia-ai-endpoints \
             langchain/libs/partners/cohere \
             langchain/libs/partners/aws
@@ -106,7 +106,7 @@ Retrieval Augmented Generation involves [loading data](https://python.langchain.
 
 **🤖 Agents**
 
-Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete done. LangChain provides a [standard interface for agents](https://python.langchain.com/v0.2/docs/concepts/#agents) along with the [LangGraph](https://github.com/langchain-ai/langgraph) extension for building custom agents.
+Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete. LangChain provides a [standard interface for agents](https://python.langchain.com/v0.2/docs/concepts/#agents) along with the [LangGraph](https://github.com/langchain-ai/langgraph) extension for building custom agents.
 
 ## 📖 Documentation
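For context on the decide, act, observe loop the README paragraph describes, here is a minimal illustrative sketch (this is not LangChain's implementation; `llm_decide` and `tools` are hypothetical stand-ins for a real model and toolset):

```python
# Illustration only: the decide -> act -> observe loop described above.
def run_agent(task: str, llm_decide, tools: dict, max_steps: int = 10) -> str:
    observations = []
    for _ in range(max_steps):
        decision = llm_decide(task, observations)  # decide which Action to take
        if decision["action"] == "finish":
            return decision["answer"]  # task is complete
        result = tools[decision["action"]](decision["input"])  # take that Action
        observations.append(result)  # observe the result, then repeat
    return "stopped: step limit reached"
```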
@@ -46,7 +46,7 @@
     "from langchain_experimental.autonomous_agents import AutoGPT\n",
     "from langchain_openai import ChatOpenAI\n",
     "\n",
-    "# Needed synce jupyter runs an async eventloop\n",
+    "# Needed since jupyter runs an async eventloop\n",
     "nest_asyncio.apply()"
    ]
   },
@@ -273,7 +273,7 @@
    "source": [
     "# Tool schema for querying SQL db\n",
     "class create_df_from_sql(BaseModel):\n",
-    "    \"\"\"Execute a PostgreSQL SELECT statement and use the results to create a DataFrame with the given colum names.\"\"\"\n",
+    "    \"\"\"Execute a PostgreSQL SELECT statement and use the results to create a DataFrame with the given column names.\"\"\"\n",
     "\n",
     "    select_query: str = Field(..., description=\"A PostgreSQL SELECT statement.\")\n",
     "    # We're going to convert the results to a Pandas DataFrame that we pass\n",
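As a rough usage sketch (not taken from the notebook), a Pydantic schema like `create_df_from_sql` can be bound to a chat model as a tool so the model emits arguments that match the schema; the model name below is an assumption:

```python
# Sketch only: bind the schema above to a chat model as a tool.
from pydantic import BaseModel, Field
from langchain_openai import ChatOpenAI


class create_df_from_sql(BaseModel):
    """Execute a PostgreSQL SELECT statement and use the results to create a DataFrame with the given column names."""

    select_query: str = Field(..., description="A PostgreSQL SELECT statement.")


llm = ChatOpenAI(model="gpt-4o", temperature=0).bind_tools([create_df_from_sql])
msg = llm.invoke("List the ten most recent orders with their totals")
print(msg.tool_calls)  # arguments should match the schema, e.g. {"select_query": "..."}
```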
cookbook/nomic_multimodal_rag.ipynb (new file, 497 lines)
@@ -0,0 +1,497 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "9fc3897d-176f-4729-8fd1-cfb4add53abd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Nomic multi-modal RAG\n",
|
||||
"\n",
|
||||
"Many documents contain a mixture of content types, including text and images. \n",
|
||||
"\n",
|
||||
"Yet, information captured in images is lost in most RAG applications.\n",
|
||||
"\n",
|
||||
"With the emergence of multimodal LLMs, like [GPT-4V](https://openai.com/research/gpt-4v-system-card), it is worth considering how to utilize images in RAG:\n",
|
||||
"\n",
|
||||
"In this demo we\n",
|
||||
"\n",
|
||||
"* Use multimodal embeddings from Nomic Embed [Vision](https://huggingface.co/nomic-ai/nomic-embed-vision-v1.5) and [Text](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) to embed images and text\n",
|
||||
"* Retrieve both using similarity search\n",
|
||||
"* Pass raw images and text chunks to a multimodal LLM for answer synthesis \n",
|
||||
"\n",
|
||||
"## Signup\n",
|
||||
"\n",
|
||||
"Get your API token, then run:\n",
|
||||
"```\n",
|
||||
"! nomic login\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Then run with your generated API token \n",
|
||||
"```\n",
|
||||
"! nomic login < token > \n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"## Packages\n",
|
||||
"\n",
|
||||
"For `unstructured`, you will also need `poppler` ([installation instructions](https://pdf2image.readthedocs.io/en/latest/installation.html)) and `tesseract` ([installation instructions](https://tesseract-ocr.github.io/tessdoc/Installation.html)) in your system."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "54926b9b-75c2-4cd4-8f14-b3882a0d370b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! nomic login token"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "febbc459-ebba-4c1a-a52b-fed7731593f8",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain # (newest versions required for multi-modal)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "acbdc603-39e2-4a5f-836c-2bbaecd46b0b",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# lock to 0.10.19 due to a persistent bug in more recent versions\n",
|
||||
"! pip install \"unstructured[all-docs]==0.10.19\" pillow pydantic lxml pillow matplotlib tiktoken"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1e94b3fb-8e3e-4736-be0a-ad881626c7bd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Data Loading\n",
|
||||
"\n",
|
||||
"### Partition PDF text and images\n",
|
||||
" \n",
|
||||
"Let's look at an example pdfs containing interesting images.\n",
|
||||
"\n",
|
||||
"1/ Art from the J Paul Getty museum:\n",
|
||||
"\n",
|
||||
" * Here is a [zip file](https://drive.google.com/file/d/18kRKbq2dqAhhJ3DfZRnYcTBEUfYxe1YR/view?usp=sharing) with the PDF and the already extracted images. \n",
|
||||
"* https://www.getty.edu/publications/resources/virtuallibrary/0892360224.pdf\n",
|
||||
"\n",
|
||||
"2/ Famous photographs from library of congress:\n",
|
||||
"\n",
|
||||
"* https://www.loc.gov/lcm/pdf/LCM_2020_1112.pdf\n",
|
||||
"* We'll use this as an example below\n",
|
||||
"\n",
|
||||
"We can use `partition_pdf` below from [Unstructured](https://unstructured-io.github.io/unstructured/introduction.html#key-concepts) to extract text and images.\n",
|
||||
"\n",
|
||||
"To supply this to extract the images:\n",
|
||||
"```\n",
|
||||
"extract_images_in_pdf=True\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"If using this zip file, then you can simply process the text only with:\n",
|
||||
"```\n",
|
||||
"extract_images_in_pdf=False\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9646b524-71a7-4b2a-bdc8-0b81f77e968f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Folder with pdf and extracted images\n",
|
||||
"from pathlib import Path\n",
|
||||
"\n",
|
||||
"# replace with actual path to images\n",
|
||||
"path = Path(\"../art\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "77f096ab-a933-41d0-8f4e-1efc83998fc3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"path.resolve()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bc4839c0-8773-4a07-ba59-5364501269b2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Extract images, tables, and chunk text\n",
|
||||
"from unstructured.partition.pdf import partition_pdf\n",
|
||||
"\n",
|
||||
"raw_pdf_elements = partition_pdf(\n",
|
||||
" filename=str(path.resolve()) + \"/getty.pdf\",\n",
|
||||
" extract_images_in_pdf=False,\n",
|
||||
" infer_table_structure=True,\n",
|
||||
" chunking_strategy=\"by_title\",\n",
|
||||
" max_characters=4000,\n",
|
||||
" new_after_n_chars=3800,\n",
|
||||
" combine_text_under_n_chars=2000,\n",
|
||||
" image_output_dir_path=path,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "969545ad",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Categorize text elements by type\n",
|
||||
"tables = []\n",
|
||||
"texts = []\n",
|
||||
"for element in raw_pdf_elements:\n",
|
||||
" if \"unstructured.documents.elements.Table\" in str(type(element)):\n",
|
||||
" tables.append(str(element))\n",
|
||||
" elif \"unstructured.documents.elements.CompositeElement\" in str(type(element)):\n",
|
||||
" texts.append(str(element))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5d8e6349-1547-4cbf-9c6f-491d8610ec10",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multi-modal embeddings with our document\n",
|
||||
"\n",
|
||||
"We will use [nomic-embed-vision-v1.5](https://huggingface.co/nomic-ai/nomic-embed-vision-v1.5) embeddings. This model is aligned \n",
|
||||
"to [nomic-embed-text-v1.5](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) allowing for multimodal semantic search and Multimodal RAG!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4bc15842-cb95-4f84-9eb5-656b0282a800",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import uuid\n",
|
||||
"\n",
|
||||
"import chromadb\n",
|
||||
"import numpy as np\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_nomic import NomicEmbeddings\n",
|
||||
"from PIL import Image as _PILImage\n",
|
||||
"\n",
|
||||
"# Create chroma\n",
|
||||
"text_vectorstore = Chroma(\n",
|
||||
" collection_name=\"mm_rag_clip_photos_text\",\n",
|
||||
" embedding_function=NomicEmbeddings(\n",
|
||||
" vision_model=\"nomic-embed-vision-v1.5\", model=\"nomic-embed-text-v1.5\"\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"image_vectorstore = Chroma(\n",
|
||||
" collection_name=\"mm_rag_clip_photos_image\",\n",
|
||||
" embedding_function=NomicEmbeddings(\n",
|
||||
" vision_model=\"nomic-embed-vision-v1.5\", model=\"nomic-embed-text-v1.5\"\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Get image URIs with .jpg extension only\n",
|
||||
"image_uris = sorted(\n",
|
||||
" [\n",
|
||||
" os.path.join(path, image_name)\n",
|
||||
" for image_name in os.listdir(path)\n",
|
||||
" if image_name.endswith(\".jpg\")\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Add images\n",
|
||||
"image_vectorstore.add_images(uris=image_uris)\n",
|
||||
"\n",
|
||||
"# Add documents\n",
|
||||
"text_vectorstore.add_texts(texts=texts)\n",
|
||||
"\n",
|
||||
"# Make retriever\n",
|
||||
"image_retriever = image_vectorstore.as_retriever()\n",
|
||||
"text_retriever = text_vectorstore.as_retriever()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "02a186d0-27e0-4820-8092-63b5349dd25d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## RAG\n",
|
||||
"\n",
|
||||
"`vectorstore.add_images` will store / retrieve images as base64 encoded strings.\n",
|
||||
"\n",
|
||||
"These can be passed to [GPT-4V](https://platform.openai.com/docs/guides/vision)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "344f56a8-0dc3-433e-851c-3f7600c7a72b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import base64\n",
|
||||
"import io\n",
|
||||
"from io import BytesIO\n",
|
||||
"\n",
|
||||
"import numpy as np\n",
|
||||
"from PIL import Image\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def resize_base64_image(base64_string, size=(128, 128)):\n",
|
||||
" \"\"\"\n",
|
||||
" Resize an image encoded as a Base64 string.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" base64_string (str): Base64 string of the original image.\n",
|
||||
" size (tuple): Desired size of the image as (width, height).\n",
|
||||
"\n",
|
||||
" Returns:\n",
|
||||
" str: Base64 string of the resized image.\n",
|
||||
" \"\"\"\n",
|
||||
" # Decode the Base64 string\n",
|
||||
" img_data = base64.b64decode(base64_string)\n",
|
||||
" img = Image.open(io.BytesIO(img_data))\n",
|
||||
"\n",
|
||||
" # Resize the image\n",
|
||||
" resized_img = img.resize(size, Image.LANCZOS)\n",
|
||||
"\n",
|
||||
" # Save the resized image to a bytes buffer\n",
|
||||
" buffered = io.BytesIO()\n",
|
||||
" resized_img.save(buffered, format=img.format)\n",
|
||||
"\n",
|
||||
" # Encode the resized image to Base64\n",
|
||||
" return base64.b64encode(buffered.getvalue()).decode(\"utf-8\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def is_base64(s):\n",
|
||||
" \"\"\"Check if a string is Base64 encoded\"\"\"\n",
|
||||
" try:\n",
|
||||
" return base64.b64encode(base64.b64decode(s)) == s.encode()\n",
|
||||
" except Exception:\n",
|
||||
" return False\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def split_image_text_types(docs):\n",
|
||||
" \"\"\"Split numpy array images and texts\"\"\"\n",
|
||||
" images = []\n",
|
||||
" text = []\n",
|
||||
" for doc in docs:\n",
|
||||
" doc = doc.page_content # Extract Document contents\n",
|
||||
" if is_base64(doc):\n",
|
||||
" # Resize image to avoid OAI server error\n",
|
||||
" images.append(\n",
|
||||
" resize_base64_image(doc, size=(250, 250))\n",
|
||||
" ) # base64 encoded str\n",
|
||||
" else:\n",
|
||||
" text.append(doc)\n",
|
||||
" return {\"images\": images, \"texts\": text}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "23a2c1d8-fea6-4152-b184-3172dd46c735",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Currently, we format the inputs using a `RunnableLambda` while we add image support to `ChatPromptTemplates`.\n",
|
||||
"\n",
|
||||
"Our runnable follows the classic RAG flow - \n",
|
||||
"\n",
|
||||
"* We first compute the context (both \"texts\" and \"images\" in this case) and the question (just a RunnablePassthrough here) \n",
|
||||
"* Then we pass this into our prompt template, which is a custom function that formats the message for the gpt-4-vision-preview model. \n",
|
||||
"* And finally we parse the output as a string."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5d8919dc-c238-4746-86ba-45d940a7d260",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4c93fab3-74c4-4f1d-958a-0bc4cdd0797e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def prompt_func(data_dict):\n",
|
||||
" # Joining the context texts into a single string\n",
|
||||
" formatted_texts = \"\\n\".join(data_dict[\"text_context\"][\"texts\"])\n",
|
||||
" messages = []\n",
|
||||
"\n",
|
||||
" # Adding image(s) to the messages if present\n",
|
||||
" if data_dict[\"image_context\"][\"images\"]:\n",
|
||||
" image_message = {\n",
|
||||
" \"type\": \"image_url\",\n",
|
||||
" \"image_url\": {\n",
|
||||
" \"url\": f\"data:image/jpeg;base64,{data_dict['image_context']['images'][0]}\"\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
" messages.append(image_message)\n",
|
||||
"\n",
|
||||
" # Adding the text message for analysis\n",
|
||||
" text_message = {\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" \"text\": (\n",
|
||||
" \"As an expert art critic and historian, your task is to analyze and interpret images, \"\n",
|
||||
" \"considering their historical and cultural significance. Alongside the images, you will be \"\n",
|
||||
" \"provided with related text to offer context. Both will be retrieved from a vectorstore based \"\n",
|
||||
" \"on user-input keywords. Please use your extensive knowledge and analytical skills to provide a \"\n",
|
||||
" \"comprehensive summary that includes:\\n\"\n",
|
||||
" \"- A detailed description of the visual elements in the image.\\n\"\n",
|
||||
" \"- The historical and cultural context of the image.\\n\"\n",
|
||||
" \"- An interpretation of the image's symbolism and meaning.\\n\"\n",
|
||||
" \"- Connections between the image and the related text.\\n\\n\"\n",
|
||||
" f\"User-provided keywords: {data_dict['question']}\\n\\n\"\n",
|
||||
" \"Text and / or tables:\\n\"\n",
|
||||
" f\"{formatted_texts}\"\n",
|
||||
" ),\n",
|
||||
" }\n",
|
||||
" messages.append(text_message)\n",
|
||||
"\n",
|
||||
" return [HumanMessage(content=messages)]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(temperature=0, model=\"gpt-4-vision-preview\", max_tokens=1024)\n",
|
||||
"\n",
|
||||
"# RAG pipeline\n",
|
||||
"chain = (\n",
|
||||
" {\n",
|
||||
" \"text_context\": text_retriever | RunnableLambda(split_image_text_types),\n",
|
||||
" \"image_context\": image_retriever | RunnableLambda(split_image_text_types),\n",
|
||||
" \"question\": RunnablePassthrough(),\n",
|
||||
" }\n",
|
||||
" | RunnableLambda(prompt_func)\n",
|
||||
" | model\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1566096d-97c2-4ddc-ba4a-6ef88c525e4e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Test retrieval and run RAG"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "90121e56-674b-473b-871d-6e4753fd0c45",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from IPython.display import HTML, display\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def plt_img_base64(img_base64):\n",
|
||||
" # Create an HTML img tag with the base64 string as the source\n",
|
||||
" image_html = f'<img src=\"data:image/jpeg;base64,{img_base64}\" />'\n",
|
||||
"\n",
|
||||
" # Display the image by rendering the HTML\n",
|
||||
" display(HTML(image_html))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"docs = text_retriever.invoke(\"Women with children\", k=5)\n",
|
||||
"for doc in docs:\n",
|
||||
" if is_base64(doc.page_content):\n",
|
||||
" plt_img_base64(doc.page_content)\n",
|
||||
" else:\n",
|
||||
" print(doc.page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "44eaa532-f035-4c04-b578-02339d42554c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = image_retriever.invoke(\"Women with children\", k=5)\n",
|
||||
"for doc in docs:\n",
|
||||
" if is_base64(doc.page_content):\n",
|
||||
" plt_img_base64(doc.page_content)\n",
|
||||
" else:\n",
|
||||
" print(doc.page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "69fb15fd-76fc-49b4-806d-c4db2990027d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain.invoke(\"Women with children\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "227f08b8-e732-4089-b65c-6eb6f9e48f15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can see the images retrieved in the LangSmith trace:\n",
|
||||
"\n",
|
||||
"LangSmith [trace](https://smith.langchain.com/public/69c558a5-49dc-4c60-a49b-3adbb70f74c5/r/e872c2c8-528c-468f-aefd-8b5cd730a673)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -86,8 +86,7 @@
|
||||
"\n",
|
||||
"import oracledb\n",
|
||||
"\n",
|
||||
"# please update with your username, password, hostname and service_name\n",
|
||||
"# please make sure this user has sufficient privileges to perform all below\n",
|
||||
"# Update with your username, password, hostname, and service_name\n",
|
||||
"username = \"\"\n",
|
||||
"password = \"\"\n",
|
||||
"dsn = \"\"\n",
|
||||
@@ -97,40 +96,45 @@
|
||||
" print(\"Connection successful!\")\n",
|
||||
"\n",
|
||||
" cursor = conn.cursor()\n",
|
||||
" cursor.execute(\n",
|
||||
" \"\"\"\n",
|
||||
" begin\n",
|
||||
" -- drop user\n",
|
||||
" begin\n",
|
||||
" execute immediate 'drop user testuser cascade';\n",
|
||||
" exception\n",
|
||||
" when others then\n",
|
||||
" dbms_output.put_line('Error setting up user.');\n",
|
||||
" end;\n",
|
||||
" execute immediate 'create user testuser identified by testuser';\n",
|
||||
" execute immediate 'grant connect, unlimited tablespace, create credential, create procedure, create any index to testuser';\n",
|
||||
" execute immediate 'create or replace directory DEMO_PY_DIR as ''/scratch/hroy/view_storage/hroy_devstorage/demo/orachain''';\n",
|
||||
" execute immediate 'grant read, write on directory DEMO_PY_DIR to public';\n",
|
||||
" execute immediate 'grant create mining model to testuser';\n",
|
||||
"\n",
|
||||
" -- network access\n",
|
||||
" begin\n",
|
||||
" DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE(\n",
|
||||
" host => '*',\n",
|
||||
" ace => xs$ace_type(privilege_list => xs$name_list('connect'),\n",
|
||||
" principal_name => 'testuser',\n",
|
||||
" principal_type => xs_acl.ptype_db));\n",
|
||||
" end;\n",
|
||||
" end;\n",
|
||||
" \"\"\"\n",
|
||||
" )\n",
|
||||
" print(\"User setup done!\")\n",
|
||||
" cursor.close()\n",
|
||||
" try:\n",
|
||||
" cursor.execute(\n",
|
||||
" \"\"\"\n",
|
||||
" begin\n",
|
||||
" -- Drop user\n",
|
||||
" begin\n",
|
||||
" execute immediate 'drop user testuser cascade';\n",
|
||||
" exception\n",
|
||||
" when others then\n",
|
||||
" dbms_output.put_line('Error dropping user: ' || SQLERRM);\n",
|
||||
" end;\n",
|
||||
" \n",
|
||||
" -- Create user and grant privileges\n",
|
||||
" execute immediate 'create user testuser identified by testuser';\n",
|
||||
" execute immediate 'grant connect, unlimited tablespace, create credential, create procedure, create any index to testuser';\n",
|
||||
" execute immediate 'create or replace directory DEMO_PY_DIR as ''/scratch/hroy/view_storage/hroy_devstorage/demo/orachain''';\n",
|
||||
" execute immediate 'grant read, write on directory DEMO_PY_DIR to public';\n",
|
||||
" execute immediate 'grant create mining model to testuser';\n",
|
||||
" \n",
|
||||
" -- Network access\n",
|
||||
" begin\n",
|
||||
" DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE(\n",
|
||||
" host => '*',\n",
|
||||
" ace => xs$ace_type(privilege_list => xs$name_list('connect'),\n",
|
||||
" principal_name => 'testuser',\n",
|
||||
" principal_type => xs_acl.ptype_db)\n",
|
||||
" );\n",
|
||||
" end;\n",
|
||||
" end;\n",
|
||||
" \"\"\"\n",
|
||||
" )\n",
|
||||
" print(\"User setup done!\")\n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"User setup failed with error: {e}\")\n",
|
||||
" finally:\n",
|
||||
" cursor.close()\n",
|
||||
" conn.close()\n",
|
||||
"except Exception as e:\n",
|
||||
" print(\"User setup failed!\")\n",
|
||||
" cursor.close()\n",
|
||||
" conn.close()\n",
|
||||
" print(f\"Connection failed with error: {e}\")\n",
|
||||
" sys.exit(1)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -38,6 +38,8 @@ generate-files:
 	$(PYTHON) scripts/model_feat_table.py $(INTERMEDIATE_DIR)
 
 	$(PYTHON) scripts/document_loader_feat_table.py $(INTERMEDIATE_DIR)
 
+	$(PYTHON) scripts/copy_templates.py $(INTERMEDIATE_DIR)
+
 	wget -q https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O $(INTERMEDIATE_DIR)/langserve.md
@@ -128,11 +128,11 @@ def _load_package_modules(
     of the modules/packages are part of the package vs. 3rd party or built-in.
 
     Parameters:
-        package_directory: Path to the package directory.
-        submodule: Optional name of submodule to load.
+        package_directory (Union[str, Path]): Path to the package directory.
+        submodule (Optional[str]): Optional name of submodule to load.
 
     Returns:
-        list: A list of loaded module objects.
+        Dict[str, ModuleMembers]: A dictionary where keys are module names and values are ModuleMembers objects.
     """
     package_path = (
         Path(package_directory)
@@ -4,6 +4,9 @@ LangChain implements the latest research in the field of Natural Language Proces
 This page contains `arXiv` papers referenced in the LangChain Documentation, API Reference,
 Templates, and Cookbooks.
 
+From the opposite direction, scientists use LangChain in research and reference LangChain in the research papers.
+Here you find [such papers](https://arxiv.org/search/?query=langchain&searchtype=all&source=header).
+
 ## Summary
 
 | arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation|
@@ -25,17 +28,18 @@ This page contains `arXiv` papers referenced in the LangChain Documentation, API
|
||||
| `2303.17760v2` [CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society](http://arxiv.org/abs/2303.17760v2) | Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al. | 2023-03-31 | `Cookbook:` [camel_role_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)
|
||||
| `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan, et al. | 2023-03-30 | `API:` [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents), `Cookbook:` [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)
|
||||
| `2303.08774v6` [GPT-4 Technical Report](http://arxiv.org/abs/2303.08774v6) | OpenAI, Josh Achiam, Steven Adler, et al. | 2023-03-15 | `Docs:` [docs/integrations/vectorstores/mongodb_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)
|
||||
| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023-01-24 | `API:` [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_huggingface.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI)
|
||||
| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022-12-20 | `API:` [langchain.chains...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde), `Cookbook:` [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
|
||||
| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023-01-24 | `API:` [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
|
||||
| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022-12-20 | `API:` [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde), `Cookbook:` [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
|
||||
| `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) | Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al. | 2022-12-12 | `API:` [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)
|
||||
| `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022-11-25 | `API:` [langchain_core.example_selectors...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
|
||||
| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022-11-18 | `API:` [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), [langchain_experimental.pal_chain...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), `Cookbook:` [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
|
||||
| `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022-11-25 | `API:` [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
|
||||
| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022-11-18 | `API:` [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), `Cookbook:` [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
|
||||
| `2210.03629v3` [ReAct: Synergizing Reasoning and Acting in Language Models](http://arxiv.org/abs/2210.03629v3) | Shunyu Yao, Jeffrey Zhao, Dian Yu, et al. | 2022-10-06 | `Docs:` [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface), [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping), `API:` [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)
|
||||
| `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. | 2022-09-22 | `Docs:` [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
|
||||
| `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022-05-25 | `API:` [langchain_community.embeddings...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
|
||||
| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `API:` [langchain_community.utilities...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain_community.utilities...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)
|
||||
| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022-02-01 | `API:` [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_huggingface.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
|
||||
| `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022-05-25 | `API:` [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
|
||||
| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `API:` [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)
|
||||
| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022-02-01 | `API:` [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
|
||||
| `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy, et al. | 2021-02-26 | `API:` [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)
|
||||
| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. | 2019-09-11 | `API:` [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_huggingface.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
|
||||
| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. | 2019-09-11 | `API:` [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
|
||||
| `1908.10084v1` [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](http://arxiv.org/abs/1908.10084v1) | Nils Reimers, Iryna Gurevych | 2019-08-27 | `Docs:` [docs/integrations/text_embedding/sentence_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)
|
||||
|
||||
## Self-Discover: Large Language Models Self-Compose Reasoning Structures
|
||||
@@ -537,7 +541,7 @@ more than 1/1,000th the compute of GPT-4.
|
||||
- **URL:** http://arxiv.org/abs/2301.10226v4
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_huggingface.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI)
|
||||
- **API Reference:** [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
|
||||
|
||||
**Abstract:** Potential harms of large language models can be mitigated by watermarking
|
||||
model output, i.e., embedding signals into generated text that are invisible to
|
||||
@@ -562,7 +566,7 @@ family, and discuss robustness and security.
|
||||
- **URL:** http://arxiv.org/abs/2212.10496v1
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain.chains...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)
|
||||
- **API Reference:** [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)
|
||||
- **Template:** [hyde](https://python.langchain.com/docs/templates/hyde)
|
||||
- **Cookbook:** [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
|
||||
|
||||
@@ -626,7 +630,7 @@ further work on logical fallacy identification.
|
||||
- **URL:** http://arxiv.org/abs/2211.13892v2
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_core.example_selectors...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
|
||||
- **API Reference:** [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
|
||||
|
||||
**Abstract:** Large language models (LLMs) have exhibited remarkable capabilities in
|
||||
learning from explanations in prompts, but there has been limited understanding
|
||||
@@ -654,7 +658,7 @@ performance across three real-world tasks on multiple LLMs.
|
||||
- **URL:** http://arxiv.org/abs/2211.10435v2
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), [langchain_experimental.pal_chain...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain)
|
||||
- **API Reference:** [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain)
|
||||
- **Cookbook:** [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
|
||||
|
||||
**Abstract:** Large language models (LLMs) have recently demonstrated an impressive ability
|
||||
@@ -680,6 +684,41 @@ accuracy on the GSM8K benchmark of math word problems, surpassing PaLM-540B
|
||||
which uses chain-of-thought by absolute 15% top-1. Our code and data are
|
||||
publicly available at http://reasonwithpal.com/ .
|
||||
|
||||
## ReAct: Synergizing Reasoning and Acting in Language Models
|
||||
|
||||
- **arXiv id:** 2210.03629v3
|
||||
- **Title:** ReAct: Synergizing Reasoning and Acting in Language Models
|
||||
- **Authors:** Shunyu Yao, Jeffrey Zhao, Dian Yu, et al.
|
||||
- **Published Date:** 2022-10-06
|
||||
- **URL:** http://arxiv.org/abs/2210.03629v3
|
||||
- **LangChain:**
|
||||
|
||||
- **Documentation:** [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface), [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping)
|
||||
- **API Reference:** [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)
|
||||
|
||||
**Abstract:** While large language models (LLMs) have demonstrated impressive capabilities
|
||||
across tasks in language understanding and interactive decision making, their
|
||||
abilities for reasoning (e.g. chain-of-thought prompting) and acting (e.g.
|
||||
action plan generation) have primarily been studied as separate topics. In this
|
||||
paper, we explore the use of LLMs to generate both reasoning traces and
|
||||
task-specific actions in an interleaved manner, allowing for greater synergy
|
||||
between the two: reasoning traces help the model induce, track, and update
|
||||
action plans as well as handle exceptions, while actions allow it to interface
|
||||
with external sources, such as knowledge bases or environments, to gather
|
||||
additional information. We apply our approach, named ReAct, to a diverse set of
|
||||
language and decision making tasks and demonstrate its effectiveness over
|
||||
state-of-the-art baselines, as well as improved human interpretability and
|
||||
trustworthiness over methods without reasoning or acting components.
|
||||
Concretely, on question answering (HotpotQA) and fact verification (Fever),
|
||||
ReAct overcomes issues of hallucination and error propagation prevalent in
|
||||
chain-of-thought reasoning by interacting with a simple Wikipedia API, and
|
||||
generates human-like task-solving trajectories that are more interpretable than
|
||||
baselines without reasoning traces. On two interactive decision making
|
||||
benchmarks (ALFWorld and WebShop), ReAct outperforms imitation and
|
||||
reinforcement learning methods by an absolute success rate of 34% and 10%
|
||||
respectively, while being prompted with only one or two in-context examples.
|
||||
Project site with code: https://react-lm.github.io
|
||||
|
||||
## Deep Lake: a Lakehouse for Deep Learning
|
||||
|
||||
- **arXiv id:** 2209.10785v2
|
||||
@@ -717,7 +756,7 @@ TensorFlow, JAX, and integrate with numerous MLOps tools.
|
||||
- **URL:** http://arxiv.org/abs/2205.12654v1
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_community.embeddings...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
|
||||
- **API Reference:** [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
|
||||
|
||||
**Abstract:** Scaling multilingual representation learning beyond the hundred most frequent
|
||||
languages is challenging, in particular to cover the long tail of low-resource
|
||||
@@ -746,7 +785,7 @@ encoders, mine bitexts, and validate the bitexts by training NMT systems.
|
||||
- **URL:** http://arxiv.org/abs/2204.00498v1
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_community.utilities...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain_community.utilities...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)
|
||||
- **API Reference:** [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)
|
||||
|
||||
**Abstract:** We perform an empirical evaluation of Text-to-SQL capabilities of the Codex
|
||||
language model. We find that, without any finetuning, Codex is a strong
|
||||
@@ -765,7 +804,7 @@ few-shot examples.
|
||||
- **URL:** http://arxiv.org/abs/2202.00666v5
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_huggingface.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
|
||||
- **API Reference:** [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
|
||||
|
||||
**Abstract:** Today's probabilistic language generators fall short when it comes to
|
||||
producing coherent and fluent text despite the fact that the underlying models
|
||||
@@ -829,7 +868,7 @@ https://github.com/OpenAI/CLIP.
|
||||
- **URL:** http://arxiv.org/abs/1909.05858v2
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_huggingface.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
|
||||
- **API Reference:** [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
|
||||
|
||||
**Abstract:** Large-scale language models show promising text generation capabilities, but
|
||||
users cannot easily control particular aspects of the generated text. We
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
### [by Prompt Engineering](https://www.youtube.com/playlist?list=PLVEEucA9MYhOu89CX8H3MBZqayTbcCTMr)
|
||||
### [by Mayo Oshin](https://www.youtube.com/@chatwithdata/search?query=langchain)
|
||||
### [by 1 little Coder](https://www.youtube.com/playlist?list=PLpdmBGJ6ELUK-v0MK-t4wZmVEbxM5xk6L)
|
||||
### [by BobLin (Chinese language)](https://www.youtube.com/playlist?list=PLbd7ntv6PxC3QMFQvtWfk55p-Op_syO1C)
|
||||
|
||||
## Courses
|
||||
|
||||
@@ -45,7 +46,6 @@
|
||||
- [Generative AI with LangChain](https://www.amazon.com/Generative-AI-LangChain-language-ChatGPT/dp/1835083463/ref=sr_1_1?crid=1GMOMH0G7GLR&keywords=generative+ai+with+langchain&qid=1703247181&sprefix=%2Caps%2C298&sr=8-1) by [Ben Auffrath](https://www.amazon.com/stores/Ben-Auffarth/author/B08JQKSZ7D?ref=ap_rdr&store_ref=ap_rdr&isDramIntegrated=true&shoppingPortalEnabled=true), ©️ 2023 Packt Publishing
|
||||
- [LangChain AI Handbook](https://www.pinecone.io/learn/langchain/) By **James Briggs** and **Francisco Ingham**
|
||||
- [LangChain Cheatsheet](https://pub.towardsai.net/langchain-cheatsheet-all-secrets-on-a-single-page-8be26b721cde) by **Ivan Reznikov**
|
||||
- [Dive into Langchain (Chinese language)](https://langchain.boblin.app/)
|
||||
|
||||
---------------------
|
||||
|
||||
|
||||
|
||||
@@ -1,7 +1,3 @@
|
||||
---
|
||||
keywords: [prompt, documents, chatprompttemplate, prompttemplate, invoke, lcel, tool, tools, embedding, embeddings, vector, vectorstore, llm, loader, retriever, retrievers]
|
||||
---
|
||||
|
||||
# Conceptual guide
|
||||
|
||||
import ThemedImage from '@theme/ThemedImage';
|
||||
@@ -15,7 +11,7 @@ LangChain as a framework consists of a number of packages.
|
||||
|
||||
### `langchain-core`
|
||||
This package contains base abstractions of different components and ways to compose them together.
|
||||
The interfaces for core components like LLMs, vectorstores, retrievers and more are defined here.
|
||||
The interfaces for core components like LLMs, vector stores, retrievers and more are defined here.
|
||||
No third party integrations are defined here.
|
||||
The dependencies are kept purposefully very lightweight.
|
||||
|
||||
@@ -34,7 +30,7 @@ All chains, agents, and retrieval strategies here are NOT specific to any one in
|
||||
|
||||
This package contains third party integrations that are maintained by the LangChain community.
|
||||
Key partner packages are separated out (see below).
|
||||
This contains all integrations for various components (LLMs, vectorstores, retrievers).
|
||||
This contains all integrations for various components (LLMs, vector stores, retrievers).
|
||||
All dependencies in this package are optional to keep the package as lightweight as possible.
|
||||
|
||||
### [`langgraph`](https://langchain-ai.github.io/langgraph)
|
||||
@@ -62,6 +58,7 @@ A developer platform that lets you debug, test, evaluate, and monitor LLM applic
|
||||
/>
|
||||
|
||||
## LangChain Expression Language (LCEL)
|
||||
<span data-heading-keywords="lcel"></span>
|
||||
|
||||
LangChain Expression Language, or LCEL, is a declarative way to chain LangChain components.
|
||||
LCEL was designed from day 1 to **support putting prototypes in production, with no code changes**, from the simplest “prompt + LLM” chain to the most complex chains (we’ve seen folks successfully run LCEL chains with 100s of steps in production). To highlight a few of the reasons you might want to use LCEL:
|
||||
@@ -92,15 +89,16 @@ With LCEL, **all** steps are automatically logged to [LangSmith](https://docs.sm
|
||||
Any chain created with LCEL can be easily deployed using [LangServe](/docs/langserve).
|
||||
|
||||
### Runnable interface
|
||||
<span data-heading-keywords="invoke"></span>
|
||||
|
||||
To make it as easy as possible to create custom chains, we've implemented a ["Runnable"](https://api.python.langchain.com/en/stable/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) protocol. Many LangChain components implement the `Runnable` protocol, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. There are also several useful primitives for working with runnables, which you can read about below.
|
||||
|
||||
This is a standard interface, which makes it easy to define custom chains as well as invoke them in a standard way.
|
||||
The standard interface includes:
|
||||
|
||||
- [`stream`](#stream): stream back chunks of the response
|
||||
- [`invoke`](#invoke): call the chain on an input
|
||||
- [`batch`](#batch): call the chain on a list of inputs
|
||||
- `stream`: stream back chunks of the response
|
||||
- `invoke`: call the chain on an input
|
||||
- `batch`: call the chain on a list of inputs
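
Below is a minimal sketch of this interface, using an illustrative `add_one` function wrapped in `RunnableLambda` (the function and inputs are made up for demonstration):

```python
from langchain_core.runnables import RunnableLambda

# An illustrative runnable: adds one to its input.
add_one = RunnableLambda(lambda x: x + 1)

add_one.invoke(1)          # 2 -- call the runnable on a single input
add_one.batch([1, 2, 3])   # [2, 3, 4] -- call it on a list of inputs
for chunk in add_one.stream(1):
    print(chunk)           # stream back chunks (a single chunk for this runnable)
```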
|
||||
|
||||
These also have corresponding async methods that should be used with [asyncio](https://docs.python.org/3/library/asyncio.html) `await` syntax for concurrency:
|
||||
|
||||
@@ -132,16 +130,17 @@ LangChain provides standard, extendable interfaces and external integrations for
|
||||
Some components LangChain implements, some components we rely on third-party integrations for, and others are a mix.
|
||||
|
||||
### Chat models
|
||||
<span data-heading-keywords="chat model,chat models"></span>
|
||||
|
||||
Language models that use a sequence of messages as inputs and return chat messages as outputs (as opposed to using plain text).
|
||||
These are traditionally newer models (older models are generally `LLMs`, see above).
|
||||
These are traditionally newer models (older models are generally `LLMs`, see below).
|
||||
Chat models support the assignment of distinct roles to conversation messages, helping to distinguish messages from the AI, users, and instructions such as system messages.
|
||||
|
||||
Although the underlying models are messages in, message out, the LangChain wrappers also allow these models to take a string as input. This means you can easily use chat models in place of LLMs.
|
||||
|
||||
When a string is passed in as input, it is converted to a HumanMessage and then passed to the underlying model.
|
||||
When a string is passed in as input, it is converted to a `HumanMessage` and then passed to the underlying model.
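
As a rough sketch (the model name is the same illustrative one used in the streaming examples later in this guide), the two calls below are handled identically:

```python
from langchain_anthropic import ChatAnthropic
from langchain_core.messages import HumanMessage

model = ChatAnthropic(model="claude-3-sonnet-20240229")

# A plain string input...
model.invoke("Tell me a joke")
# ...is converted to a single HumanMessage before reaching the underlying model.
model.invoke([HumanMessage(content="Tell me a joke")])
```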
|
||||
|
||||
LangChain does not provide any ChatModels, rather we rely on third party integrations.
|
||||
LangChain does not host any Chat Models, rather we rely on third party integrations.
|
||||
|
||||
We have some standardized parameters when constructing ChatModels:
|
||||
- `model`: the name of the model
|
||||
@@ -154,16 +153,31 @@ Generally, such models are better at tool calling than non-fine-tuned models, an
|
||||
Please see the [tool calling section](/docs/concepts/#functiontool-calling) for more information.
|
||||
:::
|
||||
|
||||
For specifics on how to use chat models, see the [relevant how-to guides here](/docs/how_to/#chat-models).
|
||||
|
||||
#### Multimodality
|
||||
|
||||
Some chat models are multimodal, accepting images, audio and even video as inputs. These are still less common, meaning model providers haven't standardized on the "best" way to define the API. Multimodal **outputs** are even less common. As such, we've kept our multimodal abstractions fairly lightweight and plan to further solidify the multimodal APIs and interaction patterns as the field matures.
|
||||
|
||||
In LangChain, most chat models that support multimodal inputs also accept those values in OpenAI's content blocks format. So far this is restricted to image inputs. For models like Gemini which support video and other bytes input, the APIs also support the native, model-specific representations.
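
For example, an image input in the OpenAI-style content blocks format looks roughly like the sketch below (the image URL is a placeholder, and `multimodal_chat_model` is assumed to be a chat model that supports image inputs):

```python
from langchain_core.messages import HumanMessage

message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe the weather in this image."},
        {"type": "image_url", "image_url": {"url": "https://example.com/sky.jpg"}},
    ]
)
# response = multimodal_chat_model.invoke([message])
```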
|
||||
|
||||
For specifics on how to use multimodal models, see the [relevant how-to guides here](/docs/how_to/#multimodal).
|
||||
|
||||
For a full list of LangChain model providers with multimodal models, [check out this table](/docs/integrations/chat/#advanced-features).
|
||||
|
||||
### LLMs
|
||||
<span data-heading-keywords="llm,llms"></span>
|
||||
|
||||
Language models that take a string as input and return a string.
|
||||
These are traditionally older models (newer models generally are `ChatModels`, see below).
|
||||
These are traditionally older models (newer models generally are [Chat Models](/docs/concepts/#chat-models), see below).
|
||||
|
||||
Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input.
|
||||
This makes them interchangeable with ChatModels.
|
||||
This gives them the same interface as [Chat Models](/docs/concepts/#chat-models).
|
||||
When messages are passed in as input, they will be formatted into a string under the hood before being passed to the underlying model.
|
||||
|
||||
LangChain does not provide any LLMs, rather we rely on third party integrations.
|
||||
LangChain does not host any LLMs, rather we rely on third party integrations.
|
||||
|
||||
For specifics on how to use LLMs, see the [relevant how-to guides here](/docs/how_to/#llms).
|
||||
|
||||
### Messages
|
||||
|
||||
@@ -218,6 +232,8 @@ This represents the result of a tool call. This is distinct from a FunctionMessa
|
||||
|
||||
|
||||
### Prompt templates
|
||||
<span data-heading-keywords="prompt,prompttemplate,chatprompttemplate"></span>
|
||||
|
||||
Prompt templates help to translate user input and parameters into instructions for a language model.
|
||||
This can be used to guide a model's response, helping it understand the context and generate relevant and coherent language-based output.
|
||||
|
||||
@@ -226,7 +242,7 @@ Prompt Templates take as input a dictionary, where each key represents a variabl
|
||||
Prompt Templates output a PromptValue. This PromptValue can be passed to an LLM or a ChatModel, and can also be cast to a string or a list of messages.
|
||||
The reason this PromptValue exists is to make it easy to switch between strings and messages.
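
For example, in the sketch below (the template is illustrative) the same `PromptValue` is cast both ways:

```python
from langchain_core.prompts import ChatPromptTemplate

prompt_template = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant"),
    ("user", "Tell me a joke about {topic}"),
])

prompt_value = prompt_template.invoke({"topic": "cats"})
prompt_value.to_string()    # the prompt formatted as a single string
prompt_value.to_messages()  # the prompt formatted as a list of messages
```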
|
||||
|
||||
There are a few different types of prompt templates
|
||||
There are a few different types of prompt templates:
|
||||
|
||||
#### String PromptTemplates
|
||||
|
||||
@@ -262,6 +278,7 @@ The first is a system message, that has no variables to format.
|
||||
The second is a HumanMessage, and will be formatted by the `topic` variable the user passes in.
|
||||
|
||||
#### MessagesPlaceholder
|
||||
<span data-heading-keywords="messagesplaceholder"></span>
|
||||
|
||||
This prompt template is responsible for adding a list of messages in a particular place.
|
||||
In the above ChatPromptTemplate, we saw how we could format two messages, each one a string.
|
||||
@@ -293,14 +310,18 @@ prompt_template = ChatPromptTemplate.from_messages([
|
||||
])
|
||||
```
|
||||
|
||||
For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates).
|
||||
|
||||
### Example selectors
|
||||
One common prompting technique for achieving better performance is to include examples as part of the prompt.
|
||||
This gives the language model concrete examples of how it should behave.
|
||||
Sometimes these examples are hardcoded into the prompt, but for more advanced situations it may be nice to dynamically select them.
|
||||
Example Selectors are classes responsible for selecting and then formatting examples into prompts.
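
As a rough sketch, the example below selects examples by length and formats them into a few-shot prompt; the example data and length threshold are made up for illustration:

```python
from langchain_core.example_selectors import LengthBasedExampleSelector
from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate

examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
]
example_prompt = PromptTemplate.from_template("Input: {input}\nOutput: {output}")

example_selector = LengthBasedExampleSelector(
    examples=examples,
    example_prompt=example_prompt,
    max_length=25,  # include as many examples as fit within this length
)

prompt = FewShotPromptTemplate(
    example_selector=example_selector,
    example_prompt=example_prompt,
    suffix="Input: {adjective}\nOutput:",
    input_variables=["adjective"],
)
print(prompt.format(adjective="big"))
```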
|
||||
|
||||
For specifics on how to use example selectors, see the [relevant how-to guides here](/docs/how_to/#example-selectors).
|
||||
|
||||
### Output parsers
|
||||
<span data-heading-keywords="output parser"></span>
|
||||
|
||||
:::note
|
||||
|
||||
@@ -344,16 +365,19 @@ LangChain has lots of different types of output parsers. This is a list of outpu
|
||||
| [Datetime](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.datetime.DatetimeOutputParser.html#langchain.output_parsers.datetime.DatetimeOutputParser) | | ✅ | | `str` \| `Message` | `datetime.datetime` | Parses response into a datetime string. |
|
||||
| [Structured](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.structured.StructuredOutputParser.html#langchain.output_parsers.structured.StructuredOutputParser) | | ✅ | | `str` \| `Message` | `Dict[str, str]` | An output parser that returns structured information. It is less powerful than other output parsers since it only allows for fields to be strings. This can be useful when you are working with smaller LLMs. |
|
||||
|
||||
For specifics on how to use output parsers, see the [relevant how-to guides here](/docs/how_to/#output-parsers).
|
||||
|
||||
### Chat history
|
||||
Most LLM applications have a conversational interface.
|
||||
An essential component of a conversation is being able to refer to information introduced earlier in the conversation.
|
||||
At bare minimum, a conversational system should be able to access some window of past messages directly.
|
||||
|
||||
The concept of `ChatHistory` refers to a class in LangChain which can be used to wrap an arbitrary chain.
|
||||
This `ChatHistory` will keep track of inputs and outputs of the underlying chain, and append them as messages to a message database
|
||||
This `ChatHistory` will keep track of inputs and outputs of the underlying chain, and append them as messages to a message database.
|
||||
Future interactions will then load those messages and pass them into the chain as part of the input.
|
||||
|
||||
### Documents
|
||||
<span data-heading-keywords="document,documents"></span>
|
||||
|
||||
A Document object in LangChain contains information about some data. It has two attributes:
|
||||
|
||||
@@ -361,6 +385,7 @@ A Document object in LangChain contains information about some data. It has two
|
||||
- `metadata: dict`: Arbitrary metadata associated with this document. Can track the document id, file name, etc.
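
A minimal sketch of constructing a Document (the values are illustrative):

```python
from langchain_core.documents import Document

doc = Document(
    page_content="LangChain is a framework for building LLM applications.",
    metadata={"source": "example.txt", "page": 1},
)
```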
|
||||
|
||||
### Document loaders
|
||||
<span data-heading-keywords="document loader,document loaders"></span>
|
||||
|
||||
These classes load Document objects. LangChain has hundreds of integrations with various data sources to load data from: Slack, Notion, Google Drive, etc.
|
||||
|
||||
@@ -376,6 +401,8 @@ loader = CSVLoader(
|
||||
data = loader.load()
|
||||
```
|
||||
|
||||
For specifics on how to use document loaders, see the [relevant how-to guides here](/docs/how_to/#document-loaders).
|
||||
|
||||
### Text splitters
|
||||
|
||||
Once you've loaded documents, you'll often want to transform them to better suit your application. The simplest example is you may want to split a long document into smaller chunks that can fit into your model's context window. LangChain has a number of built-in document transformers that make it easy to split, combine, filter, and otherwise manipulate documents.
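
For instance, here is a hedged sketch of recursive character splitting (the chunk sizes are illustrative, not recommendations):

```python
from langchain_text_splitters import RecursiveCharacterTextSplitter

text = "LangChain makes it straightforward to split long documents into chunks. " * 40
text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=20)
chunks = text_splitter.split_text(text)  # use .split_documents(...) for Document objects
```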
|
||||
@@ -393,18 +420,34 @@ That means there are two different axes along which you can customize your text
|
||||
1. How the text is split
|
||||
2. How the chunk size is measured
|
||||
|
||||
### Embedding models
|
||||
The Embeddings class is a class designed for interfacing with text embedding models. There are lots of embedding model providers (OpenAI, Cohere, Hugging Face, etc) - this class is designed to provide a standard interface for all of them.
|
||||
For specifics on how to use text splitters, see the [relevant how-to guides here](/docs/how_to/#text-splitters).
|
||||
|
||||
Embeddings create a vector representation of a piece of text. This is useful because it means we can think about text in the vector space, and do things like semantic search where we look for pieces of text that are most similar in the vector space.
|
||||
### Embedding models
|
||||
<span data-heading-keywords="embedding,embeddings"></span>
|
||||
|
||||
Embedding models create a vector representation of a piece of text. You can think of a vector as an array of numbers that captures the semantic meaning of the text.
|
||||
By representing the text in this way, you can perform mathematical operations that allow you to do things like search for other pieces of text that are most similar in meaning.
|
||||
These natural language search capabilities underpin many types of [context retrieval](/docs/concepts/#retrieval),
|
||||
where we provide an LLM with the relevant data it needs to effectively respond to a query.
|
||||
|
||||

|
||||
|
||||
The `Embeddings` class is a class designed for interfacing with text embedding models. There are many different embedding model providers (OpenAI, Cohere, Hugging Face, etc) and local models, and this class is designed to provide a standard interface for all of them.
|
||||
|
||||
The base Embeddings class in LangChain provides two methods: one for embedding documents and one for embedding a query. The former takes as input multiple texts, while the latter takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself).
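
A short sketch of the two methods (the provider and model name are assumptions for illustration):

```python
from langchain_openai import OpenAIEmbeddings

embeddings = OpenAIEmbeddings(model="text-embedding-3-small")

document_vectors = embeddings.embed_documents(["LangChain docs", "LangGraph docs"])
query_vector = embeddings.embed_query("What is LangChain?")
```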
|
||||
|
||||
For specifics on how to use embedding models, see the [relevant how-to guides here](/docs/how_to/#embedding-models).
|
||||
|
||||
### Vector stores
|
||||
<span data-heading-keywords="vector,vectorstore,vectorstores,vector store,vector stores"></span>
|
||||
|
||||
One of the most common ways to store and search over unstructured data is to embed it and store the resulting embedding vectors,
|
||||
and then at query time to embed the unstructured query and retrieve the embedding vectors that are 'most similar' to the embedded query.
|
||||
A vector store takes care of storing embedded data and performing vector search for you.
|
||||
|
||||
Most vector stores can also store metadata about embedded vectors and support filtering on that metadata before
|
||||
similarity search, allowing you more control over returned documents.
|
||||
|
||||
Vector stores can be converted to the retriever interface by doing:
|
||||
|
||||
```python
|
||||
@@ -412,15 +455,22 @@ vectorstore = MyVectorStore()
|
||||
retriever = vectorstore.as_retriever()
|
||||
```
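
A slightly fuller, hedged sketch using the in-memory vector store from `langchain-core` (available in recent versions; the documents and embedding model are illustrative):

```python
from langchain_core.documents import Document
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_openai import OpenAIEmbeddings

vectorstore = InMemoryVectorStore.from_documents(
    [Document(page_content="LangChain helps you build LLM applications.")],
    embedding=OpenAIEmbeddings(),
)

results = vectorstore.similarity_search("What does LangChain do?", k=1)
retriever = vectorstore.as_retriever()  # expose the vector store through the retriever interface
```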
|
||||
|
||||
For specifics on how to use vector stores, see the [relevant how-to guides here](/docs/how_to/#vector-stores).
|
||||
|
||||
### Retrievers
|
||||
<span data-heading-keywords="retriever,retrievers"></span>
|
||||
|
||||
A retriever is an interface that returns documents given an unstructured query.
|
||||
It is more general than a vector store.
|
||||
A retriever does not need to be able to store documents, only to return (or retrieve) them.
|
||||
Retrievers can be created from vectorstores, but are also broad enough to include [Wikipedia search](/docs/integrations/retrievers/wikipedia/) and [Amazon Kendra](/docs/integrations/retrievers/amazon_kendra_retriever/).
|
||||
Retrievers can be created from vector stores, but are also broad enough to include [Wikipedia search](/docs/integrations/retrievers/wikipedia/) and [Amazon Kendra](/docs/integrations/retrievers/amazon_kendra_retriever/).
|
||||
|
||||
Retrievers accept a string query as input and return a list of Documents as output.
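
For example, a hedged sketch using the Wikipedia retriever integration (which requires the `wikipedia` package):

```python
from langchain_community.retrievers import WikipediaRetriever

retriever = WikipediaRetriever()
docs = retriever.invoke("LangChain")  # a string query in, a list of Documents out
```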
|
||||
|
||||
For specifics on how to use retrievers, see the [relevant how-to guides here](/docs/how_to/#retrievers).
|
||||
|
||||
### Tools
|
||||
<span data-heading-keywords="tool,tools"></span>
|
||||
|
||||
Tools are interfaces that an agent, a chain, or a chat model / LLM can use to interact with the world.
|
||||
|
||||
@@ -446,6 +496,8 @@ Generally, when designing tools to be used by a chat model or LLM, it is importa
|
||||
- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas.
|
||||
- Simpler tools are generally easier for models to use than more complex tools.
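
A minimal sketch of a well-described tool using the `@tool` decorator (the `multiply` example is made up for illustration):

```python
from langchain_core.tools import tool

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

# The function name becomes the tool name, the docstring its description,
# and the type hints its input schema.
multiply.invoke({"a": 3, "b": 4})  # 12
```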
|
||||
|
||||
For specifics on how to use tools, see the [relevant how-to guides here](/docs/how_to/#tools).
|
||||
|
||||
### Toolkits
|
||||
|
||||
Toolkits are collections of tools that are designed to be used together for specific tasks. They have convenient loading methods.
|
||||
@@ -465,7 +517,7 @@ tools = toolkit.get_tools()
|
||||
|
||||
By themselves, language models can't take actions - they just output text.
|
||||
A big use case for LangChain is creating **agents**.
|
||||
Agents are systems that use an LLM as a reasoning enginer to determine which actions to take and what the inputs to those actions should be.
|
||||
Agents are systems that use an LLM as a reasoning engine to determine which actions to take and what the inputs to those actions should be.
|
||||
The results of those actions can then be fed back into the agent, which determines whether more actions are needed or whether it is okay to finish.
|
||||
|
||||
[LangGraph](https://github.com/langchain-ai/langgraph) is an extension of LangChain specifically aimed at creating highly controllable and customizable agents.
|
||||
@@ -478,13 +530,7 @@ In order to solve that we built LangGraph to be this flexible, highly-controllab
|
||||
|
||||
If you are still using AgentExecutor, do not fear: we still have a guide on [how to use AgentExecutor](/docs/how_to/agent_executor).
|
||||
It is recommended, however, that you start to transition to LangGraph.
|
||||
In order to assist in this we have put together a [transition guide on how to do so](/docs/how_to/migrate_agent)
|
||||
|
||||
### Multimodal
|
||||
|
||||
Some models are multimodal, accepting images, audio and even video as inputs. These are still less common, meaning model providers haven't standardized on the "best" way to define the API. Multimodal **outputs** are even less common. As such, we've kept our multimodal abstractions fairly light weight and plan to further solidify the multimodal APIs and interaction patterns as the field matures.
|
||||
|
||||
In LangChain, most chat models that support multimodal inputs also accept those values in OpenAI's content blocks format. So far this is restricted to image inputs. For models like Gemini which support video and other bytes input, the APIs also support the native, model-specific representations.
|
||||
In order to assist in this we have put together a [transition guide on how to do so](/docs/how_to/migrate_agent).
|
||||
|
||||
### Callbacks
|
||||
|
||||
@@ -556,15 +602,224 @@ This is a common reason why you may fail to see events being emitted from custom
|
||||
runnables or tools.
|
||||
:::
|
||||
|
||||
For specifics on how to use callbacks, see the [relevant how-to guides here](/docs/how_to/#callbacks).
|
||||
|
||||
## Techniques
|
||||
|
||||
### Function/tool calling
|
||||
### Streaming
|
||||
<span data-heading-keywords="stream,streaming"></span>
|
||||
|
||||
Individual LLM calls often run for much longer than traditional resource requests.
|
||||
This compounds when you build more complex chains or agents that require multiple reasoning steps.
|
||||
|
||||
Fortunately, LLMs generate output iteratively, which means it's possible to show sensible intermediate results
|
||||
before the final response is ready. Consuming output as soon as it becomes available has therefore become a vital part of the UX
|
||||
around building apps with LLMs to help alleviate latency issues, and LangChain aims to have first-class support for streaming.
|
||||
|
||||
Below, we'll discuss some concepts and considerations around streaming in LangChain.
|
||||
|
||||
#### `.stream()` and `.astream()`
|
||||
|
||||
Most modules in LangChain include the `.stream()` method (and the equivalent `.astream()` method for [async](https://docs.python.org/3/library/asyncio.html) environments) as an ergonomic streaming interface.
|
||||
`.stream()` returns an iterator, which you can consume with a simple `for` loop. Here's an example with a chat model:
|
||||
|
||||
```python
|
||||
from langchain_anthropic import ChatAnthropic
|
||||
|
||||
model = ChatAnthropic(model="claude-3-sonnet-20240229")
|
||||
|
||||
for chunk in model.stream("what color is the sky?"):
|
||||
print(chunk.content, end="|", flush=True)
|
||||
```
|
||||
|
||||
For models (or other components) that don't support streaming natively, this iterator would just yield a single chunk, but
|
||||
you could still use the same general pattern when calling them. Using `.stream()` will also automatically call the model in streaming mode
|
||||
without the need to provide additional config.
|
||||
|
||||
The type of each outputted chunk depends on the type of component - for example, chat models yield [`AIMessageChunks`](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html).
|
||||
Because this method is part of [LangChain Expression Language](/docs/concepts/#langchain-expression-language-lcel),
|
||||
you can handle formatting differences from different outputs using an [output parser](/docs/concepts/#output-parsers) to transform
|
||||
each yielded chunk.
|
||||
|
||||
You can check out [this guide](/docs/how_to/streaming/#using-stream) for more detail on how to use `.stream()`.
|
||||
|
||||
#### `.astream_events()`
|
||||
<span data-heading-keywords="astream_events,stream_events,stream events"></span>
|
||||
|
||||
While the `.stream()` method is intuitive, it can only return the final generated value of your chain. This is fine for single LLM calls,
|
||||
but as you build more complex chains of several LLM calls together, you may want to use the intermediate values of
|
||||
the chain alongside the final output - for example, returning sources alongside the final generation when building a chat
|
||||
over documents app.
|
||||
|
||||
There are ways to do this [using callbacks](/docs/concepts/#callbacks-1), or by constructing your chain in such a way that it passes intermediate
|
||||
values to the end with something like chained [`.assign()`](/docs/how_to/passthrough/) calls, but LangChain also includes an
|
||||
`.astream_events()` method that combines the flexibility of callbacks with the ergonomics of `.stream()`. When called, it returns an iterator
|
||||
which yields [various types of events](/docs/how_to/streaming/#event-reference) that you can filter and process according
|
||||
to the needs of your project.
|
||||
|
||||
Here's one small example that prints just events containing streamed chat model output:
|
||||
|
||||
```python
|
||||
from langchain_core.output_parsers import StrOutputParser
|
||||
from langchain_core.prompts import ChatPromptTemplate
|
||||
from langchain_anthropic import ChatAnthropic
|
||||
|
||||
model = ChatAnthropic(model="claude-3-sonnet-20240229")
|
||||
|
||||
prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")
|
||||
parser = StrOutputParser()
|
||||
chain = prompt | model | parser
|
||||
|
||||
async for event in chain.astream_events({"topic": "parrot"}, version="v2"):
|
||||
kind = event["event"]
|
||||
if kind == "on_chat_model_stream":
|
||||
print(event, end="|", flush=True)
|
||||
```
|
||||
|
||||
You can roughly think of it as an iterator over callback events (though the format differs) - and you can use it on almost all LangChain components!
|
||||
|
||||
See [this guide](/docs/how_to/streaming/#using-stream-events) for more detailed information on how to use `.astream_events()`,
|
||||
including a table listing available events.
|
||||
|
||||
#### Callbacks
|
||||
|
||||
The lowest level way to stream outputs from LLMs in LangChain is via the [callbacks](/docs/concepts/#callbacks) system. You can pass a
|
||||
callback handler that handles the [`on_llm_new_token`](https://api.python.langchain.com/en/latest/callbacks/langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.html#langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.on_llm_new_token) event into LangChain components. When that component is invoked, any
|
||||
[LLM](/docs/concepts/#llms) or [chat model](/docs/concepts/#chat-models) contained in the component calls
|
||||
the callback with the generated token. Within the callback, you could pipe the tokens into some other destination, e.g. an HTTP response.
|
||||
You can also handle the [`on_llm_end`](https://api.python.langchain.com/en/latest/callbacks/langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.html#langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.on_llm_end) event to perform any necessary cleanup.
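
A hedged sketch of this pattern (the handler name is made up; the model is constructed in streaming mode so the token callback fires):

```python
from langchain_core.callbacks import BaseCallbackHandler
from langchain_openai import ChatOpenAI

class PrintTokenHandler(BaseCallbackHandler):
    def on_llm_new_token(self, token: str, **kwargs) -> None:
        # Pipe each generated token somewhere, e.g. print it or write it to an HTTP response.
        print(token, end="|", flush=True)

model = ChatOpenAI(model="gpt-4o", streaming=True)
model.invoke("what color is the sky?", config={"callbacks": [PrintTokenHandler()]})
```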
|
||||
|
||||
You can see [this how-to section](/docs/how_to/#callbacks) for more specifics on using callbacks.
|
||||
|
||||
Callbacks were the first technique for streaming introduced in LangChain. While powerful and generalizable,
|
||||
they can be unwieldy for developers. For example:
|
||||
|
||||
- You need to explicitly initialize and manage some aggregator or other stream to collect results.
|
||||
- The execution order isn't explicitly guaranteed, and you could theoretically have a callback run after the `.invoke()` method finishes.
|
||||
- Providers would often make you pass an additional parameter to stream outputs instead of returning them all at once.
|
||||
- You would often ignore the result of the actual model call in favor of callback results.
|
||||
|
||||
#### Tokens
|
||||
|
||||
The unit that most model providers use to measure input and output is called a **token**.
|
||||
Tokens are the basic units that language models read and generate when processing or producing text.
|
||||
The exact definition of a token can vary depending on the specific way the model was trained -
|
||||
for instance, in English, a token could be a single word like "apple", or a part of a word like "app".
|
||||
|
||||
When you send a model a prompt, the words and characters in the prompt are encoded into tokens using a **tokenizer**.
|
||||
The model then streams back generated output tokens, which the tokenizer decodes into human-readable text.
|
||||
The below example shows how OpenAI models tokenize `LangChain is cool!`:
|
||||
|
||||

|
||||
|
||||
You can see that it gets split into 5 different tokens, and that the boundaries between tokens are not exactly the same as word boundaries.
|
||||
|
||||
The reason language models use tokens rather than something more immediately intuitive like "characters"
|
||||
has to do with how they process and understand text. At a high level, language models iteratively predict their next generated output based on
|
||||
the initial input and their previous generations. Training the model using tokens allows language models to handle linguistic
|
||||
units (like words or subwords) that carry meaning, rather than individual characters, which makes it easier for the model
|
||||
to learn and understand the structure of the language, including grammar and context.
|
||||
Furthermore, using tokens can also improve efficiency, since the model processes fewer units of text compared to character-level processing.
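
As a rough illustration, you can count tokens for a string using a model's tokenizer helper (the exact count depends on the provider's tokenizer):

```python
from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-4o")
model.get_num_tokens("LangChain is cool!")  # token count for this string under the model's tokenizer
```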
|
||||
|
||||
### Structured output
|
||||
|
||||
LLMs are capable of generating arbitrary text. This enables the model to respond appropriately to a wide
|
||||
range of inputs, but for some use-cases, it can be useful to constrain the LLM's output
|
||||
to a specific format or structure. This is referred to as **structured output**.
|
||||
|
||||
For example, if the output is to be stored in a relational database,
|
||||
it is much easier if the model generates output that adheres to a defined schema or format.
|
||||
[Extracting specific information](/docs/tutorials/extraction/) from unstructured text is another
|
||||
case where this is particularly useful. Most commonly, the output format will be JSON,
|
||||
though other formats such as [YAML](/docs/how_to/output_parser_yaml/) can be useful too. Below, we'll discuss
|
||||
a few ways to get structured output from models in LangChain.
|
||||
|
||||
#### `.with_structured_output()`
|
||||
|
||||
For convenience, some LangChain chat models support a `.with_structured_output()` method.
|
||||
This method only requires a schema as input, and returns a dict or Pydantic object.
|
||||
Generally, this method is only present on models that support one of the more advanced methods described below,
|
||||
and will use one of them under the hood. It takes care of importing a suitable output parser and
|
||||
formatting the schema in the right format for the model.
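
A hedged sketch with a Pydantic schema (the schema and model choice are illustrative; depending on your installed versions you may need the `langchain_core.pydantic_v1` compatibility import instead):

```python
from pydantic import BaseModel, Field
from langchain_openai import ChatOpenAI

class Joke(BaseModel):
    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline of the joke")

model = ChatOpenAI(model="gpt-4o")
structured_model = model.with_structured_output(Joke)
structured_model.invoke("Tell me a joke about cats")  # -> a Joke instance
```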
|
||||
|
||||
For more information, check out this [how-to guide](/docs/how_to/structured_output/#the-with_structured_output-method).
|
||||
|
||||
#### Raw prompting
|
||||
|
||||
The most intuitive way to get a model to structure output is to ask nicely.
|
||||
In addition to your query, you can give instructions describing what kind of output you'd like, then
|
||||
parse the output using an [output parser](/docs/concepts/#output-parsers) to convert the raw
|
||||
model message or string output into something more easily manipulated.
|
||||
|
||||
The biggest benefit to raw prompting is its flexibility:
|
||||
|
||||
- Raw prompting does not require any special model features, only sufficient reasoning capability to understand
|
||||
the passed schema.
|
||||
- You can prompt for any format you'd like, not just JSON. This can be useful if the model you
|
||||
are using is more heavily trained on a certain type of data, such as XML or YAML.
|
||||
|
||||
However, there are some drawbacks too:
|
||||
|
||||
- LLMs are non-deterministic, and prompting a LLM to consistently output data in the exactly correct format
|
||||
for smooth parsing can be surprisingly difficult and model-specific.
|
||||
- Individual models have quirks depending on the data they were trained on, and optimizing prompts can be quite difficult.
|
||||
Some may be better at interpreting [JSON schema](https://json-schema.org/), others may be best with TypeScript definitions,
|
||||
and still others may prefer XML.
|
||||
|
||||
While we'll next go over some ways that you can take advantage of features offered by
|
||||
model providers to increase reliability, prompting techniques remain important for tuning your
|
||||
results no matter what method you choose.
|
||||
|
||||
#### JSON mode
|
||||
<span data-heading-keywords="json mode"></span>
|
||||
|
||||
Some models, such as [Mistral](/docs/integrations/chat/mistralai/), [OpenAI](/docs/integrations/chat/openai/),
|
||||
[Together AI](/docs/integrations/chat/together/) and [Ollama](/docs/integrations/chat/ollama/),
|
||||
support a feature called **JSON mode**, usually enabled via config.
|
||||
|
||||
When enabled, JSON mode will constrain the model's output to always be some sort of valid JSON.
|
||||
Often they require some custom prompting, but it's usually much less burdensome and along the lines of
|
||||
`"you must always return JSON"`, and the [output is easier to parse](/docs/how_to/output_parser_json/).
|
||||
|
||||
It's also generally simpler and more commonly available than tool calling.
|
||||
|
||||
Here's an example:
|
||||
|
||||
```python
|
||||
from langchain_core.prompts import ChatPromptTemplate
|
||||
from langchain_openai import ChatOpenAI
|
||||
from langchain.output_parsers.json import SimpleJsonOutputParser
|
||||
|
||||
model = ChatOpenAI(
|
||||
model="gpt-4o",
|
||||
model_kwargs={ "response_format": { "type": "json_object" } },
|
||||
)
|
||||
|
||||
prompt = ChatPromptTemplate.from_template(
|
||||
"Answer the user's question to the best of your ability."
|
||||
'You must always output a JSON object with an "answer" key and a "followup_question" key.'
|
||||
"{question}"
|
||||
)
|
||||
|
||||
chain = prompt | model | SimpleJsonOutputParser()
|
||||
|
||||
chain.invoke({ "question": "What is the powerhouse of the cell?" })
|
||||
```
|
||||
|
||||
```
|
||||
{'answer': 'The powerhouse of the cell is the mitochondrion. It is responsible for producing energy in the form of ATP through cellular respiration.',
|
||||
'followup_question': 'Would you like to know more about how mitochondria produce energy?'}
|
||||
```
|
||||
|
||||
For a full list of model providers that support JSON mode, see [this table](/docs/integrations/chat/#advanced-features).
|
||||
|
||||
#### Function/tool calling
|
||||
|
||||
:::info
|
||||
We use the term tool calling interchangeably with function calling. Although
|
||||
function calling is sometimes meant to refer to invocations of a single function,
|
||||
we treat all models as though they can return multiple tool or function calls in
|
||||
each message.
|
||||
each message
|
||||
:::
|
||||
|
||||
Tool calling allows a model to respond to a given prompt by generating output that
|
||||
@@ -576,8 +831,10 @@ from unstructured text, you could give the model an "extraction" tool that takes
|
||||
parameters matching the desired schema, then treat the generated output as your final
|
||||
result.
|
||||
|
||||
A tool call includes a name, arguments dict, and an optional identifier. The
|
||||
arguments dict is structured `{argument_name: argument_value}`.
|
||||
For models that support it, tool calling can be very convenient. It removes the
|
||||
guesswork around how best to prompt schemas in favor of a built-in model feature. It can also
|
||||
more naturally support agentic flows, since you can just pass multiple tool schemas instead
|
||||
of fiddling with enums or unions.
|
||||
|
||||
Many LLM providers, including [Anthropic](https://www.anthropic.com/),
|
||||
[Cohere](https://cohere.com/), [Google](https://cloud.google.com/vertex-ai),
|
||||
@@ -594,39 +851,174 @@ LangChain provides a standardized interface for tool calling that is consistent
|
||||
|
||||
The standard interface consists of:
|
||||
|
||||
* `ChatModel.bind_tools()`: a method for specifying which tools are available for a model to call.
|
||||
* `ChatModel.bind_tools()`: a method for specifying which tools are available for a model to call. This method accepts [LangChain tools](/docs/concepts/#tools) here.
|
||||
* `AIMessage.tool_calls`: an attribute on the `AIMessage` returned from the model for accessing the tool calls requested by the model.
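
A hedged sketch tying the two together (the `multiply` tool mirrors the illustrative example from the Tools section above):

```python
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

model_with_tools = ChatOpenAI(model="gpt-4o").bind_tools([multiply])

ai_msg = model_with_tools.invoke("What is 3 times 12?")
ai_msg.tool_calls  # e.g. [{"name": "multiply", "args": {"a": 3, "b": 12}, "id": "..."}]
```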
|
||||
|
||||
There are two main use cases for function/tool calling:
|
||||
The following how-to guides are good practical resources for using function/tool calling:
|
||||
|
||||
- [How to return structured data from an LLM](/docs/how_to/structured_output/)
|
||||
- [How to use a model to call tools](/docs/how_to/tool_calling/)
|
||||
|
||||
For a full list of model providers that support tool calling, [see this table](/docs/integrations/chat/#advanced-features).
|
||||
|
||||
### Retrieval
|
||||
|
||||
LangChain provides several advanced retrieval types. A full list is below, along with the following information:
|
||||
LLMs are trained on a large but fixed dataset, limiting their ability to reason over private or recent information. Fine-tuning an LLM with specific facts is one way to mitigate this, but is often [poorly suited for factual recall](https://www.anyscale.com/blog/fine-tuning-is-for-form-not-facts) and [can be costly](https://www.glean.com/blog/how-to-build-an-ai-assistant-for-the-enterprise).
|
||||
Retrieval is the process of providing relevant information to an LLM to improve its response for a given input. Retrieval augmented generation (RAG) is the process of grounding the LLM generation (output) using the retrieved information.
|
||||
|
||||
**Name**: Name of the retrieval algorithm.
|
||||
:::tip
|
||||
|
||||
**Index Type**: Which index type (if any) this relies on.
|
||||
* See our RAG from Scratch [code](https://github.com/langchain-ai/rag-from-scratch) and [video series](https://youtube.com/playlist?list=PLfaIDFEXuae2LXbO1_PKyVJiQ23ZztA0x&feature=shared).
|
||||
* For a high-level guide on retrieval, see this [tutorial on RAG](/docs/tutorials/rag/).
|
||||
|
||||
**Uses an LLM**: Whether this retrieval method uses an LLM.
|
||||
:::
|
||||
|
||||
**When to Use**: Our commentary on when you should consider using this retrieval method.
|
||||
RAG is only as good as the retrieved documents’ relevance and quality. Fortunately, an emerging set of techniques can be employed to design and improve RAG systems. We've focused on taxonomizing and summarizing many of these techniques (see the figure below) and will share some high-level strategic guidance in the following sections.
|
||||
You can and should experiment with using different pieces together. You might also find [this LangSmith guide](https://docs.smith.langchain.com/how_to_guides/evaluation/evaluate_llm_application) useful for showing how to evaluate different iterations of your app.
|
||||
|
||||
**Description**: Description of what this retrieval algorithm is doing.
|
||||

|
||||
|
||||
#### Query Translation
|
||||
|
||||
First, consider the user input(s) to your RAG system. Ideally, a RAG system can handle a wide range of inputs, from poorly worded questions to complex multi-part queries.
|
||||
**Using an LLM to review and optionally modify the input is the central idea behind query translation.** This serves as a general buffer, optimizing raw user inputs for your retrieval system.
|
||||
For example, this can be as simple as extracting keywords or as complex as generating multiple sub-questions for a complex query.
|
||||
|
||||
| Name | When to use | Description |
|
||||
|---------------|-------------|-------------|
|
||||
| [Multi-query](/docs/how_to/MultiQueryRetriever/) | When you need to cover multiple perspectives of a question. | Rewrite the user question from multiple perspectives, retrieve documents for each rewritten question, return the unique documents for all queries. |
|
||||
| [Decomposition](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a question can be broken down into smaller subproblems. | Decompose a question into a set of subproblems / questions, which can either be solved sequentially (use the answer from first + retrieval to answer the second) or in parallel (consolidate each answer into final answer). |
|
||||
| [Step-back](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a higher-level conceptual understanding is required. | First prompt the LLM to ask a generic step-back question about higher-level concepts or principles, and retrieve relevant facts about them. Use this grounding to help answer the user question. |
|
||||
| [HyDE](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | If you have challenges retrieving relevant documents using the raw user inputs. | Use an LLM to convert questions into hypothetical documents that answer the question. Use the embedded hypothetical documents to retrieve real documents with the premise that doc-doc similarity search can produce more relevant matches. |
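
As one hedged sketch of query translation in code, the multi-query approach can be wired up as below (the corpus, embedding model, and LLM are illustrative):

```python
from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain_core.documents import Document
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

vectorstore = InMemoryVectorStore.from_documents(
    [Document(page_content="LangChain supports streaming via .stream() and .astream_events().")],
    embedding=OpenAIEmbeddings(),
)

retriever = MultiQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(),
    llm=ChatOpenAI(model="gpt-4o", temperature=0),
)
docs = retriever.invoke("How does LangChain support streaming?")
```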
|
||||
|
||||
:::tip
|
||||
|
||||
See our RAG from Scratch videos for a few different specific approaches:
|
||||
- [Multi-query](https://youtu.be/JChPi0CRnDY?feature=shared)
|
||||
- [Decomposition](https://youtu.be/h0OPWlEOank?feature=shared)
|
||||
- [Step-back](https://youtu.be/xn1jEjRyJ2U?feature=shared)
|
||||
- [HyDE](https://youtu.be/SaDzIVkYqyY?feature=shared)
|
||||
|
||||
:::
|
||||
|
||||
#### Routing
|
||||
|
||||
Second, consider the data sources available to your RAG system. You may want to query across more than one database or across structured and unstructured data sources. **Using an LLM to review the input and route it to the appropriate data source is a simple and effective approach for querying across sources.**
|
||||
|
||||
| Name | When to use | Description |
|
||||
|------------------|--------------------------------------------|-------------|
|
||||
| [Logical routing](/docs/how_to/routing/) | When you can prompt an LLM with rules to decide where to route the input. | Logical routing can use an LLM to reason about the query and choose which datastore is most appropriate. |
|
||||
| [Semantic routing](/docs/how_to/routing/#routing-by-semantic-similarity) | When semantic similarity is an effective way to determine where to route the input. | Semantic routing embeds both the query and, typically, a set of prompts. It then chooses the appropriate prompt based on similarity. |
|
||||
|
||||
:::tip
|
||||
|
||||
See our RAG from Scratch video on [routing](https://youtu.be/pfpIndq7Fi8?feature=shared).
|
||||
|
||||
:::
|
||||
|
||||
#### Query Construction
|
||||
|
||||
Third, consider whether any of your data sources require specific query formats. Many structured databases use SQL. Vector stores often have specific syntax for applying keyword filters to document metadata. **Using an LLM to convert a natural language query into a query syntax is a popular and powerful approach.**
|
||||
In particular, [text-to-SQL](/docs/tutorials/sql_qa/), [text-to-Cypher](/docs/tutorials/graph/), and [query analysis for metadata filters](/docs/tutorials/query_analysis/#query-analysis) are useful ways to interact with structured, graph, and vector databases respectively.
|
||||
|
||||
| Name | When to Use | Description |
|
||||
|---------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| [Text to SQL](/docs/tutorials/sql_qa/) | If users are asking questions that require information housed in a relational database, accessible via SQL. | This uses an LLM to transform user input into a SQL query. |
|
||||
| [Text-to-Cypher](/docs/tutorials/graph/) | If users are asking questions that require information housed in a graph database, accessible via Cypher. | This uses an LLM to transform user input into a Cypher query. |
|
||||
| [Self Query](/docs/how_to/self_query/) | If users are asking questions that are better answered by fetching documents based on metadata rather than similarity with the text. | This uses an LLM to transform user input into two things: (1) a string to look up semantically, (2) a metadata filter to go along with it. This is useful because oftentimes questions are about the METADATA of documents (not the content itself). |
|
||||
|
||||
:::tip
|
||||
|
||||
See our [blog post overview](https://blog.langchain.dev/query-construction/) and RAG from Scratch video on [query construction](https://youtu.be/kl6NwWYxvbM?feature=shared), the process of text-to-DSL where DSL is a domain specific language required to interact with a given database. This converts user questions into structured queries.
|
||||
|
||||
:::
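As an example of query construction for metadata filters, here is a minimal self-query sketch. It assumes Chroma and the `lark` package are installed; the documents, metadata fields, and models are placeholders.

```python
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

# Toy corpus with metadata worth filtering on.
docs = [
    Document(
        page_content="A scientist travels through a wormhole to save humanity.",
        metadata={"year": 2014, "genre": "sci-fi"},
    ),
    Document(
        page_content="A group of toys comes to life when people are away.",
        metadata={"year": 1995, "genre": "animated"},
    ),
]
vectorstore = Chroma.from_documents(docs, OpenAIEmbeddings())

metadata_field_info = [
    AttributeInfo(name="year", description="Year the movie was released", type="integer"),
    AttributeInfo(name="genre", description="Genre of the movie", type="string"),
]

retriever = SelfQueryRetriever.from_llm(
    llm=ChatOpenAI(temperature=0),
    vectorstore=vectorstore,
    document_contents="Brief plot summaries of movies",
    metadata_field_info=metadata_field_info,
)

# The LLM produces a semantic query plus a structured metadata filter (e.g., year > 2010).
retriever.invoke("A movie about space released after 2010")
```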
|
||||
|
||||
#### Indexing
|
||||
|
||||
Fourth, consider the design of your document index. A simple and powerful idea is to **decouple the documents that you index for retrieval from the documents that you pass to the LLM for generation.** Indexing frequently uses embedding models with vector stores, which [compress the semantic information in documents to fixed-size vectors](/docs/concepts/#embedding-models).
|
||||
|
||||
Many RAG approaches focus on splitting documents into chunks and retrieving some number based on similarity to an input question for the LLM. But chunk size and chunk number can be difficult to set and affect results if they do not provide full context for the LLM to answer a question. Furthermore, LLMs are increasingly capable of processing millions of tokens.
|
||||
|
||||
Two approaches can address this tension: (1) The [Multi Vector](/docs/how_to/multi_vector/) retriever uses an LLM to translate documents into an alternate form (often a summary) that is well suited for indexing, but returns the full documents to the LLM for generation. (2) The [ParentDocument](/docs/how_to/parent_document_retriever/) retriever embeds document chunks, but also returns full documents. The idea is to get the best of both worlds: use concise representations (summaries or chunks) for retrieval, but use the full documents for answer generation.
|
||||
|
||||
| Name | Index Type | Uses an LLM | When to Use | Description |
|
||||
|---------------------------|------------------------------|---------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| [Vectorstore](/docs/how_to/vectorstore_retriever/) | Vectorstore | No | If you are just getting started and looking for something quick and easy. | This is the simplest method and the one that is easiest to get started with. It involves creating embeddings for each piece of text. |
|
||||
| [ParentDocument](/docs/how_to/parent_document_retriever/) | Vectorstore + Document Store | No | If your pages have lots of smaller pieces of distinct information that are best indexed by themselves, but best retrieved all together. | This involves indexing multiple chunks for each document. Then you find the chunks that are most similar in embedding space, but you retrieve the whole parent document and return that (rather than individual chunks). |
|
||||
| [Multi Vector](/docs/how_to/multi_vector/) | Vectorstore + Document Store | Sometimes during indexing | If you are able to extract information from documents that you think is more relevant to index than the text itself. | This involves creating multiple vectors for each document. Each vector could be created in a myriad of ways - examples include summaries of the text and hypothetical questions. |
|
||||
| [Self Query](/docs/how_to/self_query/) | Vectorstore | Yes | If users are asking questions that are better answered by fetching documents based on metadata rather than similarity with the text. | This uses an LLM to transform user input into two things: (1) a string to look up semantically, (2) a metadata filter to go along with it. This is useful because oftentimes questions are about the METADATA of documents (not the content itself). |
|
||||
| [Contextual Compression](/docs/how_to/contextual_compression/) | Any | Sometimes | If you are finding that your retrieved documents contain too much irrelevant information and are distracting the LLM. | This puts a post-processing step on top of another retriever and extracts only the most relevant information from retrieved documents. This can be done with embeddings or an LLM. |
|
||||
| [Time-Weighted Vectorstore](/docs/how_to/time_weighted_vectorstore/) | Vectorstore | No | If you have timestamps associated with your documents, and you want to retrieve the most recent ones | This fetches documents based on a combination of semantic similarity (as in normal vector retrieval) and recency (looking at timestamps of indexed documents) |
|
||||
| [Multi-Query Retriever](/docs/how_to/MultiQueryRetriever/) | Any | Yes | If users are asking questions that are complex and require multiple pieces of distinct information to respond | This uses an LLM to generate multiple queries from the original one. This is useful when the original query needs pieces of information about multiple topics to be properly answered. By generating multiple queries, we can then fetch documents for each of them. |
|
||||
| [Ensemble](/docs/how_to/ensemble_retriever/) | Any | No | If you have multiple retrieval methods and want to try combining them. | This fetches documents from multiple retrievers and then combines them. |
|
||||
|
||||
:::tip
|
||||
|
||||
- See our RAG from Scratch video on [indexing fundamentals](https://youtu.be/bjb_EMsTDKI?feature=shared)
|
||||
- See our RAG from Scratch video on [multi vector retriever](https://youtu.be/gTCU9I6QqCE?feature=shared)
|
||||
|
||||
:::
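As a minimal sketch of the "index small, return big" idea, here is the ParentDocument pattern; the corpus and models are placeholders, and a real setup would load full documents with a document loader.

```python
from langchain.retrievers import ParentDocumentRetriever
from langchain.storage import InMemoryStore
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

docs = [Document(page_content="A long report about LangChain retrievers... " * 20)]

# Small chunks are embedded for precise retrieval...
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
vectorstore = Chroma(collection_name="full_documents", embedding_function=OpenAIEmbeddings())
# ...while the full parent documents live in a separate docstore.
store = InMemoryStore()

retriever = ParentDocumentRetriever(
    vectorstore=vectorstore,
    docstore=store,
    child_splitter=child_splitter,
)
retriever.add_documents(docs)

# Chunks are matched in embedding space, but full parent documents are returned.
retriever.invoke("What does the report say about retrievers?")
```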
|
||||
|
||||
Fifth, consider ways to improve the quality of your similarity search itself. Embedding models compress text into fixed-length (vector) representations that capture the semantic content of the document. This compression is useful for search / retrieval, but puts a heavy burden on that single vector representation to capture the semantic nuance / detail of the document. In some cases, irrelevant or redundant content can dilute the semantic usefulness of the embedding.
|
||||
|
||||
[ColBERT](https://docs.google.com/presentation/d/1IRhAdGjIevrrotdplHNcc4aXgIYyKamUKTWtB3m3aMU/edit?usp=sharing) is an interesting approach that addresses this with higher-granularity embeddings: (1) produce a contextually influenced embedding for each token in the document and query, (2) score similarity between each query token and all document tokens, (3) take the max, (4) do this for all query tokens, and (5) take the sum of the max scores (in step 3) for all query tokens to get a query-document similarity score; this token-wise scoring can yield strong results.
|
||||
|
||||

|
||||
|
||||
There are some additional tricks to improve the quality of your retrieval. Embeddings excel at capturing semantic information, but may struggle with keyword-based queries. Many [vector stores](/docs/integrations/retrievers/pinecone_hybrid_search/) offer built-in [hybrid-search](https://docs.pinecone.io/guides/data/understanding-hybrid-search) to combine keyword and semantic similarity, which marries the benefits of both approaches. Furthermore, many vector stores have [maximal marginal relevance](https://python.langchain.com/v0.1/docs/modules/model_io/prompts/example_selectors/mmr/), which attempts to diversify the results of a search to avoid returning similar and redundant documents.
|
||||
|
||||
| Name | When to use | Description |
|
||||
|-------------------|----------------------------------------------------------|-------------|
|
||||
| [ColBERT](/docs/integrations/providers/ragatouille/#using-colbert-as-a-reranker) | When higher granularity embeddings are needed. | ColBERT uses contextually influenced embeddings for each token in the document and query to get a granular query-document similarity score. |
|
||||
| [Hybrid search](/docs/integrations/retrievers/pinecone_hybrid_search/) | When combining keyword-based and semantic similarity. | Hybrid search combines keyword and semantic similarity, marrying the benefits of both approaches. |
|
||||
| [Maximal Marginal Relevance (MMR)](/docs/integrations/vectorstores/pinecone/#maximal-marginal-relevance-searches) | When needing to diversify search results. | MMR attempts to diversify the results of a search to avoid returning similar and redundant documents. |
|
||||
|
||||
:::tip
|
||||
|
||||
See our RAG from Scratch video on [ColBERT](https://youtu.be/cN6S0Ehm7_8?feature=shared).
|
||||
|
||||
:::
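For example, MMR is typically a one-line change on a vector store that supports it (FAISS here, as an arbitrary choice); the texts and embedding model are placeholders.

```python
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

vectorstore = FAISS.from_texts(
    [
        "LangChain retrievers return documents relevant to a query.",
        "Retrievers accept a string query and return a list of documents.",
        "Vector stores can also support keyword-style hybrid search.",
    ],
    OpenAIEmbeddings(),
)

# Ask for diverse results: consider 3 candidates, keep the 2 least redundant.
diverse_docs = vectorstore.max_marginal_relevance_search(
    "What do LangChain retrievers do?", k=2, fetch_k=3
)

# The same behavior exposed as a retriever, for use inside chains.
retriever = vectorstore.as_retriever(search_type="mmr", search_kwargs={"k": 2, "fetch_k": 3})
```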
|
||||
|
||||
#### Post-processing
|
||||
|
||||
Sixth, consider ways to filter or rank retrieved documents. This is very useful if you are [combining documents returned from multiple sources](/docs/integrations/retrievers/cohere-reranker/#doing-reranking-with-coherererank), since it can down-rank less relevant documents and / or [compress similar documents](/docs/how_to/contextual_compression/#more-built-in-compressors-filters).
|
||||
|
||||
| Name | Index Type | Uses an LLM | When to Use | Description |
|
||||
|---------------------------|------------------------------|---------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| [Contextual Compression](/docs/how_to/contextual_compression/) | Any | Sometimes | If you are finding that your retrieved documents contain too much irrelevant information and are distracting the LLM. | This puts a post-processing step on top of another retriever and extracts only the most relevant information from retrieved documents. This can be done with embeddings or an LLM. |
|
||||
| [Ensemble](/docs/how_to/ensemble_retriever/) | Any | No | If you have multiple retrieval methods and want to try combining them. | This fetches documents from multiple retrievers and then combines them. |
|
||||
| [Re-ranking](/docs/integrations/retrievers/cohere-reranker/) | Any | Yes | If you want to rank retrieved documents based upon relevance, especially if you want to combine results from multiple retrieval methods. | Given a query and a list of documents, a reranker orders the documents from most to least semantically relevant to the query. |
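As a small post-processing sketch, here is contextual compression with an embeddings-based filter; the corpus, threshold, and models are placeholder choices, and a reranker (e.g., Cohere Rerank) could be dropped in as the compressor instead.

```python
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

embeddings = OpenAIEmbeddings()
base_retriever = FAISS.from_texts(
    ["LangSmith traces every step of a chain.", "Bananas are rich in potassium."],
    embeddings,
).as_retriever(search_kwargs={"k": 2})

# Drop retrieved documents whose similarity to the query falls below the threshold.
compressor = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.5)
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=base_retriever
)

compression_retriever.invoke("How can I trace my chain?")
```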
|
||||
|
||||
:::tip
|
||||
|
||||
See our RAG from Scratch video on [RAG-Fusion](https://youtu.be/77qELPbNgxA?feature=shared), an approach for post-processing across multiple queries: rewrite the user question from multiple perspectives, retrieve documents for each rewritten question, and combine the ranks of the multiple search result lists to produce a single, unified ranking with [Reciprocal Rank Fusion (RRF)](https://towardsdatascience.com/forget-rag-the-future-is-rag-fusion-1147298d8ad1).
|
||||
|
||||
:::
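The RRF step itself is a few lines of plain Python; the document ids below are toy placeholders for the ids returned by each per-question retrieval.

```python
from collections import defaultdict


def reciprocal_rank_fusion(result_lists, k=60):
    """Fuse several ranked lists of document ids into a single ranking.

    A document scores 1 / (k + rank) in every list it appears in, and the
    scores are summed across lists.
    """
    scores = defaultdict(float)
    for results in result_lists:
        for rank, doc_id in enumerate(results, start=1):
            scores[doc_id] += 1.0 / (k + rank)
    return sorted(scores, key=scores.get, reverse=True)


# Rankings from retrieving with two rewritten questions.
reciprocal_rank_fusion([["a", "b", "c"], ["a", "c", "d"]])
# -> ["a", "c", "b", "d"]
```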
|
||||
|
||||
#### Generation
|
||||
|
||||
**Finally, consider ways to build self-correction into your RAG system.** RAG systems can suffer from low quality retrieval (e.g., if a user question is out of the domain for the index) and / or hallucinations in generation. A naive retrieve-generate pipeline has no ability to detect or self-correct these kinds of errors. The concept of ["flow engineering"](https://x.com/karpathy/status/1748043513156272416) has been introduced [in the context of code generation](https://arxiv.org/abs/2401.08500): iteratively build an answer to a code question and use unit tests to check and self-correct errors. Several works have applied this idea to RAG, such as Self-RAG and Corrective-RAG. In both cases, checks for document relevance, hallucinations, and / or answer quality are performed in the RAG answer generation flow.
|
||||
|
||||
We've found that graphs are a great way to reliably express logical flows and have implemented ideas from several of these papers [using LangGraph](https://github.com/langchain-ai/langgraph/tree/main/examples/rag), as shown in the figure below (red - routing, blue - fallback, green - self-correction):
|
||||
- **Routing:** Adaptive RAG ([paper](https://arxiv.org/abs/2403.14403)). Route questions to different retrieval approaches, as discussed above
|
||||
- **Fallback:** Corrective RAG ([paper](https://arxiv.org/pdf/2401.15884.pdf)). Fallback to web search if docs are not relevant to query
|
||||
- **Self-correction:** Self-RAG ([paper](https://arxiv.org/abs/2310.11511)). Fix answers that contain hallucinations or that don't address the question
|
||||
|
||||

|
||||
|
||||
| Name | When to use | Description |
|
||||
|-------------------|-----------------------------------------------------------|-------------|
|
||||
| Self-RAG | When needing to fix answers with hallucinations or irrelevant content. | Self-RAG performs checks for document relevance, hallucinations, and answer quality during the RAG answer generation flow, iteratively building an answer and self-correcting errors. |
|
||||
| Corrective-RAG | When needing a fallback mechanism for low relevance docs. | Corrective-RAG includes a fallback (e.g., to web search) if the retrieved documents are not relevant to the query, ensuring higher quality and more relevant retrieval. |
|
||||
|
||||
:::tip
|
||||
|
||||
See several videos and cookbooks showcasing RAG with LangGraph:
|
||||
- [LangGraph Corrective RAG](https://www.youtube.com/watch?v=E2shqsYwxck)
|
||||
- [LangGraph combining Adaptive, Self-RAG, and Corrective RAG](https://www.youtube.com/watch?v=-ROS6gfYIts)
|
||||
- [Cookbooks for RAG using LangGraph](https://github.com/langchain-ai/langgraph/tree/main/examples/rag)
|
||||
|
||||
See our LangGraph RAG recipes with partners:
|
||||
- [Meta](https://github.com/meta-llama/llama-recipes/tree/main/recipes/use_cases/agents/langchain)
|
||||
- [Mistral](https://github.com/mistralai/cookbook/tree/main/third_party/langchain)
|
||||
|
||||
:::
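To make the flow concrete, here is a deliberately minimal LangGraph sketch of a corrective-RAG-style graph; every node body is a placeholder where you would call your retriever, an LLM grader, a web search tool, and your generation chain.

```python
from typing import List, TypedDict

from langgraph.graph import END, StateGraph


class GraphState(TypedDict):
    question: str
    documents: List[str]
    generation: str


def retrieve(state):
    return {"documents": ["<docs from your retriever>"]}  # placeholder


def grade_documents(state):
    return {"documents": state["documents"]}  # placeholder: an LLM grader filters docs here


def decide_to_generate(state):
    # Fall back to web search when nothing relevant survived grading.
    return "generate" if state["documents"] else "web_search"


def web_search(state):
    return {"documents": ["<results from a web search tool>"]}  # placeholder


def generate(state):
    return {"generation": "<answer grounded in the documents>"}  # placeholder


workflow = StateGraph(GraphState)
workflow.add_node("retrieve", retrieve)
workflow.add_node("grade_documents", grade_documents)
workflow.add_node("web_search", web_search)
workflow.add_node("generate", generate)

workflow.set_entry_point("retrieve")
workflow.add_edge("retrieve", "grade_documents")
workflow.add_conditional_edges("grade_documents", decide_to_generate)
workflow.add_edge("web_search", "generate")
workflow.add_edge("generate", END)

app = workflow.compile()
app.invoke({"question": "What is corrective RAG?"})
```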
|
||||
|
||||
### Text splitting
|
||||
|
||||
@@ -653,6 +1045,19 @@ Table columns:
|
||||
| Semantic Chunker (Experimental) | [SemanticChunker](/docs/how_to/semantic-chunker/) | Sentences | | First splits on sentences. Then combines ones next to each other if they are semantically similar enough. Taken from [Greg Kamradt](https://github.com/FullStackRetrieval-com/RetrievalTutorials/blob/main/tutorials/LevelsOfTextSplitting/5_Levels_Of_Text_Splitting.ipynb) |
|
||||
| Integration: AI21 Semantic | [AI21SemanticTextSplitter](/docs/integrations/document_transformers/ai21_semantic_text_splitter/) | ✅ | Identifies distinct topics that form coherent pieces of text and splits along those. |
|
||||
|
||||
### Evaluation
|
||||
<span data-heading-keywords="evaluation,evaluate"></span>
|
||||
|
||||
Evaluation is the process of assessing the performance and effectiveness of your LLM-powered applications.
|
||||
It involves testing the model's responses against a set of predefined criteria or benchmarks to ensure it meets the desired quality standards and fulfills the intended purpose.
|
||||
This process is vital for building reliable applications.
|
||||
|
||||

|
||||
|
||||
[LangSmith](https://docs.smith.langchain.com/) helps with this process in a few ways:
|
||||
|
||||
- It makes it easier to create and curate datasets via its tracing and annotation features
|
||||
- It provides an evaluation framework that helps you define metrics and run your app against your dataset
|
||||
- It allows you to track results over time and automatically run your evaluators on a schedule or as part of CI/CD
|
||||
|
||||
To learn more, check out [this LangSmith guide](https://docs.smith.langchain.com/concepts/evaluation).
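As a rough sketch (the dataset name, app, and metric below are hypothetical placeholders), an evaluation run with the LangSmith SDK can look roughly like this:

```python
from langsmith.evaluation import evaluate


def my_app(inputs: dict) -> dict:
    # Placeholder: call your chain or agent here.
    return {"answer": "..."}


def correctness(run, example) -> dict:
    # Placeholder metric: compare the app's answer to the dataset's reference answer.
    predicted = run.outputs["answer"]
    expected = example.outputs["answer"]
    return {"key": "correctness", "score": float(predicted == expected)}


evaluate(
    my_app,
    data="my-dataset",              # a dataset you have curated in LangSmith
    evaluators=[correctness],
    experiment_prefix="rag-baseline",
)
```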
|
||||
|
||||
@@ -206,9 +206,7 @@ ignore-words-list = 'momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogy
|
||||
|
||||
`langchain-core` and partner packages **do not use** optional dependencies in this way.
|
||||
|
||||
You only need to add a new dependency if a **unit test** relies on the package.
|
||||
If your package is only required for **integration tests**, then you can skip these
|
||||
steps and leave all pyproject.toml and poetry.lock files alone.
|
||||
You'll notice that `pyproject.toml` and `poetry.lock` are **not** touched when you add optional dependencies below.
|
||||
|
||||
If you're adding a new dependency to Langchain, assume that it will be an optional dependency, and
|
||||
that most users won't have it installed.
|
||||
@@ -216,20 +214,12 @@ that most users won't have it installed.
|
||||
Users who do not have the dependency installed should be able to **import** your code without
|
||||
any side effects (no warnings, no errors, no exceptions).
|
||||
|
||||
To introduce the dependency to the pyproject.toml file correctly, please do the following:
|
||||
To introduce the dependency to a library, please do the following:
|
||||
|
||||
1. Add the dependency to the main group as an optional dependency
|
||||
```bash
|
||||
poetry add --optional [package_name]
|
||||
```
|
||||
2. Open pyproject.toml and add the dependency to the `extended_testing` extra
|
||||
3. Relock the poetry file to update the extra.
|
||||
```bash
|
||||
poetry lock --no-update
|
||||
```
|
||||
4. Add a unit test that at the very least attempts to import the new code. Ideally, the unit
|
||||
1. Open extended_testing_deps.txt and add the dependency
|
||||
2. Add a unit test that at the very least attempts to import the new code. Ideally, the unit
|
||||
test makes use of lightweight fixtures to test the logic of the code.
|
||||
5. Please use the `@pytest.mark.requires(package_name)` decorator for any tests that require the dependency.
|
||||
3. Please use the `@pytest.mark.requires(package_name)` decorator for any unit tests that require the dependency, as in the sketch below.
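A minimal example of such a test (the `foo_sdk` package and the wrapper module it guards are hypothetical):

```python
import pytest


@pytest.mark.requires("foo_sdk")  # hypothetical optional dependency
def test_foo_wrapper_import() -> None:
    # At the very least, importing the new code should work when foo_sdk is installed.
    from langchain_community.utilities.foo import FooAPIWrapper  # hypothetical module

    assert FooAPIWrapper is not None
```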
|
||||
|
||||
## Adding a Jupyter Notebook
|
||||
|
||||
|
||||
@@ -55,7 +55,7 @@ The below sections are listed roughly in order of increasing level of abstractio
|
||||
|
||||
### Expression Language
|
||||
|
||||
[LangChain Expression Language (LCEL)](/docs/concepts#langchain-expression-language) is the fundamental way that most LangChain components fit together, and this section is designed to teach
|
||||
[LangChain Expression Language (LCEL)](/docs/concepts#langchain-expression-language-lcel) is the fundamental way that most LangChain components fit together, and this section is designed to teach
|
||||
developers how to use it to build with LangChain's primitives effectively.
|
||||
|
||||
This section should contain **Tutorials** that teach how to stream and use LCEL primitives for more abstract tasks, **Explanations** of specific behaviors,
|
||||
|
||||
@@ -48,7 +48,7 @@ In a similar vein, we do enforce certain linting, formatting, and documentation
|
||||
If you are finding these difficult (or even just annoying) to work with, feel free to contact a maintainer for help -
|
||||
we do not want these to get in the way of getting good code into the codebase.
|
||||
|
||||
# 🌟 Recognition
|
||||
### 🌟 Recognition
|
||||
|
||||
If your contribution has made its way into a release, we will want to give you credit on Twitter (only if you want though)!
|
||||
If you have a Twitter account you would like us to mention, please let us know in the PR or through another means.
|
||||
|
||||
@@ -15,12 +15,22 @@ Here's the structure visualized as a tree:
|
||||
├── cookbook # Tutorials and examples
|
||||
├── docs # Contains content for the documentation here: https://python.langchain.com/
|
||||
├── libs
|
||||
│ ├── langchain # Main package
|
||||
│ ├── langchain
|
||||
│ │ ├── langchain
|
||||
│ │ ├── tests/unit_tests # Unit tests (present in each package not shown for brevity)
|
||||
│ │ ├── tests/integration_tests # Integration tests (present in each package not shown for brevity)
|
||||
│ ├── langchain-community # Third-party integrations
|
||||
│ ├── langchain-core # Base interfaces for key abstractions
|
||||
│ ├── langchain-experimental # Experimental components and chains
|
||||
│ ├── community # Third-party integrations
|
||||
│ │ ├── langchain-community
|
||||
│ ├── core # Base interfaces for key abstractions
|
||||
│ │ ├── langchain-core
|
||||
│ ├── experimental # Experimental components and chains
|
||||
│ │ ├── langchain-experimental
|
||||
| ├── cli # Command line interface
|
||||
│ │ ├── langchain-cli
|
||||
│ ├── text-splitters
|
||||
│ │ ├── langchain-text-splitters
|
||||
│ ├── standard-tests
|
||||
│ │ ├── langchain-standard-tests
|
||||
│ ├── partners
|
||||
│ ├── langchain-partner-1
|
||||
│ ├── langchain-partner-2
|
||||
|
||||
BIN
docs/docs/example_data/nke-10k-2023.pdf
Normal file
Binary file not shown.
@@ -15,7 +15,11 @@
|
||||
"id": "f4c03f40-1328-412d-8a48-1db0cd481b77",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Build an Agent\n",
|
||||
"# Build an Agent with AgentExecutor (Legacy)\n",
|
||||
"\n",
|
||||
":::{.callout-important}\n",
|
||||
"This section will cover building with the legacy LangChain AgentExecutor. These are fine for getting started, but past a certain point, you will likely want flexibility and control that they do not offer. For working with more advanced agents, we'd recommend checking out [LangGraph Agents](/docs/concepts/#langgraph) or the [migration guide](/docs/how_to/migrate_agent/)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"By themselves, language models can't take actions - they just output text.\n",
|
||||
"A big use case for LangChain is creating **agents**.\n",
|
||||
@@ -24,10 +28,6 @@
|
||||
"\n",
|
||||
"In this tutorial, we will build an agent that can interact with multiple different tools: one being a local database, the other being a search engine. You will be able to ask this agent questions, watch it call tools, and have conversations with it.\n",
|
||||
"\n",
|
||||
":::{.callout-important}\n",
|
||||
"This section will cover building with LangChain Agents. LangChain Agents are fine for getting started, but past a certain point, you will likely want flexibility and control that they do not offer. For working with more advanced agents, we'd reccommend checking out [LangGraph](/docs/concepts/#langgraph)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Concepts\n",
|
||||
"\n",
|
||||
"Concepts we will cover are:\n",
|
||||
|
||||
157
docs/docs/how_to/chat_models_universal_init.ipynb
Normal file
@@ -0,0 +1,157 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cfdf4f09-8125-4ed1-8063-6feed57da8a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to init any model in one line\n",
|
||||
"\n",
|
||||
"Many LLM applications let end users specify what model provider and model they want the application to be powered by. This requires writing some logic to initialize different ChatModels based on some user configuration. The `init_chat_model()` helper method makes it easy to initialize a number of different model integrations without having to worry about import paths and class names.\n",
|
||||
"\n",
|
||||
":::tip Supported models\n",
|
||||
"\n",
|
||||
"See the [init_chat_model()](https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.init_chat_model.html) API reference for a full list of supported integrations.\n",
|
||||
"\n",
|
||||
"Make sure you have the integration packages installed for any model providers you want to support. E.g. you should have `langchain-openai` installed to init an OpenAI model.\n",
|
||||
"\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "165b0de6-9ae3-4e3d-aa98-4fc8a97c4a06",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain langchain-openai langchain-anthropic langchain-google-vertexai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ea2c9f57-a796-45f8-b6f4-3efd3f361a9b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Basic usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "79e14913-803c-4382-9009-5c6af3d75d35",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"GPT-4o: I'm an AI created by OpenAI, and I don't have a personal name. You can call me Assistant! How can I help you today?\n",
|
||||
"\n",
|
||||
"Claude Opus: My name is Claude. It's nice to meet you!\n",
|
||||
"\n",
|
||||
"Gemini 1.5: I am a large language model, trained by Google. I do not have a name. \n",
|
||||
"\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chat_models import init_chat_model\n",
|
||||
"\n",
|
||||
"# Returns a langchain_openai.ChatOpenAI instance.\n",
|
||||
"gpt_4o = init_chat_model(\"gpt-4o\", model_provider=\"openai\", temperature=0)\n",
|
||||
"# Returns a langchain_anthropic.ChatAnthropic instance.\n",
|
||||
"claude_opus = init_chat_model(\n",
|
||||
" \"claude-3-opus-20240229\", model_provider=\"anthropic\", temperature=0\n",
|
||||
")\n",
|
||||
"# Returns a langchain_google_vertexai.ChatVertexAI instance.\n",
|
||||
"gemini_15 = init_chat_model(\n",
|
||||
" \"gemini-1.5-pro\", model_provider=\"google_vertexai\", temperature=0\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Since all model integrations implement the ChatModel interface, you can use them in the same way.\n",
|
||||
"print(\"GPT-4o: \" + gpt_4o.invoke(\"what's your name\").content + \"\\n\")\n",
|
||||
"print(\"Claude Opus: \" + claude_opus.invoke(\"what's your name\").content + \"\\n\")\n",
|
||||
"print(\"Gemini 1.5: \" + gemini_15.invoke(\"what's your name\").content + \"\\n\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fff9a4c8-b6ee-4a1a-8d3d-0ecaa312d4ed",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Simple config example"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "75c25d39-bf47-4b51-a6c6-64d9c572bfd6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"user_config = {\n",
|
||||
" \"model\": \"...user-specified...\",\n",
|
||||
" \"model_provider\": \"...user-specified...\",\n",
|
||||
" \"temperature\": 0,\n",
|
||||
" \"max_tokens\": 1000,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"llm = init_chat_model(**user_config)\n",
|
||||
"llm.invoke(\"what's your name\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f811f219-5e78-4b62-b495-915d52a22532",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Inferring model provider\n",
|
||||
"\n",
|
||||
"For common and distinct model names `init_chat_model()` will attempt to infer the model provider. See the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.init_chat_model.html) for a full list of inference behavior. E.g. any model that starts with `gpt-3...` or `gpt-4...` will be inferred as using model provider `openai`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "0378ccc6-95bc-4d50-be50-fccc193f0a71",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"gpt_4o = init_chat_model(\"gpt-4o\", temperature=0)\n",
|
||||
"claude_opus = init_chat_model(\"claude-3-opus-20240229\", temperature=0)\n",
|
||||
"gemini_15 = init_chat_model(\"gemini-1.5-pro\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "da07b5c0-d2e6-42e4-bfcd-2efcfaae6221",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-2",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-2"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -60,7 +60,7 @@
|
||||
"source": [
|
||||
"examples = [\n",
|
||||
" {\"input\": \"hi\", \"output\": \"ciao\"},\n",
|
||||
" {\"input\": \"bye\", \"output\": \"arrivaderci\"},\n",
|
||||
" {\"input\": \"bye\", \"output\": \"arrivederci\"},\n",
|
||||
" {\"input\": \"soccer\", \"output\": \"calcio\"},\n",
|
||||
"]"
|
||||
]
|
||||
@@ -133,7 +133,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'input': 'bye', 'output': 'arrivaderci'}]"
|
||||
"[{'input': 'bye', 'output': 'arrivederci'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 39,
|
||||
@@ -209,7 +209,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Translate the following words from English to Italain:\n",
|
||||
"Translate the following words from English to Italian:\n",
|
||||
"\n",
|
||||
"Input: hand -> Output: mano\n",
|
||||
"\n",
|
||||
@@ -222,7 +222,7 @@
|
||||
" example_selector=example_selector,\n",
|
||||
" example_prompt=example_prompt,\n",
|
||||
" suffix=\"Input: {input} -> Output:\",\n",
|
||||
" prefix=\"Translate the following words from English to Italain:\",\n",
|
||||
" prefix=\"Translate the following words from English to Italian:\",\n",
|
||||
" input_variables=[\"input\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
|
||||
@@ -128,7 +128,7 @@
|
||||
" # Having a good description can help improve extraction results.\n",
|
||||
" name: Optional[str] = Field(..., description=\"The name of the person\")\n",
|
||||
" hair_color: Optional[str] = Field(\n",
|
||||
" ..., description=\"The color of the peron's eyes if known\"\n",
|
||||
" ..., description=\"The color of the person's hair if known\"\n",
|
||||
" )\n",
|
||||
" height_in_meters: Optional[str] = Field(..., description=\"Height in METERs\")\n",
|
||||
"\n",
|
||||
|
||||
203
docs/docs/how_to/filter_messages.ipynb
Normal file
@@ -0,0 +1,203 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e389175d-8a65-4f0d-891c-dbdfabb3c3ef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to filter messages\n",
|
||||
"\n",
|
||||
"In more complex chains and agents we might track state with a list of messages. This list can start to accumulate messages from multiple different models, speakers, sub-chains, etc., and we may only want to pass subsets of this full list of messages to each model call in the chain/agent.\n",
|
||||
"\n",
|
||||
"The `filter_messages` utility makes it easy to filter messages by type, id, or name.\n",
|
||||
"\n",
|
||||
"## Basic usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "f4ad2fd3-3cab-40d4-a989-972115865b8b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='example input', name='example_user', id='2'),\n",
|
||||
" HumanMessage(content='real input', name='bob', id='4')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import (\n",
|
||||
" AIMessage,\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
" filter_messages,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(\"you are a good assistant\", id=\"1\"),\n",
|
||||
" HumanMessage(\"example input\", id=\"2\", name=\"example_user\"),\n",
|
||||
" AIMessage(\"example output\", id=\"3\", name=\"example_assistant\"),\n",
|
||||
" HumanMessage(\"real input\", id=\"4\", name=\"bob\"),\n",
|
||||
" AIMessage(\"real output\", id=\"5\", name=\"alice\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"filter_messages(messages, include_types=\"human\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "7b663a1e-a8ae-453e-a072-8dd75dfab460",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[SystemMessage(content='you are a good assistant', id='1'),\n",
|
||||
" HumanMessage(content='real input', name='bob', id='4'),\n",
|
||||
" AIMessage(content='real output', name='alice', id='5')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"filter_messages(messages, exclude_names=[\"example_user\", \"example_assistant\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "db170e46-03f8-4710-b967-23c70c3ac054",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='example input', name='example_user', id='2'),\n",
|
||||
" HumanMessage(content='real input', name='bob', id='4'),\n",
|
||||
" AIMessage(content='real output', name='alice', id='5')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"filter_messages(messages, include_types=[HumanMessage, AIMessage], exclude_ids=[\"3\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b7c4e5ad-d1b4-4c18-b250-864adde8f0dd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"`filter_messages` can be used in an imperatively (like above) or declaratively, making it easy to compose with other components in a chain:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "675f8f79-db39-401c-a582-1df2478cba30",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=[], response_metadata={'id': 'msg_01Wz7gBHahAwkZ1KCBNtXmwA', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 3}}, id='run-b5d8a3fe-004f-4502-a071-a6c025031827-0', usage_metadata={'input_tokens': 16, 'output_tokens': 3, 'total_tokens': 19})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# pip install -U langchain-anthropic\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", temperature=0)\n",
|
||||
"# Notice we don't pass in messages. This creates\n",
|
||||
"# a RunnableLambda that takes messages as input\n",
|
||||
"filter_ = filter_messages(exclude_names=[\"example_user\", \"example_assistant\"])\n",
|
||||
"chain = filter_ | llm\n",
|
||||
"chain.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4133ab28-f49c-480f-be92-b51eb6559153",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Looking at the LangSmith trace we can see that before the messages are passed to the model they are filtered: https://smith.langchain.com/public/f808a724-e072-438e-9991-657cc9e7e253/r\n",
|
||||
"\n",
|
||||
"Looking at just the filter_, we can see that it's a Runnable object that can be invoked like all Runnables:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "c090116a-1fef-43f6-a178-7265dff9db00",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='real input', name='bob', id='4'),\n",
|
||||
" AIMessage(content='real output', name='alice', id='5')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"filter_.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ff339066-d424-4042-8cca-cd4b007c1a8e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For a complete description of all arguments head to the API reference: https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.filter_messages.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-2",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-2"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -14,6 +14,7 @@ For comprehensive descriptions of every class and function see the [API Referenc
|
||||
## Installation
|
||||
|
||||
- [How to: install LangChain packages](/docs/how_to/installation/)
|
||||
- [How to: use LangChain with different Pydantic versions](/docs/how_to/pydantic_compatibility)
|
||||
|
||||
## Key features
|
||||
|
||||
@@ -49,7 +50,7 @@ These are the core building blocks you can use when building applications.
|
||||
|
||||
### Prompt templates
|
||||
|
||||
Prompt Templates are responsible for formatting user input into a format that can be passed to a language model.
|
||||
[Prompt Templates](/docs/concepts/#prompt-templates) are responsible for formatting user input into a format that can be passed to a language model.
|
||||
|
||||
- [How to: use few shot examples](/docs/how_to/few_shot_examples)
|
||||
- [How to: use few shot examples in chat models](/docs/how_to/few_shot_examples_chat/)
|
||||
@@ -58,7 +59,7 @@ Prompt Templates are responsible for formatting user input into a format that ca
|
||||
|
||||
### Example selectors
|
||||
|
||||
Example Selectors are responsible for selecting the correct few shot examples to pass to the prompt.
|
||||
[Example Selectors](/docs/concepts/#example-selectors) are responsible for selecting the correct few shot examples to pass to the prompt.
|
||||
|
||||
- [How to: use example selectors](/docs/how_to/example_selectors)
|
||||
- [How to: select examples by length](/docs/how_to/example_selectors_length_based)
|
||||
@@ -68,7 +69,7 @@ Example Selectors are responsible for selecting the correct few shot examples to
|
||||
|
||||
### Chat models
|
||||
|
||||
Chat Models are newer forms of language models that take messages in and output a message.
|
||||
[Chat Models](/docs/concepts/#chat-models) are newer forms of language models that take messages in and output a message.
|
||||
|
||||
- [How to: do function/tool calling](/docs/how_to/tool_calling)
|
||||
- [How to: get models to return structured output](/docs/how_to/structured_output)
|
||||
@@ -78,10 +79,19 @@ Chat Models are newer forms of language models that take messages in and output
|
||||
- [How to: stream a response back](/docs/how_to/chat_streaming)
|
||||
- [How to: track token usage](/docs/how_to/chat_token_usage_tracking)
|
||||
- [How to: track response metadata across providers](/docs/how_to/response_metadata)
|
||||
- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/)
|
||||
|
||||
### Messages
|
||||
|
||||
[Messages](/docs/concepts/#messages) are the input and output of chat models. They have some `content` and a `role`, which describes the source of the message.
|
||||
|
||||
- [How to: trim messages](/docs/how_to/trim_messages/)
|
||||
- [How to: filter messages](/docs/how_to/filter_messages/)
|
||||
- [How to: merge consecutive messages of the same type](/docs/how_to/merge_message_runs/)
|
||||
|
||||
### LLMs
|
||||
|
||||
What LangChain calls LLMs are older forms of language models that take a string in and output a string.
|
||||
What LangChain calls [LLMs](/docs/concepts/#llms) are older forms of language models that take a string in and output a string.
|
||||
|
||||
- [How to: cache model responses](/docs/how_to/llm_caching)
|
||||
- [How to: create a custom LLM class](/docs/how_to/custom_llm)
|
||||
@@ -91,7 +101,7 @@ What LangChain calls LLMs are older forms of language models that take a string
|
||||
|
||||
### Output parsers
|
||||
|
||||
Output Parsers are responsible for taking the output of an LLM and parsing into more structured format.
|
||||
[Output Parsers](/docs/concepts/#output-parsers) are responsible for taking the output of an LLM and parsing into more structured format.
|
||||
|
||||
- [How to: use output parsers to parse an LLM response into structured format](/docs/how_to/output_parser_structured)
|
||||
- [How to: parse JSON output](/docs/how_to/output_parser_json)
|
||||
@@ -103,7 +113,7 @@ Output Parsers are responsible for taking the output of an LLM and parsing into
|
||||
|
||||
### Document loaders
|
||||
|
||||
Document Loaders are responsible for loading documents from a variety of sources.
|
||||
[Document Loaders](/docs/concepts/#document-loaders) are responsible for loading documents from a variety of sources.
|
||||
|
||||
- [How to: load CSV data](/docs/how_to/document_loader_csv)
|
||||
- [How to: load data from a directory](/docs/how_to/document_loader_directory)
|
||||
@@ -116,7 +126,7 @@ Document Loaders are responsible for loading documents from a variety of sources
|
||||
|
||||
### Text splitters
|
||||
|
||||
Text Splitters take a document and split into chunks that can be used for retrieval.
|
||||
[Text Splitters](/docs/concepts/#text-splitters) take a document and split into chunks that can be used for retrieval.
|
||||
|
||||
- [How to: recursively split text](/docs/how_to/recursive_text_splitter)
|
||||
- [How to: split by HTML headers](/docs/how_to/HTML_header_metadata_splitter)
|
||||
@@ -130,20 +140,20 @@ Text Splitters take a document and split into chunks that can be used for retrie
|
||||
|
||||
### Embedding models
|
||||
|
||||
Embedding Models take a piece of text and create a numerical representation of it.
|
||||
[Embedding Models](/docs/concepts/#embedding-models) take a piece of text and create a numerical representation of it.
|
||||
|
||||
- [How to: embed text data](/docs/how_to/embed_text)
|
||||
- [How to: cache embedding results](/docs/how_to/caching_embeddings)
|
||||
|
||||
### Vector stores
|
||||
|
||||
Vector stores are databases that can efficiently store and retrieve embeddings.
|
||||
[Vector stores](/docs/concepts/#vector-stores) are databases that can efficiently store and retrieve embeddings.
|
||||
|
||||
- [How to: use a vector store to retrieve data](/docs/how_to/vectorstores)
|
||||
|
||||
### Retrievers
|
||||
|
||||
Retrievers are responsible for taking a query and returning relevant documents.
|
||||
[Retrievers](/docs/concepts/#retrievers) are responsible for taking a query and returning relevant documents.
|
||||
|
||||
- [How to: use a vector store to retrieve data](/docs/how_to/vectorstore_retriever)
|
||||
- [How to: generate multiple queries to retrieve data for](/docs/how_to/MultiQueryRetriever)
|
||||
@@ -166,12 +176,13 @@ Indexing is the process of keeping your vectorstore in-sync with the underlying
|
||||
|
||||
### Tools
|
||||
|
||||
LangChain Tools contain a description of the tool (to pass to the language model) as well as the implementation of the function to call).
|
||||
LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call).
|
||||
|
||||
- [How to: create custom tools](/docs/how_to/custom_tools)
|
||||
- [How to: use built-in tools and built-in toolkits](/docs/how_to/tools_builtin)
|
||||
- [How to: use a chat model to call tools](/docs/how_to/tool_calling/)
|
||||
- [How to: add ad-hoc tool calling capability to LLMs and chat models](/docs/how_to/tools_prompting)
|
||||
- [How to: pass run time values to tools](/docs/how_to/tool_runtime)
|
||||
- [How to: add a human in the loop to tool usage](/docs/how_to/tools_human)
|
||||
- [How to: handle errors when calling tools](/docs/how_to/tools_error)
|
||||
|
||||
@@ -194,6 +205,8 @@ For in depth how-to guides for agents, please check out [LangGraph](https://gith
|
||||
|
||||
### Callbacks
|
||||
|
||||
[Callbacks](/docs/concepts/#callbacks) allow you to hook into the various stages of your LLM application's execution.
|
||||
|
||||
- [How to: pass in callbacks at runtime](/docs/how_to/callbacks_runtime)
|
||||
- [How to: attach callbacks to a module](/docs/how_to/callbacks_attach)
|
||||
- [How to: pass callbacks into a module constructor](/docs/how_to/callbacks_constructor)
|
||||
@@ -220,6 +233,7 @@ These guides cover use-case specific details.
|
||||
### Q&A with RAG
|
||||
|
||||
Retrieval Augmented Generation (RAG) is a way to connect LLMs to external sources of data.
|
||||
For a high-level tutorial on RAG, check out [this guide](/docs/tutorials/rag/).
|
||||
|
||||
- [How to: add chat history](/docs/how_to/qa_chat_history_how_to/)
|
||||
- [How to: stream](/docs/how_to/qa_streaming/)
|
||||
@@ -231,6 +245,7 @@ Retrieval Augmented Generation (RAG) is a way to connect LLMs to external source
|
||||
### Extraction
|
||||
|
||||
Extraction is when you use LLMs to extract structured information from unstructured text.
|
||||
For a high level tutorial on extraction, check out [this guide](/docs/tutorials/extraction/).
|
||||
|
||||
- [How to: use reference examples](/docs/how_to/extraction_examples/)
|
||||
- [How to: handle long text](/docs/how_to/extraction_long_text/)
|
||||
@@ -239,14 +254,17 @@ Extraction is when you use LLMs to extract structured information from unstructu
|
||||
### Chatbots
|
||||
|
||||
Chatbots involve using an LLM to have a conversation.
|
||||
For a high-level tutorial on building chatbots, check out [this guide](/docs/tutorials/chatbot/).
|
||||
|
||||
- [How to: manage memory](/docs/how_to/chatbots_memory)
|
||||
- [How to: do retrieval](/docs/how_to/chatbots_retrieval)
|
||||
- [How to: use tools](/docs/how_to/chatbots_tools)
|
||||
- [How to: manage large chat history](/docs/how_to/trim_messages/)
|
||||
|
||||
### Query analysis
|
||||
|
||||
Query Analysis is the task of using an LLM to generate a query to send to a retriever.
|
||||
For a high-level tutorial on query analysis, check out [this guide](/docs/tutorials/query_analysis/).
|
||||
|
||||
- [How to: add examples to the prompt](/docs/how_to/query_few_shot)
|
||||
- [How to: handle cases where no queries are generated](/docs/how_to/query_no_queries)
|
||||
@@ -258,6 +276,7 @@ Query Analysis is the task of using an LLM to generate a query to send to a retr
|
||||
### Q&A over SQL + CSV
|
||||
|
||||
You can use LLMs to do question answering over tabular data.
|
||||
For a high-level tutorial, check out [this guide](/docs/tutorials/sql_qa/).
|
||||
|
||||
- [How to: use prompting to improve results](/docs/how_to/sql_prompting)
|
||||
- [How to: do query validation](/docs/how_to/sql_query_checking)
|
||||
@@ -267,8 +286,33 @@ You can use LLMs to do question answering over tabular data.
|
||||
### Q&A over graph databases
|
||||
|
||||
You can use an LLM to do question answering over graph databases.
|
||||
For a high-level tutorial, check out [this guide](/docs/tutorials/graph/).
|
||||
|
||||
- [How to: map values to a database](/docs/how_to/graph_mapping)
|
||||
- [How to: add a semantic layer over the database](/docs/how_to/graph_semantic)
|
||||
- [How to: improve results with prompting](/docs/how_to/graph_prompting)
|
||||
- [How to: construct knowledge graphs](/docs/how_to/graph_constructing)
|
||||
|
||||
## [LangGraph](https://langchain-ai.github.io/langgraph)
|
||||
|
||||
LangGraph is an extension of LangChain aimed at
|
||||
building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
|
||||
|
||||
LangGraph documentation is currently hosted on a separate site.
|
||||
You can peruse [LangGraph how-to guides here](https://langchain-ai.github.io/langgraph/how-tos/).
|
||||
|
||||
## [LangSmith](https://docs.smith.langchain.com/)
|
||||
|
||||
LangSmith allows you to closely trace, monitor and evaluate your LLM application.
|
||||
It seamlessly integrates with LangChain and LangGraph, and you can use it to inspect and debug individual steps of your chains and agents as you build.
|
||||
|
||||
LangSmith documentation is hosted on a separate site.
|
||||
You can peruse [LangSmith how-to guides here](https://docs.smith.langchain.com/how_to_guides/).
|
||||
|
||||
### Evaluation
|
||||
<span data-heading-keywords="evaluation,evaluate"></span>
|
||||
|
||||
Evaluating performance is a vital part of building LLM-powered applications.
|
||||
LangSmith helps with every step of the process from creating a dataset to defining metrics to running evaluators.
|
||||
|
||||
To learn more, check out the [LangSmith evaluation how-to guides](https://docs.smith.langchain.com/how_to_guides/evaluation).
|
||||
|
||||
@@ -60,7 +60,7 @@
|
||||
" * document addition by id (`add_documents` method with `ids` argument)\n",
|
||||
" * delete by id (`delete` method with `ids` argument)\n",
|
||||
"\n",
|
||||
"Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
|
||||
"Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `AzureCosmosDBNoSqlVectorSearch`, `AzureCosmosDBVectorSearch`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
|
||||
" \n",
|
||||
"## Caution\n",
|
||||
"\n",
|
||||
|
||||
170
docs/docs/how_to/merge_message_runs.ipynb
Normal file
@@ -0,0 +1,170 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ac47bfab-0f4f-42ce-8bb6-898ef22a0338",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to merge consecutive messages of the same type\n",
|
||||
"\n",
|
||||
"Certain models do not support passing in consecutive messages of the same type (a.k.a. \"runs\" of the same message type).\n",
|
||||
"\n",
|
||||
"The `merge_message_runs` utility makes it easy to merge consecutive messages of the same type.\n",
|
||||
"\n",
|
||||
"## Basic usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "1a215bbb-c05c-40b0-a6fd-d94884d517df",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"SystemMessage(content=\"you're a good assistant.\\nyou always respond with a joke.\")\n",
|
||||
"\n",
|
||||
"HumanMessage(content=[{'type': 'text', 'text': \"i wonder why it's called langchain\"}, 'and who is harrison chasing anyways'])\n",
|
||||
"\n",
|
||||
"AIMessage(content='Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!\\nWhy, he\\'s probably chasing after the last cup of coffee in the office!')\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import (\n",
|
||||
" AIMessage,\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
" merge_message_runs,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(\"you're a good assistant.\"),\n",
|
||||
" SystemMessage(\"you always respond with a joke.\"),\n",
|
||||
" HumanMessage([{\"type\": \"text\", \"text\": \"i wonder why it's called langchain\"}]),\n",
|
||||
" HumanMessage(\"and who is harrison chasing anyways\"),\n",
|
||||
" AIMessage(\n",
|
||||
" 'Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!'\n",
|
||||
" ),\n",
|
||||
" AIMessage(\"Why, he's probably chasing after the last cup of coffee in the office!\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"merged = merge_message_runs(messages)\n",
|
||||
"print(\"\\n\\n\".join([repr(x) for x in merged]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0544c811-7112-4b76-8877-cc897407c738",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Notice that if the contents of one of the messages to merge is a list of content blocks then the merged message will have a list of content blocks. And if both messages to merge have string contents then those are concatenated with a newline character."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b2eee74-71c8-4168-b968-bca580c25d18",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"`merge_message_runs` can be used imperatively (like above) or declaratively, making it easy to compose with other components in a chain:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "6d5a0283-11f8-435b-b27b-7b18f7693592",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=[], response_metadata={'id': 'msg_01D6R8Naum57q8qBau9vLBUX', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 84, 'output_tokens': 3}}, id='run-ac0c465b-b54f-4b8b-9295-e5951250d653-0', usage_metadata={'input_tokens': 84, 'output_tokens': 3, 'total_tokens': 87})"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# pip install -U langchain-anthropic\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", temperature=0)\n",
|
||||
"# Notice we don't pass in messages. This creates\n",
|
||||
"# a RunnableLambda that takes messages as input\n",
|
||||
"merger = merge_message_runs()\n",
|
||||
"chain = merger | llm\n",
|
||||
"chain.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72e90dce-693c-4842-9526-ce6460fe956b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Looking at the LangSmith trace we can see that before the messages are passed to the model they are merged: https://smith.langchain.com/public/ab558677-cac9-4c59-9066-1ecce5bcd87c/r\n",
|
||||
"\n",
|
||||
"Looking at just the merger, we can see that it's a Runnable object that can be invoked like all Runnables:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "460817a6-c327-429d-958e-181a8c46059c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[SystemMessage(content=\"you're a good assistant.\\nyou always respond with a joke.\"),\n",
|
||||
" HumanMessage(content=[{'type': 'text', 'text': \"i wonder why it's called langchain\"}, 'and who is harrison chasing anyways']),\n",
|
||||
" AIMessage(content='Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!\\nWhy, he\\'s probably chasing after the last cup of coffee in the office!')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"merger.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4548d916-ce21-4dc6-8f19-eedb8003ace6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For a complete description of all arguments head to the API reference: https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.merge_message_runs.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-2",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-2"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
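To see the merging rules described in the notebook above in isolation (string contents joined with a newline, list contents extended in place), here is a minimal sketch; it assumes only `langchain-core` is installed and reuses the notebook's messages:

```python
from langchain_core.messages import (
    HumanMessage,
    SystemMessage,
    merge_message_runs,
)

messages = [
    # Two consecutive system messages with string contents:
    # the merged message joins them with a newline.
    SystemMessage("you're a good assistant."),
    SystemMessage("you always respond with a joke."),
    # A human message whose content is a list of blocks followed by one with a
    # plain string: the merged message keeps a list and appends the string.
    HumanMessage([{"type": "text", "text": "i wonder why it's called langchain"}]),
    HumanMessage("and who is harrison chasing anyways"),
]

for msg in merge_message_runs(messages):
    print(type(msg).__name__, "->", msg.content)
```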
@@ -898,7 +898,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -94,7 +94,7 @@
|
||||
"source": [
|
||||
"## LCEL\n",
|
||||
"\n",
|
||||
"Output parsers implement the [Runnable interface](/docs/concepts#interface), the basic building block of the [LangChain Expression Language (LCEL)](/docs/concepts#langchain-expression-language). This means they support `invoke`, `ainvoke`, `stream`, `astream`, `batch`, `abatch`, `astream_log` calls.\n",
|
||||
"Output parsers implement the [Runnable interface](/docs/concepts#interface), the basic building block of the [LangChain Expression Language (LCEL)](/docs/concepts#langchain-expression-language-lcel). This means they support `invoke`, `ainvoke`, `stream`, `astream`, `batch`, `abatch`, `astream_log` calls.\n",
|
||||
"\n",
|
||||
"Output parsers accept a string or `BaseMessage` as input and can return an arbitrary type."
|
||||
]
|
||||
|
||||
107
docs/docs/how_to/pydantic_compatibility.md
Normal file
@@ -0,0 +1,107 @@
|
||||
# How to use LangChain with different Pydantic versions
|
||||
|
||||
- Pydantic v2 was released in June, 2023 (https://docs.pydantic.dev/2.0/blog/pydantic-v2-final/)
|
||||
- v2 contains a number of breaking changes (https://docs.pydantic.dev/2.0/migration/)
|
||||
- Pydantic v2 and v1 are under the same package name, so both versions cannot be installed at the same time
|
||||
|
||||
## LangChain Pydantic migration plan
|
||||
|
||||
As of `langchain>=0.0.267`, LangChain will allow users to install either Pydantic V1 or V2.
|
||||
* Internally LangChain will continue to [use V1](https://docs.pydantic.dev/latest/migration/#continue-using-pydantic-v1-features).
|
||||
* During this time, users can pin their pydantic version to v1 to avoid breaking changes, or start a partial
|
||||
migration using pydantic v2 throughout their code, but avoiding mixing v1 and v2 code for LangChain (see below).
|
||||
|
||||
Users can either pin to pydantic v1 and upgrade their code in one go once LangChain has migrated to v2 internally, or they can start a partial migration to v2, but must avoid mixing v1 and v2 code for LangChain.
|
||||
|
||||
Below are two examples showing how to avoid mixing pydantic v1 and v2 code in
|
||||
the case of inheritance and in the case of passing objects to LangChain.
|
||||
|
||||
**Example 1: Extending via inheritance**
|
||||
|
||||
**YES**
|
||||
|
||||
```python
|
||||
from pydantic.v1 import Field, root_validator, validator
|
||||
from langchain_core.tools import BaseTool
|
||||
|
||||
class CustomTool(BaseTool): # BaseTool is v1 code
|
||||
x: int = Field(default=1)
|
||||
|
||||
def _run(*args, **kwargs):
|
||||
return "hello"
|
||||
|
||||
@validator('x') # v1 code
|
||||
@classmethod
|
||||
def validate_x(cls, x: int) -> int:
|
||||
return 1
|
||||
|
||||
|
||||
CustomTool(
|
||||
name='custom_tool',
|
||||
description="hello",
|
||||
x=1,
|
||||
)
|
||||
```
|
||||
|
||||
Mixing Pydantic v2 primitives with Pydantic v1 primitives can raise cryptic errors
|
||||
|
||||
**NO**
|
||||
|
||||
```python
|
||||
from pydantic import Field, field_validator # pydantic v2
|
||||
from langchain_core.tools import BaseTool
|
||||
|
||||
class CustomTool(BaseTool): # BaseTool is v1 code
|
||||
x: int = Field(default=1)
|
||||
|
||||
def _run(*args, **kwargs):
|
||||
return "hello"
|
||||
|
||||
@field_validator('x') # v2 code
|
||||
@classmethod
|
||||
def validate_x(cls, x: int) -> int:
|
||||
return 1
|
||||
|
||||
|
||||
CustomTool(
|
||||
name='custom_tool',
|
||||
description="hello",
|
||||
x=1,
|
||||
)
|
||||
```
|
||||
|
||||
**Example 2: Passing objects to LangChain**
|
||||
|
||||
**YES**
|
||||
|
||||
```python
|
||||
from langchain_core.tools import Tool
|
||||
from pydantic.v1 import BaseModel, Field # <-- Uses v1 namespace
|
||||
|
||||
class CalculatorInput(BaseModel):
|
||||
question: str = Field()
|
||||
|
||||
Tool.from_function( # <-- tool uses v1 namespace
|
||||
func=lambda question: 'hello',
|
||||
name="Calculator",
|
||||
description="useful for when you need to answer questions about math",
|
||||
args_schema=CalculatorInput
|
||||
)
|
||||
```
|
||||
|
||||
**NO**
|
||||
|
||||
```python
|
||||
from langchain_core.tools import Tool
|
||||
from pydantic import BaseModel, Field # <-- Uses v2 namespace
|
||||
|
||||
class CalculatorInput(BaseModel):
|
||||
question: str = Field()
|
||||
|
||||
Tool.from_function( # <-- tool uses v1 namespace
|
||||
func=lambda question: 'hello',
|
||||
name="Calculator",
|
||||
description="useful for when you need to answer questions about math",
|
||||
args_schema=CalculatorInput
|
||||
)
|
||||
```
|
||||
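Since both majors ship under the same package name, it can help to check which one is installed before deciding which namespace to import from. A minimal sketch (relying only on `pydantic.VERSION`, which is a plain version string in both v1 and v2):

```python
import pydantic

print(pydantic.VERSION)  # e.g. "1.10.15" or "2.7.4"

if pydantic.VERSION.startswith("2."):
    # Pydantic v2 is installed; the v1 API is still available under pydantic.v1,
    # which is the namespace to use when extending LangChain's v1-based objects.
    from pydantic.v1 import BaseModel, Field
else:
    from pydantic import BaseModel, Field
```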
@@ -14,7 +14,7 @@
|
||||
"We will cover two approaches:\n",
|
||||
"\n",
|
||||
"1. Using the built-in [create_retrieval_chain](https://api.python.langchain.com/en/latest/chains/langchain.chains.retrieval.create_retrieval_chain.html), which returns sources by default;\n",
|
||||
"2. Using a simple [LCEL](/docs/concepts#langchain-expression-language) implementation, to show the operating principle."
|
||||
"2. Using a simple [LCEL](/docs/concepts#langchain-expression-language-lcel) implementation, to show the operating principle."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -323,7 +323,7 @@
|
||||
"id": "fa0f589d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Routing by semantic similarity\n",
|
||||
"## Routing by semantic similarity\n",
|
||||
"\n",
|
||||
"One especially useful technique is to use embeddings to route a query to the most relevant prompt. Here's an example."
|
||||
]
|
||||
@@ -371,7 +371,7 @@
|
||||
"chain = (\n",
|
||||
" {\"query\": RunnablePassthrough()}\n",
|
||||
" | RunnableLambda(prompt_router)\n",
|
||||
" | ChatAnthropic(model_name=\"claude-3-haiku-20240307\")\n",
|
||||
" | ChatAnthropic(model=\"claude-3-haiku-20240307\")\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
|
||||
@@ -297,13 +297,67 @@
|
||||
"print(len(docs))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"### Gradient\n",
|
||||
"\n",
|
||||
"In this method, the gradient of distance is used to split chunks along with the percentile method.\n",
|
||||
"This method is useful when chunks are highly correlated with each other or specific to a domain, e.g. legal or medical. The idea is to apply anomaly detection on the gradient array so that the distribution becomes wider, making it easier to identify boundaries in highly semantic data."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "423c6e099e94ca69"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b1f65472",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"source": [
|
||||
"text_splitter = SemanticChunker(\n",
|
||||
" OpenAIEmbeddings(), breakpoint_threshold_type=\"gradient\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = text_splitter.create_documents([state_of_the_union])\n",
|
||||
"print(docs[0].page_content)"
|
||||
],
|
||||
"metadata": {},
|
||||
"id": "e9f393d316ce1f6c"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"26\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(len(docs))"
|
||||
],
|
||||
"metadata": {},
|
||||
"id": "a407cd57f02a0db4"
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
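The hunk above only shows the splitter construction and its use on an existing `state_of_the_union` string. A self-contained sketch of the gradient method, assuming `langchain_experimental` and `langchain-openai` are installed and a local `state_of_the_union.txt` file is available, could look like:

```python
from langchain_experimental.text_splitter import SemanticChunker
from langchain_openai import OpenAIEmbeddings

with open("state_of_the_union.txt") as f:
    state_of_the_union = f.read()

# "gradient" applies anomaly detection to the gradient of embedding distances
# (on top of the percentile method) to pick chunk boundaries, which helps when
# the text is highly correlated or domain specific.
text_splitter = SemanticChunker(
    OpenAIEmbeddings(), breakpoint_threshold_type="gradient"
)

docs = text_splitter.create_documents([state_of_the_union])
print(len(docs))
print(docs[0].page_content[:100])
```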
@@ -41,6 +41,10 @@
|
||||
"\n",
|
||||
"Let's take a look at both approaches, and try to understand how to use them.\n",
|
||||
"\n",
|
||||
":::info\n",
|
||||
"For a higher-level overview of streaming techniques in LangChain, see [this section of the conceptual guide](/docs/concepts/#streaming).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Using Stream\n",
|
||||
"\n",
|
||||
"All `Runnable` objects implement a sync method called `stream` and an async variant called `astream`. \n",
|
||||
@@ -1003,7 +1007,7 @@
|
||||
"id": "798ea891-997c-454c-bf60-43124f40ee1b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Because both the model and the parser support streaming, we see sreaming events from both components in real time! Kind of cool isn't it? 🦜"
|
||||
"Because both the model and the parser support streaming, we see streaming events from both components in real time! Kind of cool isn't it? 🦜"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
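For reference, the `stream` / `astream` methods mentioned in the streaming hunk above can be exercised with a very small sketch (assuming `langchain-openai` is installed and `OPENAI_API_KEY` is set; the model name is illustrative):

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-3.5-turbo-0125")

# stream yields message chunks as the model produces them,
# instead of a single final message.
for chunk in llm.stream("Write a haiku about parrots"):
    print(chunk.content, end="", flush=True)
```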
@@ -33,6 +33,8 @@
|
||||
"\n",
|
||||
"## The `.with_structured_output()` method\n",
|
||||
"\n",
|
||||
"<span data-heading-keywords=\"with_structured_output\"></span>\n",
|
||||
"\n",
|
||||
":::info Supported models\n",
|
||||
"\n",
|
||||
"You can find a [list of models that support this method here](/docs/integrations/chat/).\n",
|
||||
@@ -74,7 +76,7 @@
|
||||
"id": "a808a401-be1f-49f9-ad13-58dd68f7db5f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we want the model to return a Pydantic object, we just need to pass in desired the Pydantic class:"
|
||||
"If we want the model to return a Pydantic object, we just need to pass in the desired Pydantic class:"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
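As context for the `.with_structured_output()` hunk above, a minimal sketch of passing a Pydantic class (the `Joke` schema is illustrative; assumes `langchain-openai` is installed):

```python
from typing import Optional

from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI


class Joke(BaseModel):
    """Joke to tell the user."""

    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline of the joke")
    rating: Optional[int] = Field(default=None, description="How funny the joke is, 1-10")


llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
structured_llm = llm.with_structured_output(Joke)

structured_llm.invoke("Tell me a joke about cats")  # returns a Joke instance
```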
@@ -167,13 +167,83 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm_with_tools = llm.bind_tools(tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also use the `tool_choice` parameter to ensure certain behavior. For example, we can force our model to call the Multiply tool by using the following code:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_9cViskmLvPnHjXk9tbVla5HA', 'function': {'arguments': '{\"a\":2,\"b\":4}', 'name': 'Multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 103, 'total_tokens': 112}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-095b827e-2bdd-43bb-8897-c843f4504883-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 2, 'b': 4}, 'id': 'call_9cViskmLvPnHjXk9tbVla5HA'}], usage_metadata={'input_tokens': 103, 'output_tokens': 9, 'total_tokens': 112})"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_forced_to_multiply = llm.bind_tools(tools, tool_choice=\"Multiply\")\n",
|
||||
"llm_forced_to_multiply.invoke(\"what is 2 + 4\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Even if we pass it something that doesn't require multiplcation - it will still call the tool!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also force our model to select at least one of our tools by passing in the \"any\" (or \"required\", which is OpenAI specific) keyword to the `tool_choice` parameter."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_mCSiJntCwHJUBfaHZVUB2D8W', 'function': {'arguments': '{\"a\":1,\"b\":2}', 'name': 'Add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 94, 'total_tokens': 109}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-28f75260-9900-4bed-8cd3-f1579abb65e5-0', tool_calls=[{'name': 'Add', 'args': {'a': 1, 'b': 2}, 'id': 'call_mCSiJntCwHJUBfaHZVUB2D8W'}], usage_metadata={'input_tokens': 94, 'output_tokens': 15, 'total_tokens': 109})"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_forced_to_use_tool = llm.bind_tools(tools, tool_choice=\"any\")\n",
|
||||
"llm_forced_to_use_tool.invoke(\"What day is today?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As we can see, even though the prompt didn't really suggest a tool call, our LLM made one since it was forced to do so. You can look at the docs for [`bind_tools`](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.BaseChatOpenAI.html#langchain_openai.chat_models.base.BaseChatOpenAI.bind_tools) to learn about all the ways to customize how your LLM selects tools."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -711,7 +781,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
256
docs/docs/how_to/tool_runtime.ipynb
Normal file
@@ -0,0 +1,256 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to pass run time values to a tool\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"- [Chat models](/docs/concepts/#chat-models)\n",
|
||||
"- [LangChain Tools](/docs/concepts/#tools)\n",
|
||||
"- [How to create tools](/docs/how_to/custom_tools)\n",
|
||||
"- [How to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling/)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::{.callout-info} Supported models\n",
|
||||
"\n",
|
||||
"This how-to guide uses models with native tool calling capability.\n",
|
||||
"You can find a [list of all models that support tool calling](/docs/integrations/chat/).\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::{.callout-info} Using with LangGraph\n",
|
||||
"\n",
|
||||
"If you're using LangGraph, please refer to [this how-to guide](https://langchain-ai.github.io/langgraph/how-tos/pass-run-time-values-to-tools/)\n",
|
||||
"which shows how to create an agent that keeps track of a given user's favorite pets.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"You may need to bind values to a tool that are only known at runtime. For example, the tool logic may require using the ID of the user who made the request.\n",
|
||||
"\n",
|
||||
"Most of the time, such values should not be controlled by the LLM. In fact, allowing the LLM to control the user ID may lead to a security risk.\n",
|
||||
"\n",
|
||||
"Instead, the LLM should only control the parameters of the tool that are meant to be controlled by the LLM, while other parameters (such as user ID) should be fixed by the application logic.\n",
|
||||
"\n",
|
||||
"This how-to guide shows a simple design pattern that creates the tools dynamically at run time and binds the appropriate values to them."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can bind them to chat models as follows:\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs\n",
|
||||
" customVarName=\"llm\"\n",
|
||||
" fireworksParams={`model=\"accounts/fireworks/models/firefunction-v1\", temperature=0`}\n",
|
||||
"/>\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython -m pip install --upgrade pip\u001b[0m\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain langchain_openai\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"if \"OPENAI_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Passing request time information\n",
|
||||
"\n",
|
||||
"The idea is to create the tool dynamically at request time, and bind to it the appropriate information. For example,\n",
|
||||
"this information may be the user ID as resolved from the request itself."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_core.output_parsers import JsonOutputParser\n",
|
||||
"from langchain_core.tools import BaseTool, tool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"user_to_pets = {}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def generate_tools_for_user(user_id: str) -> List[BaseTool]:\n",
|
||||
" \"\"\"Generate a set of tools that have a user id associated with them.\"\"\"\n",
|
||||
"\n",
|
||||
" @tool\n",
|
||||
" def update_favorite_pets(pets: List[str]) -> None:\n",
|
||||
" \"\"\"Add the list of favorite pets.\"\"\"\n",
|
||||
" user_to_pets[user_id] = pets\n",
|
||||
"\n",
|
||||
" @tool\n",
|
||||
" def delete_favorite_pets() -> None:\n",
|
||||
" \"\"\"Delete the list of favorite pets.\"\"\"\n",
|
||||
" if user_id in user_to_pets:\n",
|
||||
" del user_to_pets[user_id]\n",
|
||||
"\n",
|
||||
" @tool\n",
|
||||
" def list_favorite_pets() -> None:\n",
|
||||
" \"\"\"List favorite pets if any.\"\"\"\n",
|
||||
" return user_to_pets.get(user_id, [])\n",
|
||||
"\n",
|
||||
" return [update_favorite_pets, delete_favorite_pets, list_favorite_pets]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Verify that the tools work correctly"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'eugene': ['cat', 'dog']}\n",
|
||||
"['cat', 'dog']\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"update_pets, delete_pets, list_pets = generate_tools_for_user(\"eugene\")\n",
|
||||
"update_pets.invoke({\"pets\": [\"cat\", \"dog\"]})\n",
|
||||
"print(user_to_pets)\n",
|
||||
"print(list_pets.invoke({}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def handle_run_time_request(user_id: str, query: str):\n",
|
||||
" \"\"\"Handle run time request.\"\"\"\n",
|
||||
" tools = generate_tools_for_user(user_id)\n",
|
||||
" llm_with_tools = llm.bind_tools(tools)\n",
|
||||
" prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"system\", \"You are a helpful assistant.\")],\n",
|
||||
" )\n",
|
||||
" chain = prompt | llm_with_tools\n",
|
||||
" return llm_with_tools.invoke(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This code will allow the LLM to invoke the tools, but the LLM is **unaware** of the fact that a **user ID** even exists!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'update_favorite_pets',\n",
|
||||
" 'args': {'pets': ['cats', 'parrots']},\n",
|
||||
" 'id': 'call_jJvjPXsNbFO5MMgW0q84iqCN'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ai_message = handle_run_time_request(\n",
|
||||
" \"eugene\", \"my favorite animals are cats and parrots.\"\n",
|
||||
")\n",
|
||||
"ai_message.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::{.callout-important}\n",
|
||||
"\n",
|
||||
"Chat models only output requests to invoke tools, they don't actually invoke the underlying tools.\n",
|
||||
"\n",
|
||||
"To see how to invoke the tools, please refer to [how to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling/).\n",
|
||||
":::"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
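Picking up the callout at the end of the notebook above: the model only requests tool invocations, so the application has to execute them. A minimal dispatch sketch, reusing `generate_tools_for_user` and `handle_run_time_request` exactly as defined in the notebook:

```python
# Map tool names to the user-specific tool objects created above.
tools_by_name = {t.name: t for t in generate_tools_for_user("eugene")}

ai_message = handle_run_time_request(
    "eugene", "my favorite animals are cats and parrots."
)

# Execute each requested tool call with the arguments the model produced.
for tool_call in ai_message.tool_calls:
    result = tools_by_name[tool_call["name"]].invoke(tool_call["args"])
    print(tool_call["name"], "->", result)  # update_favorite_pets returns None

print(user_to_pets)  # e.g. {'eugene': ['cats', 'parrots']}
```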
@@ -36,7 +36,7 @@
|
||||
"\n",
|
||||
"When using 3rd party tools, make sure that you understand how the tool works, what permissions\n",
|
||||
"it has. Read over its documentation and check if anything is required from you\n",
|
||||
"from a security point of view. Please see our [security](https://python.langchain.com/v0.1/docs/security/) \n",
|
||||
"from a security point of view. Please see our [security](https://python.langchain.com/v0.2/docs/security/) \n",
|
||||
"guidelines for more information.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
|
||||
473
docs/docs/how_to/trim_messages.ipynb
Normal file
@@ -0,0 +1,473 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b5ee5b75-6876-4d62-9ade-5a7a808ae5a2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to trim messages\n",
|
||||
"\n",
|
||||
"All models have finite context windows, meaning there's a limit to how many tokens they can take as input. If you have very long messages or a chain/agent that accumulates a long message history, you'll need to manage the length of the messages you're passing in to the model.\n",
|
||||
"\n",
|
||||
"The `trim_messages` util provides some basic strategies for trimming a list of messages to be of a certain token length.\n",
|
||||
"\n",
|
||||
"## Getting the last `max_tokens` tokens\n",
|
||||
"\n",
|
||||
"To get the last `max_tokens` in the list of Messages we can set `strategy=\"last\"`. Notice that for our `token_counter` we can pass in a function (more on that below) or a language model (since language models have a message token counting method). It makes sense to pass in a model when you're trimming your messages to fit into the context window of that specific model:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "c974633b-3bd0-4844-8a8f-85e3e25f13fe",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[AIMessage(content=\"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\"),\n",
|
||||
" HumanMessage(content='what do you call a speechless parrot')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# pip install -U langchain-openai\n",
|
||||
"from langchain_core.messages import (\n",
|
||||
" AIMessage,\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
" trim_messages,\n",
|
||||
")\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(\"you're a good assistant, you always respond with a joke.\"),\n",
|
||||
" HumanMessage(\"i wonder why it's called langchain\"),\n",
|
||||
" AIMessage(\n",
|
||||
" 'Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!'\n",
|
||||
" ),\n",
|
||||
" HumanMessage(\"and who is harrison chasing anyways\"),\n",
|
||||
" AIMessage(\n",
|
||||
" \"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(\"what do you call a speechless parrot\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"trim_messages(\n",
|
||||
" messages,\n",
|
||||
" max_tokens=45,\n",
|
||||
" strategy=\"last\",\n",
|
||||
" token_counter=ChatOpenAI(model=\"gpt-4o\"),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d3f46654-c4b2-4136-b995-91c3febe5bf9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we want to always keep the initial system message we can specify `include_system=True`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "589b0223-3a73-44ec-8315-2dba3ee6117d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\"),\n",
|
||||
" HumanMessage(content='what do you call a speechless parrot')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"trim_messages(\n",
|
||||
" messages,\n",
|
||||
" max_tokens=45,\n",
|
||||
" strategy=\"last\",\n",
|
||||
" token_counter=ChatOpenAI(model=\"gpt-4o\"),\n",
|
||||
" include_system=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8a8b542c-04d1-4515-8d82-b999ea4fac4f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we want to allow splitting up the contents of a message we can specify `allow_partial=True`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "8c46a209-dddd-4d01-81f6-f6ae55d3225c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\"),\n",
|
||||
" AIMessage(content=\"\\nWhy, he's probably chasing after the last cup of coffee in the office!\"),\n",
|
||||
" HumanMessage(content='what do you call a speechless parrot')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"trim_messages(\n",
|
||||
" messages,\n",
|
||||
" max_tokens=56,\n",
|
||||
" strategy=\"last\",\n",
|
||||
" token_counter=ChatOpenAI(model=\"gpt-4o\"),\n",
|
||||
" include_system=True,\n",
|
||||
" allow_partial=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "306adf9c-41cd-495c-b4dc-e4f43dd7f8f8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we need to make sure that our first message (excluding the system message) is always of a specific type, we can specify `start_on`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "878a730b-fe44-4e9d-ab65-7b8f7b069de8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\"),\n",
|
||||
" HumanMessage(content='what do you call a speechless parrot')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"trim_messages(\n",
|
||||
" messages,\n",
|
||||
" max_tokens=60,\n",
|
||||
" strategy=\"last\",\n",
|
||||
" token_counter=ChatOpenAI(model=\"gpt-4o\"),\n",
|
||||
" include_system=True,\n",
|
||||
" start_on=\"human\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7f5d391d-235b-4091-b2de-c22866b478f3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Getting the first `max_tokens` tokens\n",
|
||||
"\n",
|
||||
"We can perform the flipped operation of getting the *first* `max_tokens` by specifying `strategy=\"first\"`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "5f56ae54-1a39-4019-9351-3b494c003d5b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\"),\n",
|
||||
" HumanMessage(content=\"i wonder why it's called langchain\")]"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"trim_messages(\n",
|
||||
" messages,\n",
|
||||
" max_tokens=45,\n",
|
||||
" strategy=\"first\",\n",
|
||||
" token_counter=ChatOpenAI(model=\"gpt-4o\"),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ab70bf70-1e5a-4d51-b9b8-a823bf2cf532",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Writing a custom token counter\n",
|
||||
"\n",
|
||||
"We can write a custom token counter function that takes in a list of messages and returns an int."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "1c1c3b1e-2ece-49e7-a3b6-e69877c1633b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[AIMessage(content=\"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\"),\n",
|
||||
" HumanMessage(content='what do you call a speechless parrot')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"# pip install tiktoken\n",
|
||||
"import tiktoken\n",
|
||||
"from langchain_core.messages import BaseMessage, ToolMessage\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def str_token_counter(text: str) -> int:\n",
|
||||
" enc = tiktoken.get_encoding(\"o200k_base\")\n",
|
||||
" return len(enc.encode(text))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def tiktoken_counter(messages: List[BaseMessage]) -> int:\n",
|
||||
" \"\"\"Approximately reproduce https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb\n",
|
||||
"\n",
|
||||
" For simplicity only supports str Message.contents.\n",
|
||||
" \"\"\"\n",
|
||||
" num_tokens = 3 # every reply is primed with <|start|>assistant<|message|>\n",
|
||||
" tokens_per_message = 3\n",
|
||||
" tokens_per_name = 1\n",
|
||||
" for msg in messages:\n",
|
||||
" if isinstance(msg, HumanMessage):\n",
|
||||
" role = \"user\"\n",
|
||||
" elif isinstance(msg, AIMessage):\n",
|
||||
" role = \"assistant\"\n",
|
||||
" elif isinstance(msg, ToolMessage):\n",
|
||||
" role = \"tool\"\n",
|
||||
" elif isinstance(msg, SystemMessage):\n",
|
||||
" role = \"system\"\n",
|
||||
" else:\n",
|
||||
" raise ValueError(f\"Unsupported messages type {msg.__class__}\")\n",
|
||||
" num_tokens += (\n",
|
||||
" tokens_per_message\n",
|
||||
" + str_token_counter(role)\n",
|
||||
" + str_token_counter(msg.content)\n",
|
||||
" )\n",
|
||||
" if msg.name:\n",
|
||||
" num_tokens += tokens_per_name + str_token_counter(msg.name)\n",
|
||||
" return num_tokens\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"trim_messages(\n",
|
||||
" messages,\n",
|
||||
" max_tokens=45,\n",
|
||||
" strategy=\"last\",\n",
|
||||
" token_counter=tiktoken_counter,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4b2a672b-c007-47c5-9105-617944dc0a6a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"`trim_messages` can be used imperatively (like above) or declaratively, making it easy to compose with other components in a chain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "96aa29b2-01e0-437c-a1ab-02fb0141cb57",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='A \"polygon\"! Because it\\'s a \"poly-gone\" silent!', response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 32, 'total_tokens': 46}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_319be4768e', 'finish_reason': 'stop', 'logprobs': None}, id='run-64cc4575-14d1-4f3f-b4af-97f24758f703-0', usage_metadata={'input_tokens': 32, 'output_tokens': 14, 'total_tokens': 46})"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(model=\"gpt-4o\")\n",
|
||||
"\n",
|
||||
"# Notice we don't pass in messages. This creates\n",
|
||||
"# a RunnableLambda that takes messages as input\n",
|
||||
"trimmer = trim_messages(\n",
|
||||
" max_tokens=45,\n",
|
||||
" strategy=\"last\",\n",
|
||||
" token_counter=llm,\n",
|
||||
" include_system=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = trimmer | llm\n",
|
||||
"chain.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4d91d390-e7f7-467b-ad87-d100411d7a21",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Looking at the LangSmith trace we can see that before the messages are passed to the model they are first trimmed: https://smith.langchain.com/public/65af12c4-c24d-4824-90f0-6547566e59bb/r\n",
|
||||
"\n",
|
||||
"Looking at just the trimmer, we can see that it's a Runnable object that can be invoked like all Runnables:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "1ff02d0a-353d-4fac-a77c-7c2c5262abd9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\"),\n",
|
||||
" HumanMessage(content='what do you call a speechless parrot')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"trimmer.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dc4720c8-4062-4ebc-9385-58411202ce6e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using with ChatMessageHistory\n",
|
||||
"\n",
|
||||
"Trimming messages is especially useful when [working with chat histories](/docs/how_to/message_history/), which can get arbitrarily long:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "a9517858-fc2f-4dc3-898d-bf98a0e905a0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run c87e2f1b-81ad-4fa7-bfd9-ce6edb29a482 not found for run 7892ee8f-0669-4d6b-a2ca-ef8aae81042a. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"A polygon! Because it's a parrot gone quiet!\", response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 32, 'total_tokens': 43}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_319be4768e', 'finish_reason': 'stop', 'logprobs': None}, id='run-72dad96e-8b58-45f4-8c08-21f9f1a6b68f-0', usage_metadata={'input_tokens': 32, 'output_tokens': 11, 'total_tokens': 43})"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.chat_history import InMemoryChatMessageHistory\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"\n",
|
||||
"chat_history = InMemoryChatMessageHistory(messages=messages[:-1])\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def dummy_get_session_history(session_id):\n",
|
||||
" if session_id != \"1\":\n",
|
||||
" return InMemoryChatMessageHistory()\n",
|
||||
" return chat_history\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o\")\n",
|
||||
"\n",
|
||||
"trimmer = trim_messages(\n",
|
||||
" max_tokens=45,\n",
|
||||
" strategy=\"last\",\n",
|
||||
" token_counter=llm,\n",
|
||||
" include_system=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = trimmer | llm\n",
|
||||
"chain_with_history = RunnableWithMessageHistory(chain, dummy_get_session_history)\n",
|
||||
"chain_with_history.invoke(\n",
|
||||
" [HumanMessage(\"what do you call a speechless parrot\")],\n",
|
||||
" config={\"configurable\": {\"session_id\": \"1\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "556b7b4c-43cb-41de-94fc-1a41f4ec4d2e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Looking at the LangSmith trace we can see that we retrieve all of our messages but before the messages are passed to the model they are trimmed to be just the system message and last human message: https://smith.langchain.com/public/17dd700b-9994-44ca-930c-116e00997315/r"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "75dc7b84-b92f-44e7-8beb-ba22398e4efb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For a complete description of all arguments head to the API reference: https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.trim_messages.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-2",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-2"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -110,7 +110,7 @@ with identify("user-123"):
|
||||
llm.invoke("Tell me a joke")
|
||||
|
||||
with identify("user-456", user_props={"email": "user456@test.com"}):
|
||||
agen.run("Who is Leo DiCaprio's girlfriend?")
|
||||
agent.run("Who is Leo DiCaprio's girlfriend?")
|
||||
```
|
||||
## Support
|
||||
|
||||
|
||||
245
docs/docs/integrations/callbacks/upstash_ratelimit.ipynb
Normal file
@@ -0,0 +1,245 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Upstash Ratelimit Callback\n",
|
||||
"\n",
|
||||
"In this guide, we will go over how to add rate limiting based on number of requests or the number of tokens using `UpstashRatelimitHandler`. This handler uses [ratelimit library of Upstash](https://github.com/upstash/ratelimit-py/), which utilizes [Upstash Redis](https://upstash.com/docs/redis/overall/getstarted).\n",
|
||||
"\n",
|
||||
"Upstash Ratelimit works by sending an HTTP request to Upstash Redis every time the `limit` method is called. The user's remaining tokens/requests are checked and updated. Based on the remaining tokens, we can stop the execution of costly operations like invoking an LLM or querying a vector store:\n",
|
||||
"\n",
|
||||
"```py\n",
|
||||
"response = ratelimit.limit()\n",
|
||||
"if response.allowed:\n",
|
||||
" execute_costly_operation()\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"`UpstashRatelimitHandler` allows you to incorporate the ratelimit logic into your chain in a few minutes.\n",
|
||||
"\n",
|
||||
"First, you will need to go to [the Upstash Console](https://console.upstash.com/login) and create a redis database ([see our docs](https://upstash.com/docs/redis/overall/getstarted)). After creating a database, you will need to set the environment variables:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"UPSTASH_REDIS_REST_URL=\"****\"\n",
|
||||
"UPSTASH_REDIS_REST_TOKEN=\"****\"\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Next, you will need to install Upstash Ratelimit and Redis library with:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"pip install upstash-ratelimit upstash-redis\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"You are now ready to add rate limiting to your chain!\n",
|
||||
"\n",
|
||||
"## Ratelimiting Per Request\n",
|
||||
"\n",
|
||||
"Let's imagine that we want to allow our users to invoke our chain 10 times per minute. Achieving this is as simple as:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Error in UpstashRatelimitHandler.on_chain_start callback: UpstashRatelimitError('Request limit reached!')\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Handling ratelimit. <class 'langchain_community.callbacks.upstash_ratelimit_callback.UpstashRatelimitError'>\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# set env variables\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"UPSTASH_REDIS_REST_URL\"] = \"****\"\n",
|
||||
"os.environ[\"UPSTASH_REDIS_REST_TOKEN\"] = \"****\"\n",
|
||||
"\n",
|
||||
"from langchain_community.callbacks import UpstashRatelimitError, UpstashRatelimitHandler\n",
|
||||
"from langchain_core.runnables import RunnableLambda\n",
|
||||
"from upstash_ratelimit import FixedWindow, Ratelimit\n",
|
||||
"from upstash_redis import Redis\n",
|
||||
"\n",
|
||||
"# create ratelimit\n",
|
||||
"ratelimit = Ratelimit(\n",
|
||||
" redis=Redis.from_env(),\n",
|
||||
" # 10 requests per window, where window size is 60 seconds:\n",
|
||||
" limiter=FixedWindow(max_requests=10, window=60),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# create handler\n",
|
||||
"user_id = \"user_id\" # should be a method which gets the user id\n",
|
||||
"handler = UpstashRatelimitHandler(identifier=user_id, request_ratelimit=ratelimit)\n",
|
||||
"\n",
|
||||
"# create mock chain\n",
|
||||
"chain = RunnableLambda(str)\n",
|
||||
"\n",
|
||||
"# invoke chain with handler:\n",
|
||||
"try:\n",
|
||||
" result = chain.invoke(\"Hello world!\", config={\"callbacks\": [handler]})\n",
|
||||
"except UpstashRatelimitError:\n",
|
||||
" print(\"Handling ratelimit.\", UpstashRatelimitError)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that we pass the handler to the `invoke` method instead of passing the handler when defining the chain.\n",
|
||||
"\n",
|
||||
"For rate limiting algorithms other than `FixedWindow`, see [upstash-ratelimit docs](https://github.com/upstash/ratelimit-py?tab=readme-ov-file#ratelimiting-algorithms).\n",
|
||||
"\n",
|
||||
"Before executing any steps in our pipeline, ratelimit will check whether the user has passed the request limit. If so, `UpstashRatelimitError` is raised.\n",
|
||||
"\n",
|
||||
"## Ratelimiting Per Token\n",
|
||||
"\n",
|
||||
"Another option is to rate limit chain invocations based on:\n",
|
||||
"1. number of tokens in prompt\n",
|
||||
"2. number of tokens in prompt and LLM completion\n",
|
||||
"\n",
|
||||
"This only works if you have an LLM in your chain. Another requirement is that the LLM you are using should return the token usage in its `LLMOutput`.\n",
|
||||
"\n",
|
||||
"### How it works\n",
|
||||
"\n",
|
||||
"The handler will get the remaining tokens before calling the LLM. If the number of remaining tokens is more than 0, the LLM will be called. Otherwise, `UpstashRatelimitError` will be raised.\n",
|
||||
"\n",
|
||||
"After the LLM is called, the token usage is subtracted from the user's remaining tokens. No error is raised at this stage of the chain.\n",
|
||||
"\n",
|
||||
"### Configuration\n",
|
||||
"\n",
|
||||
"For the first configuration, simply initialize the handler like this:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ratelimit = Ratelimit(\n",
|
||||
" redis=Redis.from_env(),\n",
|
||||
" # 1000 tokens per window, where window size is 60 seconds:\n",
|
||||
" limiter=FixedWindow(max_requests=1000, window=60),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"handler = UpstashRatelimitHandler(identifier=user_id, token_ratelimit=ratelimit)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For the second configuration, here is how to initialize the handler:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ratelimit = Ratelimit(\n",
|
||||
" redis=Redis.from_env(),\n",
|
||||
" # 1000 tokens per window, where window size is 60 seconds:\n",
|
||||
" limiter=FixedWindow(max_requests=1000, window=60),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"handler = UpstashRatelimitHandler(\n",
|
||||
" identifier=user_id,\n",
|
||||
" token_ratelimit=ratelimit,\n",
|
||||
" include_output_tokens=True, # set to True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also employ ratelimiting based on requests and tokens at the same time, simply by passing both `request_ratelimit` and `token_ratelimit` parameters.\n",
|
||||
"\n",
|
||||
"Here is an example with a chain utilizing an LLM:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Error in UpstashRatelimitHandler.on_llm_start callback: UpstashRatelimitError('Token limit reached!')\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Handling ratelimit. <class 'langchain_community.callbacks.upstash_ratelimit_callback.UpstashRatelimitError'>\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# set env variables\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"UPSTASH_REDIS_REST_URL\"] = \"****\"\n",
|
||||
"os.environ[\"UPSTASH_REDIS_REST_TOKEN\"] = \"****\"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"****\"\n",
|
||||
"\n",
|
||||
"from langchain_community.callbacks import UpstashRatelimitError, UpstashRatelimitHandler\n",
|
||||
"from langchain_core.runnables import RunnableLambda\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"from upstash_ratelimit import FixedWindow, Ratelimit\n",
|
||||
"from upstash_redis import Redis\n",
|
||||
"\n",
|
||||
"# create ratelimit\n",
|
||||
"ratelimit = Ratelimit(\n",
|
||||
" redis=Redis.from_env(),\n",
|
||||
" # 500 tokens per window, where window size is 60 seconds:\n",
|
||||
" limiter=FixedWindow(max_requests=500, window=60),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# create handler\n",
|
||||
"user_id = \"user_id\" # should be a method which gets the user id\n",
|
||||
"handler = UpstashRatelimitHandler(identifier=user_id, token_ratelimit=ratelimit)\n",
|
||||
"\n",
|
||||
"# create mock chain\n",
|
||||
"as_str = RunnableLambda(str)\n",
|
||||
"model = ChatOpenAI()\n",
|
||||
"\n",
|
||||
"chain = as_str | model\n",
|
||||
"\n",
|
||||
"# invoke chain with handler:\n",
|
||||
"try:\n",
|
||||
" result = chain.invoke(\"Hello world!\", config={\"callbacks\": [handler]})\n",
|
||||
"except UpstashRatelimitError:\n",
|
||||
" print(\"Handling ratelimit.\", UpstashRatelimitError)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "lc39",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python",
|
||||
"version": "3.9.19"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
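The notebook above mentions that request-based and token-based rate limiting can be combined by passing both parameters, but does not show it; a minimal sketch of that configuration (same environment variables and imports as the notebook's examples) might be:

```python
from langchain_community.callbacks import UpstashRatelimitHandler
from upstash_ratelimit import FixedWindow, Ratelimit
from upstash_redis import Redis

# One limiter for the number of requests, one for the token budget,
# both over a 60 second fixed window.
request_ratelimit = Ratelimit(
    redis=Redis.from_env(),
    limiter=FixedWindow(max_requests=10, window=60),
)
token_ratelimit = Ratelimit(
    redis=Redis.from_env(),
    limiter=FixedWindow(max_requests=1000, window=60),
)

handler = UpstashRatelimitHandler(
    identifier="user_id",  # should be resolved per user, as in the notebook
    request_ratelimit=request_ratelimit,
    token_ratelimit=token_ratelimit,
)
```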
@@ -23,13 +23,11 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d83ba7de",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-openai"
|
||||
]
|
||||
|
||||
@@ -201,7 +201,7 @@
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"You can also easily combine with a prompt template for easy structuring of user input. We can do this using [LCEL](/docs/concepts#langchain-expression-language)"
|
||||
"You can also easily combine with a prompt template for easy structuring of user input. We can do this using [LCEL](/docs/concepts#langchain-expression-language-lcel)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -98,6 +98,78 @@
|
||||
")\n",
|
||||
"chat.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "466c3cb41ace1410",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Tool Calling\n",
|
||||
"\n",
|
||||
"DeepInfra currently supports only invoke and async invoke tool calling.\n",
|
||||
"\n",
|
||||
"For a complete list of models that support tool calling, please refer to our [tool calling documentation](https://deepinfra.com/docs/advanced/function_calling)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ddc4f4299763651c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import asyncio\n",
|
||||
"\n",
|
||||
"from dotenv import find_dotenv, load_dotenv\n",
|
||||
"from langchain_community.chat_models import ChatDeepInfra\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"model_name = \"meta-llama/Meta-Llama-3-70B-Instruct\"\n",
|
||||
"\n",
|
||||
"_ = load_dotenv(find_dotenv())\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Langchain tool\n",
|
||||
"@tool\n",
|
||||
"def foo(something):\n",
|
||||
" \"\"\"\n",
|
||||
" Called when foo\n",
|
||||
" \"\"\"\n",
|
||||
" pass\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Pydantic class\n",
|
||||
"class Bar(BaseModel):\n",
|
||||
" \"\"\"\n",
|
||||
" Called when Bar\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
" pass\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm = ChatDeepInfra(model=model_name)\n",
|
||||
"tools = [foo, Bar]\n",
|
||||
"llm_with_tools = llm.bind_tools(tools)\n",
|
||||
"messages = [\n",
|
||||
" HumanMessage(\"Foo and bar, please.\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"response = llm_with_tools.invoke(messages)\n",
|
||||
"print(response.tool_calls)\n",
|
||||
"# [{'name': 'foo', 'args': {'something': None}, 'id': 'call_Mi4N4wAtW89OlbizFE1aDxDj'}, {'name': 'Bar', 'args': {}, 'id': 'call_daiE0mW454j2O1KVbmET4s2r'}]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def call_ainvoke():\n",
|
||||
" result = await llm_with_tools.ainvoke(messages)\n",
|
||||
" print(result.tool_calls)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Async call\n",
|
||||
"asyncio.run(call_ainvoke())\n",
|
||||
"# [{'name': 'foo', 'args': {'something': None}, 'id': 'call_ZH7FetmgSot4LHcMU6CEb8tI'}, {'name': 'Bar', 'args': {}, 'id': 'call_2MQhDifAJVoijZEvH8PeFSVB'}]"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -147,7 +147,7 @@
|
||||
"source": [
|
||||
"# Tool Calling\n",
|
||||
"\n",
|
||||
"Fireworks offers the [`FireFunction-v1` tool calling model](https://fireworks.ai/blog/firefunction-v1-gpt-4-level-function-calling). You can use it for structured output and function calling use cases:"
|
||||
"Fireworks offers the `FireFunction-v2` tool calling model. You can use it for structured output and function calling use cases:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -180,7 +180,7 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"chat = ChatFireworks(\n",
|
||||
" model=\"accounts/fireworks/models/firefunction-v1\",\n",
|
||||
" model=\"accounts/fireworks/models/firefunction-v2\",\n",
|
||||
").bind_tools([ExtractFields])\n",
|
||||
"\n",
|
||||
"result = chat.invoke(\"I am a 27 year old named Erick\")\n",
|
||||
|
||||
@@ -2,33 +2,50 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Google Cloud Vertex AI\n",
|
||||
"keywords: [gemini, vertex, ChatVertexAI, gemini-pro]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatVertexAI\n",
|
||||
"\n",
|
||||
"Note: This is separate from the Google PaLM integration. Google has chosen to offer an enterprise version of PaLM through GCP, and this supports the models made available through there. \n",
|
||||
"This page provides a quick overview for getting started with VertexAI [chat models](/docs/concepts/#chat-models). For detailed documentation of all ChatVertexAI features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html).\n",
|
||||
"\n",
|
||||
"ChatVertexAI exposes all foundational models available in Google Cloud:\n",
|
||||
"ChatVertexAI exposes all foundational models available in Google Cloud, like `gemini-1.5-pro`, `gemini-1.5-flash`, etc. For a full and updated list of available models visit [VertexAI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/overview).\n",
|
||||
"\n",
|
||||
"- Gemini (`gemini-pro` and `gemini-pro-vision`)\n",
|
||||
"- PaLM 2 for Text (`text-bison`)\n",
|
||||
"- Codey for Code Generation (`codechat-bison`)\n",
|
||||
":::info Google Cloud VertexAI vs Google PaLM\n",
|
||||
"\n",
|
||||
"For a full and updated list of available models visit [VertexAI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/overview).\n",
|
||||
"The Google Cloud VertexAI integration is separate from the [Google PaLM integration](/docs/integrations/chat/google_generative_ai/). Google has chosen to offer an enterprise version of PaLM through GCP, and this supports the models made available through there. \n",
|
||||
"\n",
|
||||
"By default, Google Cloud [does not use](https://cloud.google.com/vertex-ai/docs/generative-ai/data-governance#foundation_model_development) customer data to train its foundation models as part of Google Cloud`s AI/ML Privacy Commitment. More details about how Google processes data can also be found in [Google's Customer Data Processing Addendum (CDPA)](https://cloud.google.com/terms/data-processing-addendum).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"To use `Google Cloud Vertex AI` PaLM you must have the `langchain-google-vertexai` Python package installed and either:\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/v0.2/docs/integrations/chat/google_vertex_ai) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatVertexAI](https://api.python.langchain.com/en/latest/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html) | [langchain-google-vertexai](https://api.python.langchain.com/en/latest/google_vertexai_api_reference.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access VertexAI models you'll need to create a Google Cloud Platform account, set up credentials, and install the `langchain-google-vertexai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"To use the integration you must:\n",
|
||||
"- Have credentials configured for your environment (gcloud, workload identity, etc...)\n",
|
||||
"- Store the path to a service account JSON file as the GOOGLE_APPLICATION_CREDENTIALS environment variable\n",
|
||||
"\n",
|
||||
@@ -37,432 +54,156 @@
|
||||
"For more information, see: \n",
|
||||
"- https://cloud.google.com/docs/authentication/application-default-credentials#GAC\n",
|
||||
"- https://googleapis.dev/python/google-auth/latest/reference/google.auth.html#module-google.auth\n",
|
||||
"\n"
|
||||
]
|
||||
},
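As a concrete illustration of the second credential option above, the environment variable can be set in Python before any VertexAI client is created. This is only a sketch; the path is a placeholder for your own service account key file:

```python
import os

# Hypothetical path to a service account key file; replace with your own.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/path/to/service-account.json"
```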
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain-google-vertexai"
|
||||
"\n",
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_google_vertexai import ChatVertexAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\" J'aime la programmation.\")"
|
||||
]
|
||||
},
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"system = \"You are a helpful assistant who translate English to French\"\n",
|
||||
"human = \"Translate this sentence from English to French. I love programming.\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
|
||||
"\n",
|
||||
"chat = ChatVertexAI()\n",
|
||||
"\n",
|
||||
"chain = prompt | chat\n",
|
||||
"chain.invoke({})"
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Gemini doesn't support SystemMessage at the moment, but it can be added to the first human message in the row. If you want such behavior, just set the `convert_system_message_to_human` to `True`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"J'aime la programmation.\")"
|
||||
]
|
||||
},
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"system = \"You are a helpful assistant who translate English to French\"\n",
|
||||
"human = \"Translate this sentence from English to French. I love programming.\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"chat = ChatVertexAI(model=\"gemini-pro\", convert_system_message_to_human=True)\n",
|
||||
"\n",
|
||||
"chain = prompt | chat\n",
|
||||
"chain.invoke({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we want to construct a simple chain that takes user specified parameters:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=' プログラミングが大好きです')"
|
||||
]
|
||||
},
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"system = (\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\"\n",
|
||||
")\n",
|
||||
"human = \"{text}\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
|
||||
"\n",
|
||||
"chat = ChatVertexAI()\n",
|
||||
"\n",
|
||||
"chain = prompt | chat\n",
|
||||
"\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"Japanese\",\n",
|
||||
" \"text\": \"I love programming\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Code generation chat models\n",
|
||||
"You can now leverage the Codey API for code chat within Vertex AI. The model available is:\n",
|
||||
"- `codechat-bison`: for code assistance"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" ```python\n",
|
||||
"def is_prime(n):\n",
|
||||
" \"\"\"\n",
|
||||
" Check if a number is prime.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" n: The number to check.\n",
|
||||
"\n",
|
||||
" Returns:\n",
|
||||
" True if n is prime, False otherwise.\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
" # If n is 1, it is not prime.\n",
|
||||
" if n == 1:\n",
|
||||
" return False\n",
|
||||
"\n",
|
||||
" # Iterate over all numbers from 2 to the square root of n.\n",
|
||||
" for i in range(2, int(n ** 0.5) + 1):\n",
|
||||
" # If n is divisible by any number from 2 to its square root, it is not prime.\n",
|
||||
" if n % i == 0:\n",
|
||||
" return False\n",
|
||||
"\n",
|
||||
" # If n is divisible by no number from 2 to its square root, it is prime.\n",
|
||||
" return True\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def find_prime_numbers(n):\n",
|
||||
" \"\"\"\n",
|
||||
" Find all prime numbers up to a given number.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" n: The upper bound for the prime numbers to find.\n",
|
||||
"\n",
|
||||
" Returns:\n",
|
||||
" A list of all prime numbers up to n.\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
" # Create a list of all numbers from 2 to n.\n",
|
||||
" numbers = list(range(2, n + 1))\n",
|
||||
"\n",
|
||||
" # Iterate over the list of numbers and remove any that are not prime.\n",
|
||||
" for number in numbers:\n",
|
||||
" if not is_prime(number):\n",
|
||||
" numbers.remove(number)\n",
|
||||
"\n",
|
||||
" # Return the list of prime numbers.\n",
|
||||
" return numbers\n",
|
||||
"```\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat = ChatVertexAI(model=\"codechat-bison\", max_tokens=1000, temperature=0.5)\n",
|
||||
"\n",
|
||||
"message = chat.invoke(\"Write a Python function generating all prime numbers\")\n",
|
||||
"print(message.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Full generation info\n",
|
||||
"\n",
|
||||
"We can use the `generate` method to get back extra metadata like [safety attributes](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/responsible-ai#safety_attribute_confidence_scoring) and not just chat completions\n",
|
||||
"\n",
|
||||
"Note that the `generation_info` will be different depending if you're using a gemini model or not.\n",
|
||||
"\n",
|
||||
"### Gemini model\n",
|
||||
"\n",
|
||||
"`generation_info` will include:\n",
|
||||
"\n",
|
||||
"- `is_blocked`: whether generation was blocked or not\n",
|
||||
"- `safety_ratings`: safety ratings' categories and probability labels"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from pprint import pprint\n",
|
||||
"\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_google_vertexai import HarmBlockThreshold, HarmCategory"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'citation_metadata': None,\n",
|
||||
" 'is_blocked': False,\n",
|
||||
" 'safety_ratings': [{'blocked': False,\n",
|
||||
" 'category': 'HARM_CATEGORY_HATE_SPEECH',\n",
|
||||
" 'probability_label': 'NEGLIGIBLE'},\n",
|
||||
" {'blocked': False,\n",
|
||||
" 'category': 'HARM_CATEGORY_DANGEROUS_CONTENT',\n",
|
||||
" 'probability_label': 'NEGLIGIBLE'},\n",
|
||||
" {'blocked': False,\n",
|
||||
" 'category': 'HARM_CATEGORY_HARASSMENT',\n",
|
||||
" 'probability_label': 'NEGLIGIBLE'},\n",
|
||||
" {'blocked': False,\n",
|
||||
" 'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n",
|
||||
" 'probability_label': 'NEGLIGIBLE'}],\n",
|
||||
" 'usage_metadata': {'candidates_token_count': 6,\n",
|
||||
" 'prompt_token_count': 12,\n",
|
||||
" 'total_token_count': 18}}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"human = \"Translate this sentence from English to French. I love programming.\"\n",
|
||||
"messages = [HumanMessage(content=human)]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chat = ChatVertexAI(\n",
|
||||
" model_name=\"gemini-pro\",\n",
|
||||
" safety_settings={\n",
|
||||
" HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE\n",
|
||||
" },\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"result = chat.generate([messages])\n",
|
||||
"pprint(result.generations[0][0].generation_info)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Non-gemini model\n",
|
||||
"\n",
|
||||
"`generation_info` will include:\n",
|
||||
"\n",
|
||||
"- `is_blocked`: whether generation was blocked or not\n",
|
||||
"- `safety_attributes`: a dictionary mapping safety attributes to their scores"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'errors': (),\n",
|
||||
" 'grounding_metadata': {'citations': [], 'search_queries': []},\n",
|
||||
" 'is_blocked': False,\n",
|
||||
" 'safety_attributes': [{'Derogatory': 0.1, 'Insult': 0.1, 'Sexual': 0.2}],\n",
|
||||
" 'usage_metadata': {'candidates_billable_characters': 88.0,\n",
|
||||
" 'candidates_token_count': 24.0,\n",
|
||||
" 'prompt_billable_characters': 58.0,\n",
|
||||
" 'prompt_token_count': 12.0}}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat = ChatVertexAI() # default is `chat-bison`\n",
|
||||
"\n",
|
||||
"result = chat.generate([messages])\n",
|
||||
"pprint(result.generations[0][0].generation_info)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tool calling (a.k.a. function calling) with Gemini\n",
|
||||
"\n",
|
||||
"We can pass tool definitions to Gemini models to get the model to invoke those tools when appropriate. This is useful not only for LLM-powered tool use but also for getting structured outputs out of models more generally.\n",
|
||||
"\n",
|
||||
"With `ChatVertexAI.bind_tools()`, we can easily pass in Pydantic classes, dict schemas, LangChain tools, or even functions as tools to the model. Under the hood these are converted to a Gemini tool schema, which looks like:\n",
|
||||
"```python\n",
|
||||
"{\n",
|
||||
" \"name\": \"...\", # tool name\n",
|
||||
" \"description\": \"...\", # tool description\n",
|
||||
" \"parameters\": {...} # tool input schema as JSONSchema\n",
|
||||
"}\n",
|
||||
"```"
|
||||
"The LangChain VertexAI integration lives in the `langchain-google-vertexai` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'function_call': {'name': 'GetWeather', 'arguments': '{\"location\": \"San Francisco, CA\"}'}}, response_metadata={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}], 'citation_metadata': None, 'usage_metadata': {'prompt_token_count': 41, 'candidates_token_count': 7, 'total_token_count': 48}}, id='run-05e760dc-0682-4286-88e1-5b23df69b083-0', tool_calls=[{'name': 'GetWeather', 'args': {'location': 'San Francisco, CA'}, 'id': 'cd2499c4-4513-4059-bfff-5321b6e922d0'}])"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.pydantic_v1 import BaseModel, Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class GetWeather(BaseModel):\n",
|
||||
" \"\"\"Get the current weather in a given location\"\"\"\n",
|
||||
"\n",
|
||||
" location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm = ChatVertexAI(model=\"gemini-pro\", temperature=0)\n",
|
||||
"llm_with_tools = llm.bind_tools([GetWeather])\n",
|
||||
"ai_msg = llm_with_tools.invoke(\n",
|
||||
" \"what is the weather like in San Francisco\",\n",
|
||||
")\n",
|
||||
"ai_msg"
|
||||
"%pip install -qU langchain-google-vertexai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The tool calls can be access via the `AIMessage.tool_calls` attribute, where they are extracted in a model-agnostic format:"
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_google_vertexai import ChatVertexAI\n",
|
||||
"\n",
|
||||
"llm = ChatVertexAI(\n",
|
||||
" model=\"gemini-1.5-flash-001\",\n",
|
||||
" temperature=0,\n",
|
||||
" max_tokens=None,\n",
|
||||
" max_retries=6,\n",
|
||||
" stop=None,\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'GetWeather',\n",
|
||||
" 'args': {'location': 'San Francisco, CA'},\n",
|
||||
" 'id': 'cd2499c4-4513-4059-bfff-5321b6e922d0'}]"
|
||||
"AIMessage(content=\"J'adore programmer. \\n\", response_metadata={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}], 'usage_metadata': {'prompt_token_count': 20, 'candidates_token_count': 7, 'total_token_count': 27}}, id='run-7032733c-d05c-4f0c-a17a-6c575fdd1ae0-0', usage_metadata={'input_tokens': 20, 'output_tokens': 7, 'total_tokens': 27})"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ai_msg.tool_calls"
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"J'adore programmer. \n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For a complete guide on tool calling [head here](/docs/how_to/function_calling)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Structured outputs\n",
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"Many applications require structured model outputs. Tool calling makes it much easier to do this reliably. The [with_structured_outputs](https://api.python.langchain.com/en/latest/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html) constructor provides a simple interface built on top of tool calling for getting structured outputs out of a model. For a complete guide on structured outputs [head here](/docs/how_to/structured_output).\n",
|
||||
"\n",
|
||||
"### ChatVertexAI.with_structured_outputs()\n",
|
||||
"\n",
|
||||
"To get structured outputs from our Gemini model all we need to do is to specify a desired schema, either as a Pydantic class or as a JSON schema, "
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Person(name='Stefan', age=13)"
|
||||
"AIMessage(content='Ich liebe Programmieren. \\n', response_metadata={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}], 'usage_metadata': {'prompt_token_count': 15, 'candidates_token_count': 8, 'total_token_count': 23}}, id='run-c71955fd-8dc1-422b-88a7-853accf4811b-0', usage_metadata={'input_tokens': 15, 'output_tokens': 8, 'total_tokens': 23})"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
@@ -471,139 +212,36 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"class Person(BaseModel):\n",
|
||||
" \"\"\"Save information about a person.\"\"\"\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
" name: str = Field(..., description=\"The person's name.\")\n",
|
||||
" age: int = Field(..., description=\"The person's age.\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"structured_llm = llm.with_structured_output(Person)\n",
|
||||
"structured_llm.invoke(\"Stefan is already 13 years old\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### [Legacy] Using `create_structured_runnable()`\n",
|
||||
"\n",
|
||||
"The legacy wasy to get structured outputs is using the `create_structured_runnable` constructor:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_google_vertexai import create_structured_runnable\n",
|
||||
"\n",
|
||||
"chain = create_structured_runnable(Person, llm)\n",
|
||||
"chain.invoke(\"My name is Erick and I'm 27 years old\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Asynchronous calls\n",
|
||||
"\n",
|
||||
"We can make asynchronous calls via the Runnables [Async Interface](/docs/concepts#interface)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# for running these examples in the notebook:\n",
|
||||
"import asyncio\n",
|
||||
"\n",
|
||||
"import nest_asyncio\n",
|
||||
"\n",
|
||||
"nest_asyncio.apply()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=' अहं प्रोग्रामनं प्रेमामि')"
|
||||
]
|
||||
},
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"system = (\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"human = \"{text}\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
|
||||
"\n",
|
||||
"chat = ChatVertexAI(model=\"chat-bison\", max_tokens=1000, temperature=0.5)\n",
|
||||
"chain = prompt | chat\n",
|
||||
"\n",
|
||||
"asyncio.run(\n",
|
||||
" chain.ainvoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"Sanskrit\",\n",
|
||||
" \"text\": \"I love programming\",\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Streaming calls\n",
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"We can also stream outputs via the `stream` method:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" The five most populous countries in the world are:\n",
|
||||
"1. China (1.4 billion)\n",
|
||||
"2. India (1.3 billion)\n",
|
||||
"3. United States (331 million)\n",
|
||||
"4. Indonesia (273 million)\n",
|
||||
"5. Pakistan (220 million)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import sys\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"human\", \"List out the 5 most populous countries in the world\")]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chat = ChatVertexAI()\n",
|
||||
"\n",
|
||||
"chain = prompt | chat\n",
|
||||
"\n",
|
||||
"for chunk in chain.stream({}):\n",
|
||||
" sys.stdout.write(chunk.content)\n",
|
||||
" sys.stdout.flush()"
|
||||
"For detailed documentation of all ChatVertexAI features and configurations, like how to send multimodal inputs and configure safety settings, head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -627,5 +265,5 @@
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -2,10 +2,15 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Groq\n",
|
||||
"keywords: [chatgroq]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
@@ -15,45 +20,67 @@
|
||||
"source": [
|
||||
"# Groq\n",
|
||||
"\n",
|
||||
"Install the langchain-groq package if not already installed:\n",
|
||||
"LangChain supports integration with [Groq](https://groq.com/) chat models. Groq specializes in fast AI inference.\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"pip install langchain-groq\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Request an [API key](https://wow.groq.com) and set it as an environment variable:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"export GROQ_API_KEY=<YOUR API KEY>\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Alternatively, you may configure the API key when you initialize ChatGroq."
|
||||
"To get started, you'll first need to install the langchain-groq package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-groq"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Import the ChatGroq class and initialize it with a model:"
|
||||
"Request an [API key](https://wow.groq.com) and set it as an environment variable:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"export GROQ_API_KEY=<YOUR API KEY>\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Alternatively, you may configure the API key when you initialize ChatGroq.\n",
|
||||
"\n",
|
||||
"Here's an example of it in action:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"Low latency is crucial for Large Language Models (LLMs) because it directly impacts the user experience, model performance, and overall efficiency. Here are some reasons why low latency is essential for LLMs:\\n\\n1. **Real-time Interaction**: LLMs are often used in applications that require real-time interaction, such as chatbots, virtual assistants, and language translation. Low latency ensures that the model responds quickly to user input, providing a seamless and engaging experience.\\n2. **Conversational Flow**: In conversational AI, latency can disrupt the natural flow of conversation. Low latency helps maintain a smooth conversation, allowing users to respond quickly and naturally, without feeling like they're waiting for the model to catch up.\\n3. **Model Performance**: High latency can lead to increased error rates, as the model may struggle to keep up with the input pace. Low latency enables the model to process information more efficiently, resulting in better accuracy and performance.\\n4. **Scalability**: As the number of users and requests increases, low latency becomes even more critical. It allows the model to handle a higher volume of requests without sacrificing performance, making it more scalable and efficient.\\n5. **Resource Utilization**: Low latency can reduce the computational resources required to process requests. By minimizing latency, you can optimize resource allocation, reduce costs, and improve overall system efficiency.\\n6. **User Experience**: High latency can lead to frustration, abandonment, and a poor user experience. Low latency ensures that users receive timely responses, which is essential for building trust and satisfaction.\\n7. **Competitive Advantage**: In applications like customer service or language translation, low latency can be a key differentiator. It can provide a competitive advantage by offering a faster and more responsive experience, setting your application apart from others.\\n8. **Edge Computing**: With the increasing adoption of edge computing, low latency is critical for processing data closer to the user. This reduces latency even further, enabling real-time processing and analysis of data.\\n9. **Real-time Analytics**: Low latency enables real-time analytics and insights, which are essential for applications like sentiment analysis, trend detection, and anomaly detection.\\n10. **Future-Proofing**: As LLMs continue to evolve and become more complex, low latency will become even more critical. By prioritizing low latency now, you'll be better prepared to handle the demands of future LLM applications.\\n\\nIn summary, low latency is vital for LLMs because it ensures a seamless user experience, improves model performance, and enables efficient resource utilization. By prioritizing low latency, you can build more effective, scalable, and efficient LLM applications that meet the demands of real-time interaction and processing.\", response_metadata={'token_usage': {'completion_tokens': 541, 'prompt_tokens': 33, 'total_tokens': 574, 'completion_time': 1.499777658, 'prompt_time': 0.008344704, 'queue_time': None, 'total_time': 1.508122362}, 'model_name': 'llama3-70b-8192', 'system_fingerprint': 'fp_87cbfbbc4d', 'finish_reason': 'stop', 'logprobs': None}, id='run-49dad960-ace8-4cd7-90b3-2db99ecbfa44-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_groq import ChatGroq"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatGroq(temperature=0, model_name=\"mixtral-8x7b-32768\")"
|
||||
"from langchain_groq import ChatGroq\n",
|
||||
"\n",
|
||||
"chat = ChatGroq(\n",
|
||||
" temperature=0,\n",
|
||||
" model=\"llama3-70b-8192\",\n",
|
||||
" # api_key=\"\" # Optional if not set as an environment variable\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"system = \"You are a helpful assistant.\"\n",
|
||||
"human = \"{text}\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
|
||||
"\n",
|
||||
"chain = prompt | chat\n",
|
||||
"chain.invoke({\"text\": \"Explain the importance of low latency for LLMs.\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -62,97 +89,206 @@
|
||||
"source": [
|
||||
"You can view the available models [here](https://console.groq.com/docs/models).\n",
|
||||
"\n",
|
||||
"If you do not want to set your API key in the environment, you can pass it directly to the client:\n",
|
||||
"```python\n",
|
||||
"chat = ChatGroq(temperature=0, groq_api_key=\"YOUR_API_KEY\", model_name=\"mixtral-8x7b-32768\")\n",
|
||||
"## Tool calling\n",
|
||||
"\n",
|
||||
"```"
|
||||
"Groq chat models support [tool calling](/docs/how_to/tool_calling/) to generate output matching a specific schema. The model may choose to call multiple tools or the same tool multiple times if appropriate.\n",
|
||||
"\n",
|
||||
"Here's an example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'get_current_weather',\n",
|
||||
" 'args': {'location': 'San Francisco', 'unit': 'Celsius'},\n",
|
||||
" 'id': 'call_pydj'},\n",
|
||||
" {'name': 'get_current_weather',\n",
|
||||
" 'args': {'location': 'Tokyo', 'unit': 'Celsius'},\n",
|
||||
" 'id': 'call_jgq3'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Optional\n",
|
||||
"\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def get_current_weather(location: str, unit: Optional[str]):\n",
|
||||
" \"\"\"Get the current weather in a given location\"\"\"\n",
|
||||
" return \"Cloudy with a chance of rain.\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tool_model = chat.bind_tools([get_current_weather], tool_choice=\"auto\")\n",
|
||||
"\n",
|
||||
"res = tool_model.invoke(\"What is the weather like in San Francisco and Tokyo?\")\n",
|
||||
"\n",
|
||||
"res.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Write a prompt and invoke ChatGroq to create completions:"
|
||||
"### `.with_structured_output()`\n",
|
||||
"\n",
|
||||
"You can also use the convenience [`.with_structured_output()`](/docs/how_to/structured_output/#the-with_structured_output-method) method to coerce `ChatGroq` into returning a structured output.\n",
|
||||
"Here is an example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Low Latency Large Language Models (LLMs) are a type of artificial intelligence model that can understand and generate human-like text. The term \"low latency\" refers to the model\\'s ability to process and respond to inputs quickly, with minimal delay.\\n\\nThe importance of low latency in LLMs can be explained through the following points:\\n\\n1. Improved user experience: In real-time applications such as chatbots, virtual assistants, and interactive games, users expect quick and responsive interactions. Low latency LLMs can provide instant feedback and responses, creating a more seamless and engaging user experience.\\n\\n2. Better decision-making: In time-sensitive scenarios, such as financial trading or autonomous vehicles, low latency LLMs can quickly process and analyze vast amounts of data, enabling faster and more informed decision-making.\\n\\n3. Enhanced accessibility: For individuals with disabilities, low latency LLMs can help create more responsive and inclusive interfaces, such as voice-controlled assistants or real-time captioning systems.\\n\\n4. Competitive advantage: In industries where real-time data analysis and decision-making are crucial, low latency LLMs can provide a competitive edge by enabling businesses to react more quickly to market changes, customer needs, or emerging opportunities.\\n\\n5. Scalability: Low latency LLMs can efficiently handle a higher volume of requests and interactions, making them more suitable for large-scale applications and services.\\n\\nIn summary, low latency is an essential aspect of LLMs, as it significantly impacts user experience, decision-making, accessibility, competitiveness, and scalability. By minimizing delays and response times, low latency LLMs can unlock new possibilities and applications for artificial intelligence in various industries and scenarios.')"
|
||||
"Joke(setup='Why did the cat join a band?', punchline='Because it wanted to be the purr-cussionist!', rating=None)"
|
||||
]
|
||||
},
|
||||
"execution_count": 29,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"system = \"You are a helpful assistant.\"\n",
|
||||
"human = \"{text}\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"\n",
|
||||
"chain = prompt | chat\n",
|
||||
"chain.invoke({\"text\": \"Explain the importance of low latency LLMs.\"})"
|
||||
"\n",
|
||||
"class Joke(BaseModel):\n",
|
||||
" \"\"\"Joke to tell user.\"\"\"\n",
|
||||
"\n",
|
||||
" setup: str = Field(description=\"The setup of the joke\")\n",
|
||||
" punchline: str = Field(description=\"The punchline to the joke\")\n",
|
||||
" rating: Optional[int] = Field(description=\"How funny the joke is, from 1 to 10\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"structured_llm = chat.with_structured_output(Joke)\n",
|
||||
"\n",
|
||||
"structured_llm.invoke(\"Tell me a joke about cats\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## `ChatGroq` also supports async and streaming functionality:"
|
||||
"Behind the scenes, this takes advantage of the above tool calling functionality.\n",
|
||||
"\n",
|
||||
"## Async"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 32,
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"There's a star that shines up in the sky,\\nThe Sun, that makes the day bright and spry.\\nIt rises and sets,\\nIn a daily, predictable bet,\\nGiving life to the world, oh my!\")"
|
||||
"AIMessage(content='Here is a limerick about the sun:\\n\\nThere once was a sun in the sky,\\nWhose warmth and light caught the eye,\\nIt shone bright and bold,\\nWith a fiery gold,\\nAnd brought life to all, as it flew by.', response_metadata={'token_usage': {'completion_tokens': 51, 'prompt_tokens': 18, 'total_tokens': 69, 'completion_time': 0.144614022, 'prompt_time': 0.00585394, 'queue_time': None, 'total_time': 0.150467962}, 'model_name': 'llama3-70b-8192', 'system_fingerprint': 'fp_2f30b0b571', 'finish_reason': 'stop', 'logprobs': None}, id='run-e42340ba-f0ad-4b54-af61-8308d8ec8256-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 32,
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat = ChatGroq(temperature=0, model_name=\"mixtral-8x7b-32768\")\n",
|
||||
"chat = ChatGroq(temperature=0, model=\"llama3-70b-8192\")\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"human\", \"Write a Limerick about {topic}\")])\n",
|
||||
"chain = prompt | chat\n",
|
||||
"await chain.ainvoke({\"topic\": \"The Sun\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Streaming"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 33,
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The moon's gentle glow\n",
|
||||
"Illuminates the night sky\n",
|
||||
"Peaceful and serene"
|
||||
"Silvery glow bright\n",
|
||||
"Luna's gentle light shines down\n",
|
||||
"Midnight's gentle queen"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat = ChatGroq(temperature=0, model_name=\"llama2-70b-4096\")\n",
|
||||
"chat = ChatGroq(temperature=0, model=\"llama3-70b-8192\")\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"human\", \"Write a haiku about {topic}\")])\n",
|
||||
"chain = prompt | chat\n",
|
||||
"for chunk in chain.stream({\"topic\": \"The Moon\"}):\n",
|
||||
" print(chunk.content, end=\"\", flush=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Passing custom parameters\n",
|
||||
"\n",
|
||||
"You can pass other Groq-specific parameters using the `model_kwargs` argument on initialization. Here's an example of enabling JSON mode:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='{ \"response\": \"That\\'s a tough question! There are eight species of bears found in the world, and each one is unique and amazing in its own way. However, if I had to pick one, I\\'d say the giant panda is a popular favorite among many people. Who can resist those adorable black and white markings?\", \"followup_question\": \"Would you like to know more about the giant panda\\'s habitat and diet?\" }', response_metadata={'token_usage': {'completion_tokens': 89, 'prompt_tokens': 50, 'total_tokens': 139, 'completion_time': 0.249032839, 'prompt_time': 0.011134497, 'queue_time': None, 'total_time': 0.260167336}, 'model_name': 'llama3-70b-8192', 'system_fingerprint': 'fp_2f30b0b571', 'finish_reason': 'stop', 'logprobs': None}, id='run-558ce67e-8c63-43fe-a48f-6ecf181bc922-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat = ChatGroq(\n",
|
||||
" model=\"llama3-70b-8192\", model_kwargs={\"response_format\": {\"type\": \"json_object\"}}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"system = \"\"\"\n",
|
||||
"You are a helpful assistant.\n",
|
||||
"Always respond with a JSON object with two string keys: \"response\" and \"followup_question\".\n",
|
||||
"\"\"\"\n",
|
||||
"human = \"{question}\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
|
||||
"\n",
|
||||
"chain = prompt | chat\n",
|
||||
"\n",
|
||||
"chain.invoke({\"question\": \"what bear is best?\"})"
|
||||
]
|
||||
},
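Since JSON mode returns the object as a string in `message.content`, it typically still needs to be parsed on your side. A small, hedged follow-up reusing the `chain` from the cell above:

```python
import json

msg = chain.invoke({"question": "what bear is best?"})
# The content is a JSON string with the two keys requested in the system prompt.
parsed = json.loads(msg.content)
print(parsed["response"])
print(parsed["followup_question"])
```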
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -171,7 +307,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.13"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -315,7 +315,11 @@
|
||||
"source": [
|
||||
"## 4. Take it for a spin as an agent!\n",
|
||||
"\n",
|
||||
"Here we'll test out `Zephyr-7B-beta` as a zero-shot `ReAct` Agent. The example below is taken from [here](https://python.langchain.com/v0.1/docs/modules/agents/agent_types/react/#using-chat-models).\n",
|
||||
"Here we'll test out `Zephyr-7B-beta` as a zero-shot `ReAct` Agent. \n",
|
||||
"\n",
|
||||
"The agent is based on the paper [ReAct: Synergizing Reasoning and Acting in Language Models](https://arxiv.org/abs/2210.03629)\n",
|
||||
"\n",
|
||||
"The example below is taken from [here](https://python.langchain.com/v0.1/docs/modules/agents/agent_types/react/#using-chat-models).\n",
|
||||
"\n",
|
||||
"> Note: To run this section, you'll need to have a [SerpAPI Token](https://serpapi.com/) saved as an environment variable: `SERPAPI_API_KEY`"
|
||||
]
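One hedged way to provide that token from inside the notebook (the prompt text below is illustrative, not part of the original guide):

```python
import getpass
import os

# Only ask for the SerpAPI token if it isn't already set in the environment.
if "SERPAPI_API_KEY" not in os.environ:
    os.environ["SERPAPI_API_KEY"] = getpass.getpass("Enter your SerpAPI token: ")
```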
|
||||
|
||||
418
docs/docs/integrations/chat/llamacpp.ipynb
Normal file
@@ -0,0 +1,418 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatLlamaCpp\n",
|
||||
"\n",
|
||||
"This notebook provides a quick overview for getting started with chat model intergrated with [llama cpp python](https://github.com/abetlen/llama-cpp-python)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"| Class | Package | Local | Serializable | JS support |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [ChatLlamaCpp](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.llamacpp.ChatLlamaCpp.html) | [langchain-community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ✅ | ❌ | ❌ |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | Image input | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ✅ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To get started and use **all** the features show below, we reccomend using a model that has been fine-tuned for tool-calling.\n",
|
||||
"\n",
|
||||
"We will use [\n",
|
||||
"Hermes-2-Pro-Llama-3-8B-GGUF](https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF) from NousResearch. \n",
|
||||
"\n",
|
||||
"> Hermes 2 Pro is an upgraded version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced Function Calling and JSON Mode dataset developed in-house. This new version of Hermes maintains its excellent general task and conversation capabilities - but also excels at Function Calling\n",
|
||||
"\n",
|
||||
"See our guides on local models to go deeper:\n",
|
||||
"\n",
|
||||
"* [Run LLMs locally](https://python.langchain.com/v0.1/docs/guides/development/local_llms/)\n",
|
||||
"* [Using local models with RAG](https://python.langchain.com/v0.1/docs/use_cases/question_answering/local_retrieval_qa/)\n",
|
||||
"\n",
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain OpenAI integration lives in the `langchain-community` and `llama-cpp-python` packages:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-community llama-cpp-python"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Path to your model weights\n",
|
||||
"local_model = \"local/path/to/Hermes-2-Pro-Llama-3-8B-Q8_0.gguf\""
|
||||
]
|
||||
},
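If you don't already have the weights locally, one possible way to fetch them is with `huggingface_hub`. This is a sketch under the assumption that the `Hermes-2-Pro-Llama-3-8B-Q8_0.gguf` file referenced above is the file you want from the NousResearch repo:

```python
from huggingface_hub import hf_hub_download

# Downloads the GGUF file (if not already cached) and returns its local path.
local_model = hf_hub_download(
    repo_id="NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF",
    filename="Hermes-2-Pro-Llama-3-8B-Q8_0.gguf",
)
```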
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import multiprocessing\n",
|
||||
"\n",
|
||||
"from langchain_community.chat_models import ChatLlamaCpp\n",
|
||||
"\n",
|
||||
"llm = ChatLlamaCpp(\n",
|
||||
" temperature=0.5,\n",
|
||||
" model_path=local_model,\n",
|
||||
" n_ctx=10000,\n",
|
||||
" n_gpu_layers=8,\n",
|
||||
" n_batch=300, # Should be between 1 and n_ctx, consider the amount of VRAM in your GPU.\n",
|
||||
" max_tokens=512,\n",
|
||||
" n_threads=multiprocessing.cpu_count() - 1,\n",
|
||||
" repeat_penalty=1.5,\n",
|
||||
" top_p=0.5,\n",
|
||||
" verbose=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"J'aime programmer. (In France, \"programming\" is often used in its original sense of scheduling or organizing events.) \n",
|
||||
"\n",
|
||||
"If you meant computer-programming: \n",
|
||||
"Je suis amoureux de la programmation informatique.\n",
|
||||
"\n",
|
||||
"(You might also say simply 'programmation', which would be understood as both meanings - depending on context).\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tool calling\n",
|
||||
"\n",
|
||||
"Firstly, it works mostly the same as OpenAI Function Calling\n",
|
||||
"\n",
|
||||
"OpenAI has a [tool calling](https://platform.openai.com/docs/guides/function-calling) (we use \"tool calling\" and \"function calling\" interchangeably here) API that lets you describe tools and their arguments, and have the model return a JSON object with a tool to invoke and the inputs to that tool. tool-calling is extremely useful for building tool-using chains and agents, and for getting structured outputs from models more generally.\n",
|
||||
"\n",
|
||||
"With `ChatLlamaCpp.bind_tools`, we can easily pass in Pydantic classes, dict schemas, LangChain tools, or even functions as tools to the model. Under the hood these are converted to an OpenAI tool schemas, which looks like:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"name\": \"...\",\n",
|
||||
" \"description\": \"...\",\n",
|
||||
" \"parameters\": {...} # JSONSchema\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"and passed in every model invocation.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"However, it cannot automatically trigger a function/tool, we need to force it by specifying the 'tool choice' parameter. This parameter is typically formatted as described below.\n",
|
||||
"\n",
|
||||
"```{\"type\": \"function\", \"function\": {\"name\": <<tool_name>>}}.```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.tools import tool\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class WeatherInput(BaseModel):\n",
|
||||
" location: str = Field(description=\"The city and state, e.g. San Francisco, CA\")\n",
|
||||
" unit: str = Field(enum=[\"celsius\", \"fahrenheit\"])\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(\"get_current_weather\", args_schema=WeatherInput)\n",
|
||||
"def get_weather(location: str, unit: str):\n",
|
||||
" \"\"\"Get the current weather in a given location\"\"\"\n",
|
||||
" return f\"Now the weather in {location} is 22 {unit}\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm_with_tools = llm.bind_tools(\n",
|
||||
" tools=[get_weather],\n",
|
||||
" tool_choice={\"type\": \"function\", \"function\": {\"name\": \"get_current_weather\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ai_msg = llm_with_tools.invoke(\n",
|
||||
" \"what is the weather like in HCMC in celsius\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'get_current_weather',\n",
|
||||
" 'args': {'location': 'Ho Chi Minh City', 'unit': 'celsius'},\n",
|
||||
" 'id': 'call__0_get_current_weather_cmpl-394d9943-0a1f-425b-8139-d2826c1431f2'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ai_msg.tool_calls"
|
||||
]
|
||||
},
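The model only proposes the call; running it is up to us. Below is a minimal sketch (not part of the original notebook) that executes the returned tool call with our `get_weather` tool and wraps the result in a `ToolMessage`, so it could be appended to the conversation and passed back to the model:

```python
from langchain_core.messages import ToolMessage

# Take the first proposed tool call and run our @tool with its arguments.
tool_call = ai_msg.tool_calls[0]
tool_output = get_weather.invoke(tool_call["args"])

# Package the result so it can be appended to the message history.
tool_message = ToolMessage(content=str(tool_output), tool_call_id=tool_call["id"])
tool_message
```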
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class MagicFunctionInput(BaseModel):\n",
|
||||
" magic_function_input: int = Field(description=\"The input value for magic function\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(\"get_magic_function\", args_schema=MagicFunctionInput)\n",
|
||||
"def magic_function(magic_function_input: int):\n",
|
||||
" \"\"\"Get the value of magic function for an input.\"\"\"\n",
|
||||
" return magic_function_input + 2\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm_with_tools = llm.bind_tools(\n",
|
||||
" tools=[magic_function],\n",
|
||||
" tool_choice={\"type\": \"function\", \"function\": {\"name\": \"get_magic_function\"}},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"ai_msg = llm_with_tools.invoke(\n",
|
||||
" \"What is magic function of 3?\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'get_magic_function',\n",
|
||||
" 'args': {'magic_function_input': 3},\n",
|
||||
" 'id': 'call__0_get_magic_function_cmpl-cd83a994-b820-4428-957c-48076c68335a'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ai_msg.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Structured output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.pydantic_v1 import BaseModel\n",
|
||||
"from langchain_core.utils.function_calling import convert_to_openai_tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class Joke(BaseModel):\n",
|
||||
" \"\"\"A setup to a joke and the punchline.\"\"\"\n",
|
||||
"\n",
|
||||
" setup: str\n",
|
||||
" punchline: str\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"dict_schema = convert_to_openai_tool(Joke)\n",
|
||||
"structured_llm = llm.with_structured_output(dict_schema)\n",
|
||||
"result = structured_llm.invoke(\"Tell me a joke about birds\")\n",
|
||||
"result"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'setup': '- Why did the chicken cross the playground?',\n",
|
||||
" 'punchline': '\\n\\n- To get to its gilded cage on the other side!'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Streaming\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for chunk in llm.stream(\"what is 25x5\"):\n",
|
||||
" print(chunk.content, end=\"\\n\", flush=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatLlamaCpp features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.llamacpp.ChatLlamaCpp.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -225,7 +225,7 @@
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"You can also easily combine with a prompt template for easy structuring of user input. We can do this using [LCEL](/docs/concepts#langchain-expression-language)"
|
||||
"You can also easily combine with a prompt template for easy structuring of user input. We can do this using [LCEL](/docs/concepts#langchain-expression-language-lcel)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -134,7 +134,7 @@
|
||||
"from langchain_nvidia_ai_endpoints import ChatNVIDIA\n",
|
||||
"\n",
|
||||
"# connect to an embedding NIM running at localhost:8000, specifying a specific model\n",
|
||||
"llm = ChatNVIDIA(base_url=\"http://localhost:8000/v1\", model=\"meta-llama3-8b-instruct\")"
|
||||
"llm = ChatNVIDIA(base_url=\"http://localhost:8000/v1\", model=\"meta/llama3-8b-instruct\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -658,7 +658,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.13"
|
||||
"version": "3.10.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -238,6 +238,67 @@
|
||||
"> Ideally, you do not need to connect Repository IDs here to get Retrieval Augmented Generations. You can still get the same result if you have connected the repositories in prem platform. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Prem Templates\n",
|
||||
"\n",
|
||||
"Writing prompt templates can be super messy. Prompt templates are long, hard to manage, and must be continuously tweaked to improve them and keep them consistent throughout the application. \n",
|
||||
"\n",
|
||||
"With **Prem**, writing and managing prompts can be super easy. The **_Templates_** tab inside the [launchpad](https://docs.premai.io/get-started/launchpad) helps you write as many prompts as you need and use them inside the SDK to run your application with those prompts. You can read more about Prompt Templates [here](https://docs.premai.io/get-started/prem-templates). \n",
|
||||
"\n",
|
||||
"To use Prem Templates natively with LangChain, you need to pass an `id` to the `HumanMessage`. This `id` should be the name of the variable in your prompt template. The `content` in the `HumanMessage` should be the value of that variable. \n",
|
||||
"\n",
|
||||
"Let's say, for example, that your prompt template is this:\n",
|
||||
"\n",
|
||||
"```text\n",
|
||||
"Say hello to my name and say a feel-good quote\n",
|
||||
"from my age. My name is: {name} and age is {age}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"So now your `human_messages` should look like this:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"human_messages = [\n",
|
||||
" HumanMessage(content=\"Shawn\", id=\"name\"),\n",
|
||||
" HumanMessage(content=\"22\", id=\"age\"),\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"Pass this `human_messages` list to the ChatPremAI client. Please note: do not forget to\n",
|
||||
"pass the additional `template_id` to invoke generation with Prem Templates. If you are not aware of `template_id`, you can learn more about it [in our docs](https://docs.premai.io/get-started/prem-templates). Here is an example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"template_id = \"78069ce8-xxxxx-xxxxx-xxxx-xxx\"\n",
|
||||
"response = chat.invoke(human_messages, template_id=template_id)\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The Prem Templates feature is available in streaming too. "
|
||||
]
|
||||
},
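A hedged sketch of what streaming with a template might look like — it assumes that `stream` forwards the `template_id` keyword argument in the same way `invoke` does:

```python
# Sketch only: assumes `template_id` is accepted by `stream` just as by `invoke`.
for chunk in chat.stream(human_messages, template_id=template_id):
    print(chunk.content, end="", flush=True)
```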
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
|
||||
180
docs/docs/integrations/chat/snowflake.ipynb
Normal file
180
docs/docs/integrations/chat/snowflake.ipynb
Normal file
@@ -0,0 +1,180 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Snowflake Cortex\n",
|
||||
"\n",
|
||||
"[Snowflake Cortex](https://docs.snowflake.com/en/user-guide/snowflake-cortex/llm-functions) gives you instant access to industry-leading large language models (LLMs) trained by researchers at companies like Mistral, Reka, Meta, and Google, including [Snowflake Arctic](https://www.snowflake.com/en/data-cloud/arctic/), an open enterprise-grade model developed by Snowflake.\n",
|
||||
"\n",
|
||||
"This example goes over how to use LangChain to interact with Snowflake Cortex."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation and setup\n",
|
||||
"\n",
|
||||
"We start by installing the `snowflake-snowpark-python` library, using the command below. Then we configure the credentials for connecting to Snowflake, either as environment variables or by passing them directly."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet snowflake-snowpark-python"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"# First step is to set up the environment variables, to connect to Snowflake,\n",
|
||||
"# you can also pass these snowflake credentials while instantiating the model\n",
|
||||
"\n",
|
||||
"if os.environ.get(\"SNOWFLAKE_ACCOUNT\") is None:\n",
|
||||
" os.environ[\"SNOWFLAKE_ACCOUNT\"] = getpass.getpass(\"Account: \")\n",
|
||||
"\n",
|
||||
"if os.environ.get(\"SNOWFLAKE_USERNAME\") is None:\n",
|
||||
" os.environ[\"SNOWFLAKE_USERNAME\"] = getpass.getpass(\"Username: \")\n",
|
||||
"\n",
|
||||
"if os.environ.get(\"SNOWFLAKE_PASSWORD\") is None:\n",
|
||||
" os.environ[\"SNOWFLAKE_PASSWORD\"] = getpass.getpass(\"Password: \")\n",
|
||||
"\n",
|
||||
"if os.environ.get(\"SNOWFLAKE_DATABASE\") is None:\n",
|
||||
" os.environ[\"SNOWFLAKE_DATABASE\"] = getpass.getpass(\"Database: \")\n",
|
||||
"\n",
|
||||
"if os.environ.get(\"SNOWFLAKE_SCHEMA\") is None:\n",
|
||||
" os.environ[\"SNOWFLAKE_SCHEMA\"] = getpass.getpass(\"Schema: \")\n",
|
||||
"\n",
|
||||
"if os.environ.get(\"SNOWFLAKE_WAREHOUSE\") is None:\n",
|
||||
" os.environ[\"SNOWFLAKE_WAREHOUSE\"] = getpass.getpass(\"Warehouse: \")\n",
|
||||
"\n",
|
||||
"if os.environ.get(\"SNOWFLAKE_ROLE\") is None:\n",
|
||||
" os.environ[\"SNOWFLAKE_ROLE\"] = getpass.getpass(\"Role: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatSnowflakeCortex\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"\n",
|
||||
"# By default, we'll be using the cortex provided model: `snowflake-arctic`, with function: `complete`\n",
|
||||
"chat = ChatSnowflakeCortex()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The above cell assumes that your Snowflake credentials are set in your environment variables. If you would rather manually specify them, use the following code:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"chat = ChatSnowflakeCortex(\n",
|
||||
" # change default cortex model and function\n",
|
||||
" model=\"snowflake-arctic\",\n",
|
||||
" cortex_function=\"complete\",\n",
|
||||
"\n",
|
||||
" # change default generation parameters\n",
|
||||
" temperature=0,\n",
|
||||
" max_tokens=10,\n",
|
||||
" top_p=0.95,\n",
|
||||
"\n",
|
||||
" # specify snowflake credentials\n",
|
||||
" account=\"YOUR_SNOWFLAKE_ACCOUNT\",\n",
|
||||
" username=\"YOUR_SNOWFLAKE_USERNAME\",\n",
|
||||
" password=\"YOUR_SNOWFLAKE_PASSWORD\",\n",
|
||||
" database=\"YOUR_SNOWFLAKE_DATABASE\",\n",
|
||||
" schema=\"YOUR_SNOWFLAKE_SCHEMA\",\n",
|
||||
" role=\"YOUR_SNOWFLAKE_ROLE\",\n",
|
||||
" warehouse=\"YOUR_SNOWFLAKE_WAREHOUSE\"\n",
|
||||
")\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Calling the model\n",
|
||||
"We can now call the model using the `invoke` or `generate` method.\n",
|
||||
"\n",
|
||||
"#### Generation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\" Large language models are artificial intelligence systems designed to understand, generate, and manipulate human language. These models are typically based on deep learning techniques and are trained on vast amounts of text data to learn patterns and structures in language. They can perform a wide range of language-related tasks, such as language translation, text generation, sentiment analysis, and answering questions. Some well-known large language models include Google's BERT, OpenAI's GPT series, and Facebook's RoBERTa. These models have shown remarkable performance in various natural language processing tasks, and their applications continue to expand as research in AI progresses.\", response_metadata={'completion_tokens': 131, 'prompt_tokens': 29, 'total_tokens': 160}, id='run-5435bd0a-83fd-4295-b237-66cbd1b5c0f3-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" SystemMessage(content=\"You are a friendly assistant.\"),\n",
|
||||
" HumanMessage(content=\"What are large language models?\"),\n",
|
||||
"]\n",
|
||||
"chat.invoke(messages)"
|
||||
]
|
||||
},
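The `generate` method mentioned above is the batch-style entry point. A brief sketch (same messages as above, wrapped in a list) of how it could be called:

```python
# Hedged sketch: `generate` takes a list of message lists and returns an
# LLMResult with one list of generations per input.
result = chat.generate([messages])
print(result.generations[0][0].text)
```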
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Streaming\n",
|
||||
"`ChatSnowflakeCortex` doesn't support streaming as of now. Support for streaming will be coming in later versions!"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -13,7 +13,7 @@
|
||||
"\n",
|
||||
"## Prerequisites\n",
|
||||
"\n",
|
||||
"You need to have an existing dataset on the Apify platform. If you don't have one, please first check out [this notebook](/docs/integrations/tools/apify) on how to use Apify to extract content from documentation, knowledge bases, help centers, or blogs."
|
||||
"You need to have an existing dataset on the Apify platform. If you don't have one, please first check out [this notebook](/docs/integrations/tools/apify) on how to use Apify to extract content from documentation, knowledge bases, help centers, or blogs. This example shows how to load a dataset produced by the [Website Content Crawler](https://apify.com/apify/website-content-crawler)."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -101,8 +101,10 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.indexes import VectorstoreIndexCreator\n",
|
||||
"from langchain_community.document_loaders import ApifyDatasetLoader\n",
|
||||
"from langchain_core.documents import Document"
|
||||
"from langchain_community.utilities import ApifyWrapper\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"from langchain_openai.embeddings import OpenAIEmbeddings"
|
||||
]
|
||||
},
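As a hedged sketch (the dataset ID below is a placeholder, and the `text`/`url` fields assume Website Content Crawler output), the loader itself is typically constructed from an existing dataset like this:

```python
loader = ApifyDatasetLoader(
    dataset_id="your-dataset-id",  # placeholder: use your own dataset ID
    dataset_mapping_function=lambda item: Document(
        page_content=item["text"] or "", metadata={"source": item["url"]}
    ),
)
```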
|
||||
{
|
||||
@@ -125,7 +127,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"index = VectorstoreIndexCreator().from_loaders([loader])"
|
||||
"index = VectorstoreIndexCreator(embedding=OpenAIEmbeddings()).from_loaders([loader])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -135,7 +137,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What is Apify?\"\n",
|
||||
"result = index.query_with_sources(query)"
|
||||
"result = index.query_with_sources(query, llm=OpenAI())"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -83,7 +83,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = ImageCaptionLoader(path_images=list_image_urls)\n",
|
||||
"loader = ImageCaptionLoader(images=list_image_urls)\n",
|
||||
"list_docs = loader.load()\n",
|
||||
"list_docs"
|
||||
]
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install gpudb==7.2.0.1"
|
||||
"%pip install gpudb==7.2.0.9"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -97,14 +97,14 @@
|
||||
"# data and the `SCHEMA.TABLE` combination must exist in Kinetica.\n",
|
||||
"\n",
|
||||
"QUERY = \"select text, survey_id as source from SCHEMA.TABLE limit 10\"\n",
|
||||
"snowflake_loader = KineticaLoader(\n",
|
||||
"kl = KineticaLoader(\n",
|
||||
" query=QUERY,\n",
|
||||
" host=HOST,\n",
|
||||
" username=USERNAME,\n",
|
||||
" password=PASSWORD,\n",
|
||||
" metadata_columns=[\"source\"],\n",
|
||||
")\n",
|
||||
"kinetica_documents = snowflake_loader.load()\n",
|
||||
"kinetica_documents = kl.load()\n",
|
||||
"print(kinetica_documents)"
|
||||
]
|
||||
}
|
||||
|
||||
@@ -7,140 +7,99 @@
|
||||
"source": [
|
||||
"# Recursive URL\n",
|
||||
"\n",
|
||||
"We may want to process load all URLs under a root directory.\n",
|
||||
"\n",
|
||||
"For example, let's look at the [Python 3.9 Document](https://docs.python.org/3.9/).\n",
|
||||
"\n",
|
||||
"This has many interesting child pages that we may want to read in bulk.\n",
|
||||
"\n",
|
||||
"Of course, the `WebBaseLoader` can load a list of pages. \n",
|
||||
"\n",
|
||||
"But, the challenge is traversing the tree of child pages and actually assembling that list!\n",
|
||||
" \n",
|
||||
"We do this using the `RecursiveUrlLoader`.\n",
|
||||
"\n",
|
||||
"This also gives us the flexibility to exclude some children, customize the extractor, and more."
|
||||
"The `RecursiveUrlLoader` lets you recursively scrape all child links from a root URL and parse them into Documents."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1be8094f",
|
||||
"id": "947d29e7-3679-483d-973f-79ea3403a370",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Parameters\n",
|
||||
"- url: str, the target url to crawl.\n",
|
||||
"- exclude_dirs: Optional[str], webpage directories to exclude.\n",
|
||||
"- use_async: Optional[bool], wether to use async requests, using async requests is usually faster in large tasks. However, async will disable the lazy loading feature(the function still works, but it is not lazy). By default, it is set to False.\n",
|
||||
"- extractor: Optional[Callable[[str], str]], a function to extract the text of the document from the webpage, by default it returns the page as it is. It is recommended to use tools like goose3 and beautifulsoup to extract the text. By default, it just returns the page as it is.\n",
|
||||
"- max_depth: Optional[int] = None, the maximum depth to crawl. By default, it is set to 2. If you need to crawl the whole website, set it to a number that is large enough would simply do the job.\n",
|
||||
"- timeout: Optional[int] = None, the timeout for each request, in the unit of seconds. By default, it is set to 10.\n",
|
||||
"- prevent_outside: Optional[bool] = None, whether to prevent crawling outside the root url. By default, it is set to True."
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"The `RecursiveUrlLoader` lives in the `langchain-community` package. There are no other required packages, though you will get richer default Document metadata if you have `beautifulsoup4` installed as well."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "23c18539",
|
||||
"id": "23359ab0-8056-4dee-8bff-c38dc079f17f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders.recursive_url_loader import RecursiveUrlLoader"
|
||||
"%pip install -qU langchain-community beautifulsoup4"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6384c057",
|
||||
"id": "07985766-e4e9-4ea1-8a18-924fa4f294e5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's try a simple example."
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our document loader object and load Documents:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "55394afe",
|
||||
"execution_count": 1,
|
||||
"id": "cb208dcf-9ce9-4197-bc44-b80d20aa4e50",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from bs4 import BeautifulSoup as Soup\n",
|
||||
"from langchain_community.document_loaders import RecursiveUrlLoader\n",
|
||||
"\n",
|
||||
"url = \"https://docs.python.org/3.9/\"\n",
|
||||
"loader = RecursiveUrlLoader(\n",
|
||||
" url=url, max_depth=2, extractor=lambda x: Soup(x, \"html.parser\").text\n",
|
||||
")\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "084fb2ce",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\n\\n\\n\\nPython Frequently Asked Questions — Python 3.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[0].page_content[:50]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "13bd7e16",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'source': 'https://docs.python.org/3.9/library/index.html',\n",
|
||||
" 'title': 'The Python Standard Library — Python 3.9.17 documentation',\n",
|
||||
" 'language': None}"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[-1].metadata"
|
||||
" \"https://docs.python.org/3.9/\",\n",
|
||||
" # max_depth=2,\n",
|
||||
" # use_async=False,\n",
|
||||
" # extractor=None,\n",
|
||||
" # metadata_extractor=None,\n",
|
||||
" # exclude_dirs=(),\n",
|
||||
" # timeout=10,\n",
|
||||
" # check_response_status=True,\n",
|
||||
" # continue_on_failure=True,\n",
|
||||
" # prevent_outside=True,\n",
|
||||
" # base_url=None,\n",
|
||||
" # ...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5866e5a6",
|
||||
"id": "0fac4425-735f-487d-a12b-c8ed2a209039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"However, since it's hard to perform a perfect filter, you may still see some irrelevant results. You can filter the returned documents yourself if needed. Most of the time, the returned results are good enough."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4ec8ecef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Testing on LangChain docs."
|
||||
"## Load\n",
|
||||
"\n",
|
||||
"Use ``.load()`` to synchronously load into memory all Documents, with one\n",
|
||||
"Document per visited URL. Starting from the initial URL, we recurse through\n",
|
||||
"all linked URLs up to the specified max_depth.\n",
|
||||
"\n",
|
||||
"Let's run through a basic example of how to use the `RecursiveUrlLoader` on the [Python 3.9 Documentation](https://docs.python.org/3.9/)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "349b5598",
|
||||
"id": "a30843c8-4a59-43dc-bf60-f26532f0f8e1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/Users/bagatur/.pyenv/versions/3.9.1/lib/python3.9/html/parser.py:170: XMLParsedAsHTMLWarning: It looks like you're parsing an XML document using an HTML parser. If this really is an HTML document (maybe it's XHTML?), you can ignore or filter this warning. If it's XML, you should know that using an XML parser will be more reliable. To parse this document as XML, make sure you have the lxml package installed, and pass the keyword argument `features=\"xml\"` into the BeautifulSoup constructor.\n",
|
||||
" k = self.parse_starttag(i)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"8"
|
||||
"{'source': 'https://docs.python.org/3.9/',\n",
|
||||
" 'content_type': 'text/html',\n",
|
||||
" 'title': '3.9.19 Documentation',\n",
|
||||
" 'language': None}"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
@@ -149,10 +108,208 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"url = \"https://js.langchain.com/docs/modules/memory/integrations/\"\n",
|
||||
"loader = RecursiveUrlLoader(url=url)\n",
|
||||
"docs = loader.load()\n",
|
||||
"len(docs)"
|
||||
"docs[0].metadata"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "211856ed-6dd7-46c6-859e-11aaea9093db",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Great! The first document looks like the root page we started from. Let's look at the metadata of the next document."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "2d842c03-fab8-4097-9f4f-809b2e71c0ba",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'source': 'https://docs.python.org/3.9/using/index.html',\n",
|
||||
" 'content_type': 'text/html',\n",
|
||||
" 'title': 'Python Setup and Usage — Python 3.9.19 documentation',\n",
|
||||
" 'language': None}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[1].metadata"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f5714ace-7cc5-4c5c-9426-f68342880da0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"That URL looks like a child of our root page, which is great! Let's move on from metadata to examine the content of one of our documents."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "51dc6c67-6857-4298-9472-08b147f3a631",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"<!DOCTYPE html>\n",
|
||||
"\n",
|
||||
"<html xmlns=\"http://www.w3.org/1999/xhtml\">\n",
|
||||
" <head>\n",
|
||||
" <meta charset=\"utf-8\" /><title>3.9.19 Documentation</title><meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n",
|
||||
" \n",
|
||||
" <link rel=\"stylesheet\" href=\"_static/pydoctheme.css\" type=\"text/css\" />\n",
|
||||
" <link rel=\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].page_content[:300])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d87cc239",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"That certainly looks like HTML that comes from the URL https://docs.python.org/3.9/, which is what we expected. Let's now look at some variations we can make to our basic example that can be helpful in different situations. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8f41cc89",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Adding an Extractor\n",
|
||||
"\n",
|
||||
"By default the loader sets the raw HTML from each link as the Document page content. To parse this HTML into a more human/LLM-friendly format you can pass in a custom ``extractor`` method:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "33a6f5b8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/var/folders/td/vzm913rx77x21csd90g63_7c0000gn/T/ipykernel_10935/1083427287.py:6: XMLParsedAsHTMLWarning: It looks like you're parsing an XML document using an HTML parser. If this really is an HTML document (maybe it's XHTML?), you can ignore or filter this warning. If it's XML, you should know that using an XML parser will be more reliable. To parse this document as XML, make sure you have the lxml package installed, and pass the keyword argument `features=\"xml\"` into the BeautifulSoup constructor.\n",
|
||||
" soup = BeautifulSoup(html, \"lxml\")\n",
|
||||
"/Users/isaachershenson/.pyenv/versions/3.11.9/lib/python3.11/html/parser.py:170: XMLParsedAsHTMLWarning: It looks like you're parsing an XML document using an HTML parser. If this really is an HTML document (maybe it's XHTML?), you can ignore or filter this warning. If it's XML, you should know that using an XML parser will be more reliable. To parse this document as XML, make sure you have the lxml package installed, and pass the keyword argument `features=\"xml\"` into the BeautifulSoup constructor.\n",
|
||||
" k = self.parse_starttag(i)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"3.9.19 Documentation\n",
|
||||
"\n",
|
||||
"Download\n",
|
||||
"Download these documents\n",
|
||||
"Docs by version\n",
|
||||
"\n",
|
||||
"Python 3.13 (in development)\n",
|
||||
"Python 3.12 (stable)\n",
|
||||
"Python 3.11 (security-fixes)\n",
|
||||
"Python 3.10 (security-fixes)\n",
|
||||
"Python 3.9 (securit\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import re\n",
|
||||
"\n",
|
||||
"from bs4 import BeautifulSoup\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def bs4_extractor(html: str) -> str:\n",
|
||||
" soup = BeautifulSoup(html, \"lxml\")\n",
|
||||
" return re.sub(r\"\\n\\n+\", \"\\n\\n\", soup.text).strip()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"loader = RecursiveUrlLoader(\"https://docs.python.org/3.9/\", extractor=bs4_extractor)\n",
|
||||
"docs = loader.load()\n",
|
||||
"print(docs[0].page_content[:200])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c8e8a826",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This looks much nicer!\n",
|
||||
"\n",
|
||||
"You can similarly pass in a `metadata_extractor` to customize how Document metadata is extracted from the HTTP response. See the [API reference](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.recursive_url_loader.RecursiveUrlLoader.html) for more on this."
|
||||
]
|
||||
},
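As a rough sketch (the exact callable signature may differ between versions; here we assume the extractor receives the raw HTML and the source URL), a custom `metadata_extractor` could look like:

```python
def simple_metadata_extractor(raw_html: str, url: str) -> dict:
    # Pull the <title> out of the raw HTML; fall back to an empty string.
    start, end = raw_html.find("<title>"), raw_html.find("</title>")
    title = raw_html[start + len("<title>") : end] if start != -1 and end != -1 else ""
    return {"source": url, "title": title}


loader = RecursiveUrlLoader(
    "https://docs.python.org/3.9/", metadata_extractor=simple_metadata_extractor
)
```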
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1dddbc94",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Lazy loading\n",
|
||||
"\n",
|
||||
"If we're loading a large number of Documents and our downstream operations can be done over subsets of all loaded Documents, we can lazily load our Documents one at a time to minimize our memory footprint:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "7d0114fc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/var/folders/4j/2rz3865x6qg07tx43146py8h0000gn/T/ipykernel_73962/2110507528.py:6: XMLParsedAsHTMLWarning: It looks like you're parsing an XML document using an HTML parser. If this really is an HTML document (maybe it's XHTML?), you can ignore or filter this warning. If it's XML, you should know that using an XML parser will be more reliable. To parse this document as XML, make sure you have the lxml package installed, and pass the keyword argument `features=\"xml\"` into the BeautifulSoup constructor.\n",
|
||||
" soup = BeautifulSoup(html, \"lxml\")\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"page = []\n",
|
||||
"for doc in loader.lazy_load():\n",
|
||||
" page.append(doc)\n",
|
||||
" if len(page) >= 10:\n",
|
||||
" # do some paged operation, e.g.\n",
|
||||
" # index.upsert(page)\n",
|
||||
"\n",
|
||||
" page = []"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f88a7c2f-35df-4c3a-b238-f91be2674b96",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In this example we never have more than 10 Documents loaded into memory at a time."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3e4d1c8f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"These examples show just a few of the ways in which you can modify the default `RecursiveUrlLoader`, but there are many more modifications that can be made to best fit your use case. Using the parameters `link_regex` and `exclude_dirs` can help you filter out unwanted URLs, `aload()` and `alazy_load()` can be used for asynchronous loading, and more.\n",
|
||||
"\n",
|
||||
"For detailed information on configuring and calling the ``RecursiveUrlLoader``, please see the API reference: https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.recursive_url_loader.RecursiveUrlLoader.html."
|
||||
]
|
||||
}
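For example, a hedged sketch of filtering with `exclude_dirs` (the excluded path is just an illustration):

```python
loader = RecursiveUrlLoader(
    "https://docs.python.org/3.9/",
    # Skip everything under the "what's new" pages.
    exclude_dirs=["https://docs.python.org/3.9/whatsnew/"],
)
```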
|
||||
],
|
||||
@@ -172,7 +329,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
"- C++ (*)\n",
|
||||
"- C# (*)\n",
|
||||
"- COBOL\n",
|
||||
"- Elixir\n",
|
||||
"- Go (*)\n",
|
||||
"- Java (*)\n",
|
||||
"- JavaScript (requires package `esprima`)\n",
|
||||
|
||||
@@ -113,7 +113,7 @@
|
||||
"\n",
|
||||
"LCEL is a declarative way to compose chains. LCEL was designed from day 1 to support putting prototypes in production, with no code changes, from the simplest “prompt + LLM” chain to the most complex chains.\n",
|
||||
"\n",
|
||||
"- **[Overview](/docs/concepts#langchain-expression-language)**: LCEL and its benefits\n",
|
||||
"- **[Overview](/docs/concepts#langchain-expression-language-lcel)**: LCEL and its benefits\n",
|
||||
"- **[Interface](/docs/concepts#interface)**: The standard interface for LCEL objects\n",
|
||||
"- **[How-to](/docs/expression_language/how_to)**: Key features of LCEL\n",
|
||||
"- **[Cookbook](/docs/expression_language/cookbook)**: Example code for accomplishing common tasks\n",
|
||||
|
||||
@@ -15,47 +15,45 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "427d5745",
|
||||
"metadata": {},
|
||||
"source": "from langchain_community.document_loaders import YoutubeLoader",
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import YoutubeLoader"
|
||||
]
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "34a25b57",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet youtube-transcript-api"
|
||||
]
|
||||
],
|
||||
"outputs": [],
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bc8b308a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = YoutubeLoader.from_youtube_url(\n",
|
||||
" \"https://www.youtube.com/watch?v=QsYGlZkevEg\", add_video_info=False\n",
|
||||
")"
|
||||
]
|
||||
],
|
||||
"outputs": [],
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d073dd36",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader.load()"
|
||||
]
|
||||
],
|
||||
"outputs": [],
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
@@ -68,26 +66,26 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ba28af69",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet pytube"
|
||||
]
|
||||
],
|
||||
"outputs": [],
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9b8ea390",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = YoutubeLoader.from_youtube_url(\n",
|
||||
" \"https://www.youtube.com/watch?v=QsYGlZkevEg\", add_video_info=True\n",
|
||||
")\n",
|
||||
"loader.load()"
|
||||
]
|
||||
],
|
||||
"outputs": [],
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
@@ -104,10 +102,8 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "08510625",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = YoutubeLoader.from_youtube_url(\n",
|
||||
" \"https://www.youtube.com/watch?v=QsYGlZkevEg\",\n",
|
||||
@@ -116,7 +112,41 @@
|
||||
" translation=\"en\",\n",
|
||||
")\n",
|
||||
"loader.load()"
|
||||
]
|
||||
],
|
||||
"outputs": [],
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"### Get transcripts as timestamped chunks\n",
|
||||
"\n",
|
||||
"Get one or more `Document` objects, each containing a chunk of the video transcript. The length of the chunks, in seconds, may be specified. Each chunk's metadata includes a URL of the video on YouTube, which will start the video at the beginning of the specific chunk.\n",
|
||||
"\n",
|
||||
"`transcript_format` param: One of the `langchain_community.document_loaders.youtube.TranscriptFormat` values. In this case, `TranscriptFormat.CHUNKS`.\n",
|
||||
"\n",
|
||||
"`chunk_size_seconds` param: An integer number of video seconds to be represented by each chunk of transcript data. Default is 120 seconds."
|
||||
],
|
||||
"id": "69f4e399a9764d73"
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"from langchain_community.document_loaders.youtube import TranscriptFormat\n",
|
||||
"\n",
|
||||
"loader = YoutubeLoader.from_youtube_url(\n",
|
||||
" \"https://www.youtube.com/watch?v=TKCMw0utiak\",\n",
|
||||
" add_video_info=True,\n",
|
||||
" transcript_format=TranscriptFormat.CHUNKS,\n",
|
||||
" chunk_size_seconds=30,\n",
|
||||
")\n",
|
||||
"print(\"\\n\\n\".join(map(repr, loader.load())))"
|
||||
],
|
||||
"id": "540bbf19182f38bc",
|
||||
"outputs": [],
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
@@ -142,10 +172,8 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c345bc43",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Init the GoogleApiClient\n",
|
||||
"from pathlib import Path\n",
|
||||
@@ -170,7 +198,9 @@
|
||||
"\n",
|
||||
"# returns a list of Documents\n",
|
||||
"youtube_loader_channel.load()"
|
||||
]
|
||||
],
|
||||
"outputs": [],
|
||||
"execution_count": null
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -0,0 +1,387 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# DashScope Reranker\n",
|
||||
"\n",
|
||||
"This notebook shows how to use DashScope Reranker for document compression and retrieval. [DashScope](https://dashscope.aliyun.com/) is the generative AI service from Alibaba Cloud (Aliyun).\n",
|
||||
"\n",
|
||||
"DashScope's [Text ReRank Model](https://help.aliyun.com/document_detail/2780058.html?spm=a2c4g.2780059.0.0.6d995024FlrJ12) supports reranking documents with a maximum of 4000 tokens. Moreover, it supports Chinese, English, Japanese, Korean, Thai, Spanish, French, Portuguese, Indonesian, Arabic, and over 50 other languages. For more details, please visit [here](https://help.aliyun.com/document_detail/2780059.html?spm=a2c4g.2780058.0.0.3a9e5b1dWeOQjI)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet dashscope"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet faiss\n",
|
||||
"\n",
|
||||
"# OR (depending on Python version)\n",
|
||||
"\n",
|
||||
"%pip install --upgrade --quiet faiss-cpu"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# To create api key: https://bailian.console.aliyun.com/?apiKey=1#/api-key\n",
|
||||
"\n",
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"DASHSCOPE_API_KEY\"] = getpass.getpass(\"DashScope API Key:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Helper function for printing docs\n",
|
||||
"def pretty_print_docs(docs):\n",
|
||||
" print(\n",
|
||||
" f\"\\n{'-' * 100}\\n\".join(\n",
|
||||
" [f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
|
||||
" )\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up the base vector store retriever\n",
|
||||
"Let's start by initializing a simple vector store retriever and storing the 2023 State of the Union speech (in chunks). We can set up the retriever to retrieve a high number (20) of docs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Document 1:\n",
|
||||
"\n",
|
||||
"I understand. \n",
|
||||
"\n",
|
||||
"I remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it. \n",
|
||||
"\n",
|
||||
"That’s why one of the first things I did as President was fight to pass the American Rescue Plan. \n",
|
||||
"\n",
|
||||
"Because people were hurting. We needed to act, and we did. \n",
|
||||
"\n",
|
||||
"Few pieces of legislation have done more in a critical moment in our history to lift us out of crisis.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 2:\n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 3:\n",
|
||||
"\n",
|
||||
"To all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \n",
|
||||
"\n",
|
||||
"And I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n",
|
||||
"\n",
|
||||
"Tonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 4:\n",
|
||||
"\n",
|
||||
"We cannot let this happen. \n",
|
||||
"\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 5:\n",
|
||||
"\n",
|
||||
"Tonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more. \n",
|
||||
"\n",
|
||||
"The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. \n",
|
||||
"\n",
|
||||
"We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 6:\n",
|
||||
"\n",
|
||||
"Every Administration says they’ll do it, but we are actually doing it. \n",
|
||||
"\n",
|
||||
"We will buy American to make sure everything from the deck of an aircraft carrier to the steel on highway guardrails are made in America. \n",
|
||||
"\n",
|
||||
"But to compete for the best jobs of the future, we also need to level the playing field with China and other competitors.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 7:\n",
|
||||
"\n",
|
||||
"When we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven’t done in a long time: build a better America. \n",
|
||||
"\n",
|
||||
"For more than two years, COVID-19 has impacted every decision in our lives and the life of the nation. \n",
|
||||
"\n",
|
||||
"And I know you’re tired, frustrated, and exhausted. \n",
|
||||
"\n",
|
||||
"But I also know this.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 8:\n",
|
||||
"\n",
|
||||
"A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n",
|
||||
"\n",
|
||||
"And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 9:\n",
|
||||
"\n",
|
||||
"My plan will not only lower costs to give families a fair shot, it will lower the deficit. \n",
|
||||
"\n",
|
||||
"The previous Administration not only ballooned the deficit with tax cuts for the very wealthy and corporations, it undermined the watchdogs whose job was to keep pandemic relief funds from being wasted. \n",
|
||||
"\n",
|
||||
"But in my administration, the watchdogs have been welcomed back. \n",
|
||||
"\n",
|
||||
"We’re going after the criminals who stole billions in relief money meant for small businesses and millions of Americans.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 10:\n",
|
||||
"\n",
|
||||
"He will never extinguish their love of freedom. He will never weaken the resolve of the free world. \n",
|
||||
"\n",
|
||||
"We meet tonight in an America that has lived through two of the hardest years this nation has ever faced. \n",
|
||||
"\n",
|
||||
"The pandemic has been punishing. \n",
|
||||
"\n",
|
||||
"And so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more. \n",
|
||||
"\n",
|
||||
"I understand.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 11:\n",
|
||||
"\n",
|
||||
"And tonight, I’m announcing that the Justice Department will name a chief prosecutor for pandemic fraud. \n",
|
||||
"\n",
|
||||
"By the end of this year, the deficit will be down to less than half what it was before I took office. \n",
|
||||
"\n",
|
||||
"The only president ever to cut the deficit by more than one trillion dollars in a single year. \n",
|
||||
"\n",
|
||||
"Lowering your costs also means demanding more competition. \n",
|
||||
"\n",
|
||||
"I’m a capitalist, but capitalism without competition isn’t capitalism. \n",
|
||||
"\n",
|
||||
"It’s exploitation—and it drives up prices.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 12:\n",
|
||||
"\n",
|
||||
"Let each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world. \n",
|
||||
"\n",
|
||||
"Please rise if you are able and show that, Yes, we the United States of America stand with the Ukrainian people. \n",
|
||||
"\n",
|
||||
"Throughout our history we’ve learned this lesson when dictators do not pay a price for their aggression they cause more chaos. \n",
|
||||
"\n",
|
||||
"They keep moving. \n",
|
||||
"\n",
|
||||
"And the costs and the threats to America and the world keep rising.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 13:\n",
|
||||
"\n",
|
||||
"Cancer is the #2 cause of death in America–second only to heart disease. \n",
|
||||
"\n",
|
||||
"Last month, I announced our plan to supercharge \n",
|
||||
"the Cancer Moonshot that President Obama asked me to lead six years ago. \n",
|
||||
"\n",
|
||||
"Our goal is to cut the cancer death rate by at least 50% over the next 25 years, turn more cancers from death sentences into treatable diseases. \n",
|
||||
"\n",
|
||||
"More support for patients and families. \n",
|
||||
"\n",
|
||||
"To get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 14:\n",
|
||||
"\n",
|
||||
"It fueled our efforts to vaccinate the nation and combat COVID-19. It delivered immediate economic relief for tens of millions of Americans. \n",
|
||||
"\n",
|
||||
"Helped put food on their table, keep a roof over their heads, and cut the cost of health insurance. \n",
|
||||
"\n",
|
||||
"And as my Dad used to say, it gave people a little breathing room.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 15:\n",
|
||||
"\n",
|
||||
"America will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n",
|
||||
"\n",
|
||||
"These steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n",
|
||||
"\n",
|
||||
"But I want you to know that we are going to be okay. \n",
|
||||
"\n",
|
||||
"When the history of this era is written Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 16:\n",
|
||||
"\n",
|
||||
"So that’s my plan. It will grow the economy and lower costs for families. \n",
|
||||
"\n",
|
||||
"So what are we waiting for? Let’s get this done. And while you’re at it, confirm my nominees to the Federal Reserve, which plays a critical role in fighting inflation. \n",
|
||||
"\n",
|
||||
"My plan will not only lower costs to give families a fair shot, it will lower the deficit.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 17:\n",
|
||||
"\n",
|
||||
"And we will, as one people. \n",
|
||||
"\n",
|
||||
"One America. \n",
|
||||
"\n",
|
||||
"The United States of America. \n",
|
||||
"\n",
|
||||
"May God bless you all. May God protect our troops.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 18:\n",
|
||||
"\n",
|
||||
"As I’ve told Xi Jinping, it is never a good bet to bet against the American people. \n",
|
||||
"\n",
|
||||
"We’ll create good jobs for millions of Americans, modernizing roads, airports, ports, and waterways all across America. \n",
|
||||
"\n",
|
||||
"And we’ll do it all to withstand the devastating effects of the climate crisis and promote environmental justice.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 19:\n",
|
||||
"\n",
|
||||
"And I know you’re tired, frustrated, and exhausted. \n",
|
||||
"\n",
|
||||
"But I also know this. \n",
|
||||
"\n",
|
||||
"Because of the progress we’ve made, because of your resilience and the tools we have, tonight I can say \n",
|
||||
"we are moving forward safely, back to more normal routines. \n",
|
||||
"\n",
|
||||
"We’ve reached a new moment in the fight against COVID-19, with severe cases down to a level not seen since last July. \n",
|
||||
"\n",
|
||||
"Just a few days ago, the Centers for Disease Control and Prevention—the CDC—issued new mask guidelines.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 20:\n",
|
||||
"\n",
|
||||
"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n",
|
||||
"\n",
|
||||
"Last year COVID-19 kept us apart. This year we are finally together again. \n",
|
||||
"\n",
|
||||
"Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n",
|
||||
"\n",
|
||||
"With a duty to one another to the American people to the Constitution. \n",
|
||||
"\n",
|
||||
"And with an unwavering resolve that freedom will always triumph over tyranny.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"from langchain_community.embeddings.dashscope import DashScopeEmbeddings\n",
|
||||
"from langchain_community.vectorstores.faiss import FAISS\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"documents = TextLoader(\"../../how_to/state_of_the_union.txt\").load()\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)\n",
|
||||
"texts = text_splitter.split_documents(documents)\n",
|
||||
"retriever = FAISS.from_documents(texts, DashScopeEmbeddings()).as_retriever( # type: ignore\n",
|
||||
" search_kwargs={\"k\": 20}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = retriever.invoke(query)\n",
|
||||
"pretty_print_docs(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Reranking with DashScopeRerank\n",
|
||||
"Now let's wrap our base retriever with a `ContextualCompressionRetriever`. We'll use the `DashScopeRerank` to rerank the returned results."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Document 1:\n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 2:\n",
|
||||
"\n",
|
||||
"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n",
|
||||
"\n",
|
||||
"Last year COVID-19 kept us apart. This year we are finally together again. \n",
|
||||
"\n",
|
||||
"Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n",
|
||||
"\n",
|
||||
"With a duty to one another to the American people to the Constitution. \n",
|
||||
"\n",
|
||||
"And with an unwavering resolve that freedom will always triumph over tyranny.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 3:\n",
|
||||
"\n",
|
||||
"Tonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more. \n",
|
||||
"\n",
|
||||
"The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. \n",
|
||||
"\n",
|
||||
"We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.retrievers import ContextualCompressionRetriever\n",
|
||||
"from langchain_community.document_compressors.dashscope_rerank import DashScopeRerank\n",
|
||||
"\n",
|
||||
"compressor = DashScopeRerank()\n",
|
||||
"compression_retriever = ContextualCompressionRetriever(\n",
|
||||
" base_compressor=compressor, base_retriever=retriever\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"compressed_docs = compression_retriever.invoke(\n",
|
||||
" \"What did the president say about Ketanji Jackson Brown\"\n",
|
||||
")\n",
|
||||
"pretty_print_docs(compressed_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -15,16 +15,24 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet doctran"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -34,7 +42,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -43,7 +51,7 @@
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -64,7 +72,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -107,7 +115,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -119,13 +127,13 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Output\n",
|
||||
"## Output using Sync version\n",
|
||||
"After translating a document, the result will be returned as a new document with the page_content translated into the target language"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -134,7 +142,82 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Documento Confidencial - Solo para Uso Interno\n",
|
||||
"\n",
|
||||
"Fecha: 1 de Julio de 2023\n",
|
||||
"\n",
|
||||
"Asunto: Actualizaciones y Discusiones sobre Varios Temas\n",
|
||||
"\n",
|
||||
"Estimado Equipo,\n",
|
||||
"\n",
|
||||
"Espero que este correo electrónico los encuentre bien. En este documento, me gustaría proporcionarles algunas actualizaciones importantes y discutir varios temas que requieren nuestra atención. Por favor, traten la información contenida aquí como altamente confidencial.\n",
|
||||
"\n",
|
||||
"Medidas de Seguridad y Privacidad\n",
|
||||
"Como parte de nuestro compromiso continuo para garantizar la seguridad y privacidad de los datos de nuestros clientes, hemos implementado medidas sólidas en todos nuestros sistemas. Nos gustaría elogiar a John Doe (email: john.doe@example.com) del departamento de TI por su trabajo diligente en mejorar nuestra seguridad de red. En adelante, recordamos amablemente a todos que se adhieran estrictamente a nuestras políticas y pautas de protección de datos. Además, si encuentran algún riesgo o incidente de seguridad potencial, por favor repórtenlo inmediatamente a nuestro equipo dedicado en security@example.com.\n",
|
||||
"\n",
|
||||
"Actualizaciones de Recursos Humanos y Beneficios para Empleados\n",
|
||||
"Recientemente, dimos la bienvenida a varios nuevos miembros del equipo que han hecho contribuciones significativas a sus respectivos departamentos. Me gustaría reconocer a Jane Smith (SSN: 049-45-5928) por su destacado desempeño en servicio al cliente. Jane ha recibido consistentemente comentarios positivos de nuestros clientes. Además, recuerden que el período de inscripción abierta para nuestro programa de beneficios para empleados se acerca rápidamente. Si tienen alguna pregunta o requieren asistencia, por favor contacten a nuestro representante de Recursos Humanos, Michael Johnson (teléfono: 418-492-3850, email: michael.johnson@example.com).\n",
|
||||
"\n",
|
||||
"Iniciativas y Campañas de Marketing\n",
|
||||
"Nuestro equipo de marketing ha estado trabajando activamente en el desarrollo de nuevas estrategias para aumentar el conocimiento de la marca y fomentar la participación de los clientes. Nos gustaría agradecer a Sarah Thompson (teléfono: 415-555-1234) por sus esfuerzos excepcionales en la gestión de nuestras plataformas de redes sociales. Sarah ha aumentado con éxito nuestra base de seguidores en un 20% solo en el último mes. Además, marquen sus calendarios para el próximo evento de lanzamiento de productos el 15 de Julio. Animamos a todos los miembros del equipo a asistir y apoyar este emocionante hito para nuestra empresa.\n",
|
||||
"\n",
|
||||
"Proyectos de Investigación y Desarrollo\n",
|
||||
"En nuestra búsqueda de innovación, nuestro departamento de investigación y desarrollo ha estado trabajando incansablemente en varios proyectos. Me gustaría reconocer el trabajo excepcional de David Rodriguez (email: david.rodriguez@example.com) en su rol como líder de proyecto. Las contribuciones de David al desarrollo de nuestra tecnología de vanguardia han sido fundamentales. Además, recordamos a todos que compartan sus ideas y sugerencias para posibles nuevos proyectos durante nuestra sesión mensual de lluvia de ideas de I+D, programada para el 10 de Julio.\n",
|
||||
"\n",
|
||||
"Por favor, traten la información en este documento con la máxima confidencialidad y asegúrense de que no sea compartida con personas no autorizadas. Si tienen alguna pregunta o inquietud sobre los temas discutidos, por favor no duden en comunicarse directamente conmigo.\n",
|
||||
"\n",
|
||||
"Gracias por su atención, y sigamos trabajando juntos para alcanzar nuestros objetivos.\n",
|
||||
"\n",
|
||||
"Saludos cordiales,\n",
|
||||
"\n",
|
||||
"Jason Fan\n",
|
||||
"Cofundador y CEO\n",
|
||||
"Psychic\n",
|
||||
"jason@psychic.dev\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(translated_document[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Output using the Async version\n",
|
||||
"\n",
|
||||
"After translating a document, the result will be returned as a new document with the page_content translated into the target language. The async version will improve performance when the documents are chunked in multiple parts. It will also make sure to return the output in the correct order."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import asyncio"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"result = await qa_translator.atransform_documents(documents)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -152,22 +235,22 @@
|
||||
"Espero que este correo electrónico les encuentre bien. En este documento, me gustaría proporcionarles algunas actualizaciones importantes y discutir varios temas que requieren nuestra atención. Por favor, traten la información contenida aquí como altamente confidencial.\n",
|
||||
"\n",
|
||||
"Medidas de Seguridad y Privacidad\n",
|
||||
"Como parte de nuestro compromiso continuo de garantizar la seguridad y privacidad de los datos de nuestros clientes, hemos implementado medidas sólidas en todos nuestros sistemas. Nos gustaría elogiar a John Doe (correo electrónico: john.doe@example.com) del departamento de TI por su diligente trabajo en mejorar nuestra seguridad de red. En el futuro, recordamos amablemente a todos que se adhieran estrictamente a nuestras políticas y pautas de protección de datos. Además, si encuentran algún riesgo o incidente de seguridad potencial, por favor, repórtelo de inmediato a nuestro equipo dedicado en security@example.com.\n",
|
||||
"Como parte de nuestro compromiso continuo de garantizar la seguridad y privacidad de los datos de nuestros clientes, hemos implementado medidas sólidas en todos nuestros sistemas. Nos gustaría elogiar a John Doe (email: john.doe@example.com) del departamento de TI por su trabajo diligente en mejorar nuestra seguridad de red. En adelante, recordamos amablemente a todos que se adhieran estrictamente a nuestras políticas y pautas de protección de datos. Además, si encuentran algún riesgo o incidente de seguridad potencial, por favor repórtenlo inmediatamente a nuestro equipo dedicado en security@example.com.\n",
|
||||
"\n",
|
||||
"Actualizaciones de Recursos Humanos y Beneficios para Empleados\n",
|
||||
"Recientemente, dimos la bienvenida a varios nuevos miembros del equipo que han realizado contribuciones significativas en sus respectivos departamentos. Me gustaría reconocer a Jane Smith (SSN: 049-45-5928) por su destacado desempeño en servicio al cliente. Jane ha recibido consistentemente comentarios positivos de nuestros clientes. Además, recuerden que el período de inscripción abierta para nuestro programa de beneficios para empleados se acerca rápidamente. Si tienen alguna pregunta o necesitan ayuda, por favor, contacten a nuestro representante de Recursos Humanos, Michael Johnson (teléfono: 418-492-3850, correo electrónico: michael.johnson@example.com).\n",
|
||||
"Recientemente, dimos la bienvenida a varios nuevos miembros del equipo que han hecho contribuciones significativas a sus respectivos departamentos. Me gustaría reconocer a Jane Smith (SSN: 049-45-5928) por su destacado desempeño en servicio al cliente. Jane ha recibido consistentemente comentarios positivos de nuestros clientes. Además, recuerden que el período de inscripción abierta para nuestro programa de beneficios para empleados se acerca rápidamente. Si tienen alguna pregunta o requieren asistencia, por favor contacten a nuestro representante de Recursos Humanos, Michael Johnson (teléfono: 418-492-3850, email: michael.johnson@example.com).\n",
|
||||
"\n",
|
||||
"Iniciativas y Campañas de Marketing\n",
|
||||
"Nuestro equipo de marketing ha estado trabajando activamente en el desarrollo de nuevas estrategias para aumentar el conocimiento de nuestra marca y fomentar la participación de los clientes. Nos gustaría agradecer a Sarah Thompson (teléfono: 415-555-1234) por sus esfuerzos excepcionales en la gestión de nuestras plataformas de redes sociales. Sarah ha logrado aumentar nuestra base de seguidores en un 20% solo en el último mes. Además, marquen sus calendarios para el próximo evento de lanzamiento de productos el 15 de Julio. Animamos a todos los miembros del equipo a asistir y apoyar este emocionante hito para nuestra empresa.\n",
|
||||
"Nuestro equipo de marketing ha estado trabajando activamente en el desarrollo de nuevas estrategias para aumentar el conocimiento de la marca y fomentar la participación de los clientes. Nos gustaría agradecer a Sarah Thompson (teléfono: 415-555-1234) por sus esfuerzos excepcionales en la gestión de nuestras plataformas de redes sociales. Sarah ha aumentado con éxito nuestra base de seguidores en un 20% solo en el último mes. Además, marquen sus calendarios para el próximo evento de lanzamiento de productos el 15 de Julio. Animamos a todos los miembros del equipo a asistir y apoyar este emocionante hito para nuestra empresa.\n",
|
||||
"\n",
|
||||
"Proyectos de Investigación y Desarrollo\n",
|
||||
"En nuestra búsqueda de la innovación, nuestro departamento de investigación y desarrollo ha estado trabajando incansablemente en varios proyectos. Me gustaría reconocer el trabajo excepcional de David Rodriguez (correo electrónico: david.rodriguez@example.com) en su papel de líder de proyecto. Las contribuciones de David al desarrollo de nuestra tecnología de vanguardia han sido fundamentales. Además, nos gustaría recordar a todos que compartan sus ideas y sugerencias para posibles nuevos proyectos durante nuestra sesión mensual de lluvia de ideas de I+D, programada para el 10 de Julio.\n",
|
||||
"En nuestra búsqueda de innovación, nuestro departamento de investigación y desarrollo ha estado trabajando incansablemente en varios proyectos. Me gustaría reconocer el trabajo excepcional de David Rodriguez (email: david.rodriguez@example.com) en su rol como líder de proyecto. Las contribuciones de David al desarrollo de nuestra tecnología de vanguardia han sido fundamentales. Además, recordamos a todos que compartan sus ideas y sugerencias para posibles nuevos proyectos durante nuestra sesión mensual de lluvia de ideas de I+D, programada para el 10 de Julio.\n",
|
||||
"\n",
|
||||
"Por favor, traten la información de este documento con la máxima confidencialidad y asegúrense de no compartirla con personas no autorizadas. Si tienen alguna pregunta o inquietud sobre los temas discutidos, por favor, no duden en comunicarse directamente conmigo.\n",
|
||||
"Por favor, traten la información en este documento con la máxima confidencialidad y asegúrense de que no sea compartida con personas no autorizadas. Si tienen alguna pregunta o inquietud sobre los temas discutidos, por favor no duden en comunicarse directamente conmigo.\n",
|
||||
"\n",
|
||||
"Gracias por su atención y sigamos trabajando juntos para alcanzar nuestros objetivos.\n",
|
||||
"Gracias por su atención, y sigamos trabajando juntos para alcanzar nuestros objetivos.\n",
|
||||
"\n",
|
||||
"Atentamente,\n",
|
||||
"Saludos cordiales,\n",
|
||||
"\n",
|
||||
"Jason Fan\n",
|
||||
"Cofundador y CEO\n",
|
||||
@@ -177,7 +260,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(translated_document[0].page_content)"
|
||||
"print(result[0].page_content)"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -197,7 +280,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.5"
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -0,0 +1,420 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Volcengine Reranker\n",
|
||||
"\n",
|
||||
"This notebook shows how to use Volcengine Reranker for document compression and retrieval. [Volcengine](https://www.volcengine.com/) is a cloud service platform developed by ByteDance, the parent company of TikTok.\n",
|
||||
"\n",
|
||||
"Volcengine's Rerank Service supports reranking up to 50 documents with a maximum of 4000 tokens. For more, please visit [here](https://www.volcengine.com/docs/84313/1254474) and [here](https://www.volcengine.com/docs/84313/1254605)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet volcengine"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet faiss\n",
|
||||
"\n",
|
||||
"# OR (depending on Python version)\n",
|
||||
"\n",
|
||||
"%pip install --upgrade --quiet faiss-cpu"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# To obtain ak/sk: https://www.volcengine.com/docs/84313/1254488\n",
|
||||
"\n",
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"VOLC_API_AK\"] = getpass.getpass(\"Volcengine API AK:\")\n",
|
||||
"os.environ[\"VOLC_API_SK\"] = getpass.getpass(\"Volcengine API SK:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Helper function for printing docs\n",
|
||||
"def pretty_print_docs(docs):\n",
|
||||
" print(\n",
|
||||
" f\"\\n{'-' * 100}\\n\".join(\n",
|
||||
" [f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
|
||||
" )\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up the base vector store retriever\n",
|
||||
"Let's start by initializing a simple vector store retriever and storing the 2023 State of the Union speech (in chunks). We can set up the retriever to retrieve a high number (20) of docs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/Users/terminator/Developer/langchain/.venv/lib/python3.11/site-packages/sentence_transformers/cross_encoder/CrossEncoder.py:11: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n",
|
||||
" from tqdm.autonotebook import tqdm, trange\n",
|
||||
"/Users/terminator/Developer/langchain/.venv/lib/python3.11/site-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n",
|
||||
" warnings.warn(\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Document 1:\n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 2:\n",
|
||||
"\n",
|
||||
"We cannot let this happen. \n",
|
||||
"\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 3:\n",
|
||||
"\n",
|
||||
"As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \n",
|
||||
"\n",
|
||||
"While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 4:\n",
|
||||
"\n",
|
||||
"He will never extinguish their love of freedom. He will never weaken the resolve of the free world. \n",
|
||||
"\n",
|
||||
"We meet tonight in an America that has lived through two of the hardest years this nation has ever faced. \n",
|
||||
"\n",
|
||||
"The pandemic has been punishing. \n",
|
||||
"\n",
|
||||
"And so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more. \n",
|
||||
"\n",
|
||||
"I understand.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 5:\n",
|
||||
"\n",
|
||||
"As Ohio Senator Sherrod Brown says, “It’s time to bury the label “Rust Belt.” \n",
|
||||
"\n",
|
||||
"It’s time. \n",
|
||||
"\n",
|
||||
"But with all the bright spots in our economy, record job growth and higher wages, too many families are struggling to keep up with the bills. \n",
|
||||
"\n",
|
||||
"Inflation is robbing them of the gains they might otherwise feel. \n",
|
||||
"\n",
|
||||
"I get it. That’s why my top priority is getting prices under control.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 6:\n",
|
||||
"\n",
|
||||
"A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n",
|
||||
"\n",
|
||||
"And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 7:\n",
|
||||
"\n",
|
||||
"It’s not only the right thing to do—it’s the economically smart thing to do. \n",
|
||||
"\n",
|
||||
"That’s why immigration reform is supported by everyone from labor unions to religious leaders to the U.S. Chamber of Commerce. \n",
|
||||
"\n",
|
||||
"Let’s get it done once and for all. \n",
|
||||
"\n",
|
||||
"Advancing liberty and justice also requires protecting the rights of women. \n",
|
||||
"\n",
|
||||
"The constitutional right affirmed in Roe v. Wade—standing precedent for half a century—is under attack as never before.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 8:\n",
|
||||
"\n",
|
||||
"I understand. \n",
|
||||
"\n",
|
||||
"I remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it. \n",
|
||||
"\n",
|
||||
"That’s why one of the first things I did as President was fight to pass the American Rescue Plan. \n",
|
||||
"\n",
|
||||
"Because people were hurting. We needed to act, and we did. \n",
|
||||
"\n",
|
||||
"Few pieces of legislation have done more in a critical moment in our history to lift us out of crisis.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 9:\n",
|
||||
"\n",
|
||||
"Third – we can end the shutdown of schools and businesses. We have the tools we need. \n",
|
||||
"\n",
|
||||
"It’s time for Americans to get back to work and fill our great downtowns again. People working from home can feel safe to begin to return to the office. \n",
|
||||
"\n",
|
||||
"We’re doing that here in the federal government. The vast majority of federal workers will once again work in person. \n",
|
||||
"\n",
|
||||
"Our schools are open. Let’s keep it that way. Our kids need to be in school.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 10:\n",
|
||||
"\n",
|
||||
"He met the Ukrainian people. \n",
|
||||
"\n",
|
||||
"From President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n",
|
||||
"\n",
|
||||
"Groups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. \n",
|
||||
"\n",
|
||||
"In this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 11:\n",
|
||||
"\n",
|
||||
"The widow of Sergeant First Class Heath Robinson. \n",
|
||||
"\n",
|
||||
"He was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq. \n",
|
||||
"\n",
|
||||
"Stationed near Baghdad, just yards from burn pits the size of football fields. \n",
|
||||
"\n",
|
||||
"Heath’s widow Danielle is here with us tonight. They loved going to Ohio State football games. He loved building Legos with their daughter. \n",
|
||||
"\n",
|
||||
"But cancer from prolonged exposure to burn pits ravaged Heath’s lungs and body. \n",
|
||||
"\n",
|
||||
"Danielle says Heath was a fighter to the very end.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 12:\n",
|
||||
"\n",
|
||||
"Danielle says Heath was a fighter to the very end. \n",
|
||||
"\n",
|
||||
"He didn’t know how to stop fighting, and neither did she. \n",
|
||||
"\n",
|
||||
"Through her pain she found purpose to demand we do better. \n",
|
||||
"\n",
|
||||
"Tonight, Danielle—we are. \n",
|
||||
"\n",
|
||||
"The VA is pioneering new ways of linking toxic exposures to diseases, already helping more veterans get benefits. \n",
|
||||
"\n",
|
||||
"And tonight, I’m announcing we’re expanding eligibility to veterans suffering from nine respiratory cancers.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 13:\n",
|
||||
"\n",
|
||||
"We can do all this while keeping lit the torch of liberty that has led generations of immigrants to this land—my forefathers and so many of yours. \n",
|
||||
"\n",
|
||||
"Provide a pathway to citizenship for Dreamers, those on temporary status, farm workers, and essential workers. \n",
|
||||
"\n",
|
||||
"Revise our laws so businesses have the workers they need and families don’t wait decades to reunite. \n",
|
||||
"\n",
|
||||
"It’s not only the right thing to do—it’s the economically smart thing to do.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 14:\n",
|
||||
"\n",
|
||||
"He rejected repeated efforts at diplomacy. \n",
|
||||
"\n",
|
||||
"He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. Putin was wrong. We were ready. Here is what we did. \n",
|
||||
"\n",
|
||||
"We prepared extensively and carefully. \n",
|
||||
"\n",
|
||||
"We spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 15:\n",
|
||||
"\n",
|
||||
"As I’ve told Xi Jinping, it is never a good bet to bet against the American people. \n",
|
||||
"\n",
|
||||
"We’ll create good jobs for millions of Americans, modernizing roads, airports, ports, and waterways all across America. \n",
|
||||
"\n",
|
||||
"And we’ll do it all to withstand the devastating effects of the climate crisis and promote environmental justice.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 16:\n",
|
||||
"\n",
|
||||
"Tonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more. \n",
|
||||
"\n",
|
||||
"The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. \n",
|
||||
"\n",
|
||||
"We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 17:\n",
|
||||
"\n",
|
||||
"Look at cars. \n",
|
||||
"\n",
|
||||
"Last year, there weren’t enough semiconductors to make all the cars that people wanted to buy. \n",
|
||||
"\n",
|
||||
"And guess what, prices of automobiles went up. \n",
|
||||
"\n",
|
||||
"So—we have a choice. \n",
|
||||
"\n",
|
||||
"One way to fight inflation is to drive down wages and make Americans poorer. \n",
|
||||
"\n",
|
||||
"I have a better plan to fight inflation. \n",
|
||||
"\n",
|
||||
"Lower your costs, not your wages. \n",
|
||||
"\n",
|
||||
"Make more cars and semiconductors in America. \n",
|
||||
"\n",
|
||||
"More infrastructure and innovation in America. \n",
|
||||
"\n",
|
||||
"More goods moving faster and cheaper in America.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 18:\n",
|
||||
"\n",
|
||||
"So that’s my plan. It will grow the economy and lower costs for families. \n",
|
||||
"\n",
|
||||
"So what are we waiting for? Let’s get this done. And while you’re at it, confirm my nominees to the Federal Reserve, which plays a critical role in fighting inflation. \n",
|
||||
"\n",
|
||||
"My plan will not only lower costs to give families a fair shot, it will lower the deficit.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 19:\n",
|
||||
"\n",
|
||||
"Let each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world. \n",
|
||||
"\n",
|
||||
"Please rise if you are able and show that, Yes, we the United States of America stand with the Ukrainian people. \n",
|
||||
"\n",
|
||||
"Throughout our history we’ve learned this lesson when dictators do not pay a price for their aggression they cause more chaos. \n",
|
||||
"\n",
|
||||
"They keep moving. \n",
|
||||
"\n",
|
||||
"And the costs and the threats to America and the world keep rising.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 20:\n",
|
||||
"\n",
|
||||
"It’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n",
|
||||
"\n",
|
||||
"ARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n",
|
||||
"\n",
|
||||
"A unity agenda for the nation. \n",
|
||||
"\n",
|
||||
"We can do this. \n",
|
||||
"\n",
|
||||
"My fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n",
|
||||
"\n",
|
||||
"In this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
|
||||
"To disable this warning, you can either:\n",
|
||||
"\t- Avoid using `tokenizers` before the fork if possible\n",
|
||||
"\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"from langchain_community.vectorstores.faiss import FAISS\n",
|
||||
"from langchain_huggingface import HuggingFaceEmbeddings\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"documents = TextLoader(\"../../how_to/state_of_the_union.txt\").load()\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)\n",
|
||||
"texts = text_splitter.split_documents(documents)\n",
|
||||
"retriever = FAISS.from_documents(\n",
|
||||
" texts, HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n",
|
||||
").as_retriever(search_kwargs={\"k\": 20})\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = retriever.invoke(query)\n",
|
||||
"pretty_print_docs(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Reranking with VolcengineRerank\n",
|
||||
"Now let's wrap our base retriever with a `ContextualCompressionRetriever`. We'll use the `VolcengineRerank` to rerank the returned results."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Document 1:\n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 2:\n",
|
||||
"\n",
|
||||
"As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \n",
|
||||
"\n",
|
||||
"While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 3:\n",
|
||||
"\n",
|
||||
"We cannot let this happen. \n",
|
||||
"\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.retrievers import ContextualCompressionRetriever\n",
|
||||
"from langchain_community.document_compressors.volcengine_rerank import VolcengineRerank\n",
|
||||
"\n",
|
||||
"compressor = VolcengineRerank()\n",
|
||||
"compression_retriever = ContextualCompressionRetriever(\n",
|
||||
" base_compressor=compressor, base_retriever=retriever\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"compressed_docs = compression_retriever.invoke(\n",
|
||||
" \"What did the president say about Ketanji Jackson Brown\"\n",
|
||||
")\n",
|
||||
"pretty_print_docs(compressed_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -90,7 +90,9 @@
|
||||
"- `voyage-code-2`\n",
|
||||
"- `voyage-2`\n",
|
||||
"- `voyage-law-2`\n",
|
||||
"- `voyage-lite-02-instruct`"
|
||||
"- `voyage-lite-02-instruct`\n",
|
||||
"- `voyage-finance-2`\n",
|
||||
"- `voyage-multilingual-2`"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -336,7 +338,10 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Doing reranking with VoyageAIRerank\n",
|
||||
"Now let's wrap our base retriever with a `ContextualCompressionRetriever`. We'll use the Voyage AI reranker to rerank the returned results."
|
||||
"Now let's wrap our base retriever with a `ContextualCompressionRetriever`. We'll use the Voyage AI reranker to rerank the returned results. You can use any of the following Reranking models: ([source](https://docs.voyageai.com/docs/reranker)):\n",
|
||||
"\n",
|
||||
"- `rerank-1`\n",
|
||||
"- `rerank-lite-1`"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -9,8 +9,7 @@
|
||||
"\n",
|
||||
">[Diffbot](https://docs.diffbot.com/docs/getting-started-with-diffbot) is a suite of ML-based products that make it easy to structure web data.\n",
|
||||
">\n",
|
||||
">Diffbot's [Natural Language Processing API](https://www.diffbot.com/products/natural-language/) allows for the extraction of entities, relationships, and semantic meaning from unstructured text data.",
|
||||
"\n",
|
||||
">Diffbot's [Natural Language Processing API](https://www.diffbot.com/products/natural-language/) allows for the extraction of entities, relationships, and semantic meaning from unstructured text data.\n",
|
||||
"[](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/integrations/graphs/diffbot.ipynb)\n",
|
||||
"\n",
|
||||
"## Use case\n",
|
||||
@@ -70,8 +69,8 @@
|
||||
"source": [
|
||||
"from langchain_experimental.graph_transformers.diffbot import DiffbotGraphTransformer\n",
|
||||
"\n",
|
||||
"diffbot_api_token = \"DIFFBOT_API_TOKEN\"\n",
|
||||
"diffbot_nlp = DiffbotGraphTransformer(diffbot_api_token=diffbot_api_token)"
|
||||
"diffbot_api_key = \"DIFFBOT_KEY\"\n",
|
||||
"diffbot_nlp = DiffbotGraphTransformer(diffbot_api_key=diffbot_api_key)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -111,7 +110,7 @@
|
||||
" --name neo4j \\\n",
|
||||
" -p 7474:7474 -p 7687:7687 \\\n",
|
||||
" -d \\\n",
|
||||
" -e NEO4J_AUTH=neo4j/pleaseletmein \\\n",
|
||||
" -e NEO4J_AUTH=neo4j/password \\\n",
|
||||
" -e NEO4J_PLUGINS=\\[\\\"apoc\\\"\\] \\\n",
|
||||
" neo4j:latest\n",
|
||||
"``` \n",
|
||||
@@ -129,7 +128,7 @@
|
||||
"\n",
|
||||
"url = \"bolt://localhost:7687\"\n",
|
||||
"username = \"neo4j\"\n",
|
||||
"password = \"pleaseletmein\"\n",
|
||||
"password = \"password\"\n",
|
||||
"\n",
|
||||
"graph = Neo4jGraph(url=url, username=username, password=password)"
|
||||
]
|
||||
@@ -296,7 +295,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
"version": "3.9.18"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -164,10 +164,10 @@
|
||||
"text": [
|
||||
"Node properties:\n",
|
||||
"- **Movie**\n",
|
||||
" - `runtime: INTEGER` Min: 120, Max: 120\n",
|
||||
" - `name: STRING` Available options: ['Top Gun']\n",
|
||||
" - `runtime`: INTEGER Min: 120, Max: 120\n",
|
||||
" - `name`: STRING Available options: ['Top Gun']\n",
|
||||
"- **Actor**\n",
|
||||
" - `name: STRING` Available options: ['Tom Cruise', 'Val Kilmer', 'Anthony Edwards', 'Meg Ryan']\n",
|
||||
" - `name`: STRING Available options: ['Tom Cruise', 'Val Kilmer', 'Anthony Edwards', 'Meg Ryan']\n",
|
||||
"Relationship properties:\n",
|
||||
"\n",
|
||||
"The relationships:\n",
|
||||
@@ -225,7 +225,7 @@
|
||||
"WHERE m.name = 'Top Gun'\n",
|
||||
"RETURN a.name\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}, {'a.name': 'Val Kilmer'}, {'a.name': 'Tom Cruise'}]\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'a.name': 'Tom Cruise'}, {'a.name': 'Val Kilmer'}, {'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
@@ -234,7 +234,7 @@
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'query': 'Who played in Top Gun?',\n",
|
||||
" 'result': 'Anthony Edwards, Meg Ryan, Val Kilmer, Tom Cruise played in Top Gun.'}"
|
||||
" 'result': 'Tom Cruise, Val Kilmer, Anthony Edwards, and Meg Ryan played in Top Gun.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
@@ -286,7 +286,7 @@
|
||||
"WHERE m.name = 'Top Gun'\n",
|
||||
"RETURN a.name\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}]\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'a.name': 'Tom Cruise'}, {'a.name': 'Val Kilmer'}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
@@ -295,7 +295,7 @@
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'query': 'Who played in Top Gun?',\n",
|
||||
" 'result': 'Anthony Edwards, Meg Ryan played in Top Gun.'}"
|
||||
" 'result': 'Tom Cruise, Val Kilmer played in Top Gun.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
@@ -346,11 +346,11 @@
|
||||
"WHERE m.name = 'Top Gun'\n",
|
||||
"RETURN a.name\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}, {'a.name': 'Val Kilmer'}, {'a.name': 'Tom Cruise'}]\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'a.name': 'Tom Cruise'}, {'a.name': 'Val Kilmer'}, {'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"Intermediate steps: [{'query': \"MATCH (a:Actor)-[:ACTED_IN]->(m:Movie)\\nWHERE m.name = 'Top Gun'\\nRETURN a.name\"}, {'context': [{'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}, {'a.name': 'Val Kilmer'}, {'a.name': 'Tom Cruise'}]}]\n",
|
||||
"Final answer: Anthony Edwards, Meg Ryan, Val Kilmer, Tom Cruise played in Top Gun.\n"
|
||||
"Intermediate steps: [{'query': \"MATCH (a:Actor)-[:ACTED_IN]->(m:Movie)\\nWHERE m.name = 'Top Gun'\\nRETURN a.name\"}, {'context': [{'a.name': 'Tom Cruise'}, {'a.name': 'Val Kilmer'}, {'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}]}]\n",
|
||||
"Final answer: Tom Cruise, Val Kilmer, Anthony Edwards, and Meg Ryan played in Top Gun.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -406,10 +406,10 @@
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'query': 'Who played in Top Gun?',\n",
|
||||
" 'result': [{'a.name': 'Anthony Edwards'},\n",
|
||||
" {'a.name': 'Meg Ryan'},\n",
|
||||
" 'result': [{'a.name': 'Tom Cruise'},\n",
|
||||
" {'a.name': 'Val Kilmer'},\n",
|
||||
" {'a.name': 'Tom Cruise'}]}"
|
||||
" {'a.name': 'Anthony Edwards'},\n",
|
||||
" {'a.name': 'Meg Ryan'}]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
@@ -482,7 +482,7 @@
|
||||
"\n",
|
||||
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n",
|
||||
"Generated Cypher:\n",
|
||||
"\u001b[32;1m\u001b[1;3mMATCH (:Movie {name:\"Top Gun\"})<-[:ACTED_IN]-()\n",
|
||||
"\u001b[32;1m\u001b[1;3mMATCH (m:Movie {name:\"Top Gun\"})<-[:ACTED_IN]-()\n",
|
||||
"RETURN count(*) AS numberOfActors\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'numberOfActors': 4}]\u001b[0m\n",
|
||||
@@ -494,7 +494,7 @@
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'query': 'How many people played in Top Gun?',\n",
|
||||
" 'result': 'There were 4 actors who played in Top Gun.'}"
|
||||
" 'result': 'There were 4 actors in Top Gun.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
@@ -548,7 +548,7 @@
|
||||
"WHERE m.name = 'Top Gun'\n",
|
||||
"RETURN a.name\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}, {'a.name': 'Val Kilmer'}, {'a.name': 'Tom Cruise'}]\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'a.name': 'Tom Cruise'}, {'a.name': 'Val Kilmer'}, {'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
@@ -557,7 +557,7 @@
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'query': 'Who played in Top Gun?',\n",
|
||||
" 'result': 'Anthony Edwards, Meg Ryan, Val Kilmer, and Tom Cruise played in Top Gun.'}"
|
||||
" 'result': 'Tom Cruise, Val Kilmer, Anthony Edwards, and Meg Ryan played in Top Gun.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
@@ -661,7 +661,7 @@
|
||||
"WHERE m.name = 'Top Gun'\n",
|
||||
"RETURN a.name\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}, {'a.name': 'Val Kilmer'}, {'a.name': 'Tom Cruise'}]\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'a.name': 'Tom Cruise'}, {'a.name': 'Val Kilmer'}, {'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
@@ -670,7 +670,7 @@
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'query': 'Who played in Top Gun?',\n",
|
||||
" 'result': 'Anthony Edwards, Meg Ryan, Val Kilmer, Tom Cruise played in Top Gun.'}"
|
||||
" 'result': 'Tom Cruise, Val Kilmer, Anthony Edwards, and Meg Ryan played in Top Gun.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 22,
|
||||
@@ -683,12 +683,116 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3fa3f3d5-f7e7-4ca9-8f07-ca22b897f192",
|
||||
"cell_type": "markdown",
|
||||
"id": "81093062-eb7f-4d96-b1fd-c36b8f1b9474",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"source": [
|
||||
"## Provide context from database results as tool/function output\n",
|
||||
"\n",
|
||||
"You can use the `use_function_response` parameter to pass context from database results to an LLM as a tool/function output. This method improves the response accuracy and relevance of an answer as the LLM follows the provided context more closely.\n",
|
||||
"_You will need to use an LLM with native function calling support to use this feature_."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"id": "2be8f51c-e80a-4a60-ab1c-266450fc17cd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n",
|
||||
"Generated Cypher:\n",
|
||||
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie)\n",
|
||||
"WHERE m.name = 'Top Gun'\n",
|
||||
"RETURN a.name\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'a.name': 'Tom Cruise'}, {'a.name': 'Val Kilmer'}, {'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'query': 'Who played in Top Gun?',\n",
|
||||
" 'result': 'The main actors in Top Gun are Tom Cruise, Val Kilmer, Anthony Edwards, and Meg Ryan.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" llm=ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo\"),\n",
|
||||
" graph=graph,\n",
|
||||
" verbose=True,\n",
|
||||
" use_function_response=True,\n",
|
||||
")\n",
|
||||
"chain.invoke({\"query\": \"Who played in Top Gun?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "48a75785-5bc9-49a7-a41b-88bf3ef9d312",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can provide custom system message when using the function response feature by providing `function_response_system` to instruct the model on how to generate answers.\n",
|
||||
"\n",
|
||||
"_Note that `qa_prompt` will have no effect when using `use_function_response`_"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"id": "ddf0a61e-f104-4dbb-abbf-e65f3f57dd9a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n",
|
||||
"Generated Cypher:\n",
|
||||
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie)\n",
|
||||
"WHERE m.name = 'Top Gun'\n",
|
||||
"RETURN a.name\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'a.name': 'Tom Cruise'}, {'a.name': 'Val Kilmer'}, {'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'query': 'Who played in Top Gun?',\n",
|
||||
" 'result': \"Arrr matey! In the film Top Gun, ye be seein' Tom Cruise, Val Kilmer, Anthony Edwards, and Meg Ryan sailin' the high seas of the sky! Aye, they be a fine crew of actors, they be!\"}"
|
||||
]
|
||||
},
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" llm=ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo\"),\n",
|
||||
" graph=graph,\n",
|
||||
" verbose=True,\n",
|
||||
" use_function_response=True,\n",
|
||||
" function_response_system=\"Respond as a pirate!\",\n",
|
||||
")\n",
|
||||
"chain.invoke({\"query\": \"Who played in Top Gun?\"})"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -724,6 +724,83 @@
|
||||
"llm(\"Tell me joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9b2b2777",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## `MongoDB Atlas` Cache\n",
|
||||
"\n",
|
||||
"[MongoDB Atlas](https://www.mongodb.com/docs/atlas/) is a fully-managed cloud database available in AWS, Azure, and GCP. It has native support for \n",
|
||||
"Vector Search on the MongoDB document data.\n",
|
||||
"Use [MongoDB Atlas Vector Search](/docs/integrations/providers/mongodb_atlas) to semantically cache prompts and responses."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ecdc2a0a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### `MongoDBCache`\n",
|
||||
"An abstraction to store a simple cache in MongoDB. This does not use Semantic Caching, nor does it require an index to be made on the collection before generation.\n",
|
||||
"\n",
|
||||
"To import this cache:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"from langchain_mongodb.cache import MongoDBCache\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"To use this cache with your LLMs:\n",
|
||||
"```python\n",
|
||||
"from langchain_core.globals import set_llm_cache\n",
|
||||
"\n",
|
||||
"# use any embedding provider...\n",
|
||||
"from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n",
|
||||
"\n",
|
||||
"mongodb_atlas_uri = \"<YOUR_CONNECTION_STRING>\"\n",
|
||||
"COLLECTION_NAME=\"<YOUR_CACHE_COLLECTION_NAME>\"\n",
|
||||
"DATABASE_NAME=\"<YOUR_DATABASE_NAME>\"\n",
|
||||
"\n",
|
||||
"set_llm_cache(MongoDBCache(\n",
|
||||
" connection_string=mongodb_atlas_uri,\n",
|
||||
" collection_name=COLLECTION_NAME,\n",
|
||||
" database_name=DATABASE_NAME,\n",
|
||||
"))\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### `MongoDBAtlasSemanticCache`\n",
|
||||
"Semantic caching allows users to retrieve cached prompts based on semantic similarity between the user input and previously cached results. Under the hood it blends MongoDBAtlas as both a cache and a vectorstore.\n",
|
||||
"The MongoDBAtlasSemanticCache inherits from `MongoDBAtlasVectorSearch` and needs an Atlas Vector Search Index defined to work. Please look at the [usage example](/docs/integrations/vectorstores/mongodb_atlas) on how to set up the index.\n",
|
||||
"\n",
|
||||
"To import this cache:\n",
|
||||
"```python\n",
|
||||
"from langchain_mongodb.cache import MongoDBAtlasSemanticCache\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"To use this cache with your LLMs:\n",
|
||||
"```python\n",
|
||||
"from langchain_core.globals import set_llm_cache\n",
|
||||
"\n",
|
||||
"# use any embedding provider...\n",
|
||||
"from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n",
|
||||
"\n",
|
||||
"mongodb_atlas_uri = \"<YOUR_CONNECTION_STRING>\"\n",
|
||||
"COLLECTION_NAME=\"<YOUR_CACHE_COLLECTION_NAME>\"\n",
|
||||
"DATABASE_NAME=\"<YOUR_DATABASE_NAME>\"\n",
|
||||
"\n",
|
||||
"set_llm_cache(MongoDBAtlasSemanticCache(\n",
|
||||
" embedding=FakeEmbeddings(),\n",
|
||||
" connection_string=mongodb_atlas_uri,\n",
|
||||
" collection_name=COLLECTION_NAME,\n",
|
||||
" database_name=DATABASE_NAME,\n",
|
||||
"))\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"To find more resources about using MongoDBSemanticCache visit [here](https://www.mongodb.com/blog/post/introducing-semantic-caching-dedicated-mongodb-lang-chain-package-gen-ai-apps)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "726fe754",
|
||||
@@ -993,7 +1070,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdin",
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CASSANDRA_KEYSPACE = demo_keyspace\n"
|
||||
@@ -1029,7 +1106,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdin",
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"ASTRA_DB_ID = 01234567-89ab-cdef-0123-456789abcdef\n",
|
||||
@@ -1633,14 +1710,12 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from elasticsearch import Elasticsearch\n",
|
||||
"from langchain.globals import set_llm_cache\n",
|
||||
"from langchain_elasticsearch import ElasticsearchCache\n",
|
||||
"\n",
|
||||
"es_client = Elasticsearch(hosts=\"http://localhost:9200\")\n",
|
||||
"set_llm_cache(\n",
|
||||
" ElasticsearchCache(\n",
|
||||
" es_connection=es_client,\n",
|
||||
" es_url=\"http://localhost:9200\",\n",
|
||||
" index_name=\"llm-chat-cache\",\n",
|
||||
" metadata={\"project\": \"my_chatgpt_project\"},\n",
|
||||
" )\n",
|
||||
@@ -1684,7 +1759,6 @@
|
||||
"import json\n",
|
||||
"from typing import Any, Dict, List\n",
|
||||
"\n",
|
||||
"from elasticsearch import Elasticsearch\n",
|
||||
"from langchain.globals import set_llm_cache\n",
|
||||
"from langchain_core.caches import RETURN_VAL_TYPE\n",
|
||||
"from langchain_elasticsearch import ElasticsearchCache\n",
|
||||
@@ -1715,9 +1789,10 @@
|
||||
" ]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"es_client = Elasticsearch(hosts=\"http://localhost:9200\")\n",
|
||||
"set_llm_cache(\n",
|
||||
" SearchableElasticsearchCache(es_connection=es_client, index_name=\"llm-chat-cache\")\n",
|
||||
" SearchableElasticsearchCache(\n",
|
||||
" es_url=\"http://localhost:9200\", index_name=\"llm-chat-cache\"\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -2071,6 +2146,71 @@
|
||||
"# so it uses the cached result!\n",
|
||||
"llm(\"Tell me one joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ae1f5e1c-085e-4998-9f2d-b5867d2c3d5b",
|
||||
"metadata": {
|
||||
"execution": {
|
||||
"iopub.execute_input": "2024-05-31T17:18:43.345495Z",
|
||||
"iopub.status.busy": "2024-05-31T17:18:43.345015Z",
|
||||
"iopub.status.idle": "2024-05-31T17:18:43.351003Z",
|
||||
"shell.execute_reply": "2024-05-31T17:18:43.350073Z",
|
||||
"shell.execute_reply.started": "2024-05-31T17:18:43.345456Z"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"## Cache classes: summary table"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "65072e45-10bc-40f1-979b-2617656bbbce",
|
||||
"metadata": {
|
||||
"execution": {
|
||||
"iopub.execute_input": "2024-05-31T17:16:05.616430Z",
|
||||
"iopub.status.busy": "2024-05-31T17:16:05.616221Z",
|
||||
"iopub.status.idle": "2024-05-31T17:16:05.624164Z",
|
||||
"shell.execute_reply": "2024-05-31T17:16:05.623673Z",
|
||||
"shell.execute_reply.started": "2024-05-31T17:16:05.616418Z"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"**Cache** classes are implemented by inheriting the [BaseCache](https://api.python.langchain.com/en/latest/caches/langchain_core.caches.BaseCache.html) class.\n",
|
||||
"\n",
|
||||
"This table lists all 20 derived classes with links to the API Reference.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"| Namespace 🔻 | Class |\n",
|
||||
"|------------|---------|\n",
|
||||
"| langchain_astradb.cache | [AstraDBCache](https://api.python.langchain.com/en/latest/cache/langchain_astradb.cache.AstraDBCache.html) |\n",
|
||||
"| langchain_astradb.cache | [AstraDBSemanticCache](https://api.python.langchain.com/en/latest/cache/langchain_astradb.cache.AstraDBSemanticCache.html) |\n",
|
||||
"| langchain_community.cache | [AstraDBCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.AstraDBCache.html) |\n",
|
||||
"| langchain_community.cache | [AstraDBSemanticCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.AstraDBSemanticCache.html) |\n",
|
||||
"| langchain_community.cache | [AzureCosmosDBSemanticCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.AzureCosmosDBSemanticCache.html) |\n",
|
||||
"| langchain_community.cache | [CassandraCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.CassandraCache.html) |\n",
|
||||
"| langchain_community.cache | [CassandraSemanticCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.CassandraSemanticCache.html) |\n",
|
||||
"| langchain_community.cache | [GPTCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.GPTCache.html) |\n",
|
||||
"| langchain_community.cache | [InMemoryCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.InMemoryCache.html) |\n",
|
||||
"| langchain_community.cache | [MomentoCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.MomentoCache.html) |\n",
|
||||
"| langchain_community.cache | [OpenSearchSemanticCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.OpenSearchSemanticCache.html) |\n",
|
||||
"| langchain_community.cache | [RedisSemanticCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.RedisSemanticCache.html) |\n",
|
||||
"| langchain_community.cache | [SQLAlchemyCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.SQLAlchemyCache.html) |\n",
|
||||
"| langchain_community.cache | [SQLAlchemyMd5Cache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.SQLAlchemyMd5Cache.html) |\n",
|
||||
"| langchain_community.cache | [UpstashRedisCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.UpstashRedisCache.html) |\n",
|
||||
"| langchain_core.caches | [InMemoryCache](https://api.python.langchain.com/en/latest/caches/langchain_core.caches.InMemoryCache.html) |\n",
|
||||
"| langchain_elasticsearch.cache | [ElasticsearchCache](https://api.python.langchain.com/en/latest/cache/langchain_elasticsearch.cache.ElasticsearchCache.html) |\n",
|
||||
"| langchain_mongodb.cache | [MongoDBAtlasSemanticCache](https://api.python.langchain.com/en/latest/cache/langchain_mongodb.cache.MongoDBAtlasSemanticCache.html) |\n",
|
||||
"| langchain_mongodb.cache | [MongoDBCache](https://api.python.langchain.com/en/latest/cache/langchain_mongodb.cache.MongoDBCache.html) |\n"
|
||||
]
|
||||
},
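{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a rough sketch of what the table above implies: a custom cache only needs to inherit from `BaseCache` and implement `lookup`, `update`, and `clear`. The `DictCache` class below is illustrative only (its name is made up for this example); it is not one of the classes listed above.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from typing import Any, Dict, Optional, Tuple\n",
"\n",
"from langchain_core.caches import RETURN_VAL_TYPE, BaseCache\n",
"\n",
"\n",
"class DictCache(BaseCache):\n",
"    \"\"\"Illustrative cache keyed by (prompt, llm_string) in a plain dict.\"\"\"\n",
"\n",
"    def __init__(self) -> None:\n",
"        self._store: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {}\n",
"\n",
"    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:\n",
"        # Return the cached generations, or None on a cache miss.\n",
"        return self._store.get((prompt, llm_string))\n",
"\n",
"    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:\n",
"        # Store the generations produced for this prompt / LLM combination.\n",
"        self._store[(prompt, llm_string)] = return_val\n",
"\n",
"    def clear(self, **kwargs: Any) -> None:\n",
"        self._store.clear()"
]
},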
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "19067f14-c69a-4156-9504-af43a0713669",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -2089,7 +2229,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -12,6 +12,17 @@
|
||||
"This example goes over how to use LangChain to interact with Aleph Alpha models"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "84483bd5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Installing the langchain package needed to use the integration\n",
|
||||
"%pip install -qU langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
|
||||
@@ -9,6 +9,16 @@
|
||||
">[Machine Learning Platform for AI of Alibaba Cloud](https://www.alibabacloud.com/help/en/pai) is a machine learning or deep learning engineering platform intended for enterprises and developers. It provides easy-to-use, cost-effective, high-performance, and easy-to-scale plug-ins that can be applied to various industry scenarios. With over 140 built-in optimization algorithms, `Machine Learning Platform for AI` provides whole-process AI engineering capabilities including data labeling (`PAI-iTAG`), model building (`PAI-Designer` and `PAI-DSW`), model training (`PAI-DLC`), compilation optimization, and inference deployment (`PAI-EAS`). `PAI-EAS` supports different types of hardware resources, including CPUs and GPUs, and features high throughput and low latency. It allows you to deploy large-scale complex models with a few clicks and perform elastic scale-ins and scale-outs in real time. It also provides a comprehensive O&M and monitoring system."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"##Installing the langchain packages needed to use the integration\n",
|
||||
"%pip install -qU langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
|
||||
@@ -16,6 +16,16 @@
|
||||
">`API Gateway` handles all the tasks involved in accepting and processing up to hundreds of thousands of concurrent API calls, including traffic management, CORS support, authorization >and access control, throttling, monitoring, and API version management. `API Gateway` has no minimum fees or startup costs. You pay for the API calls you receive and the amount of data >transferred out and, with the `API Gateway` tiered pricing model, you can reduce your cost as your API usage scales."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"##Installing the langchain packages needed to use the integration\n",
|
||||
"%pip install -qU langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
|
||||
@@ -3,10 +3,15 @@
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "602a52a4",
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Anthropic\n",
|
||||
"sidebar_class_name: hidden\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
@@ -17,9 +22,13 @@
|
||||
"source": [
|
||||
"# AnthropicLLM\n",
|
||||
"\n",
|
||||
"This example goes over how to use LangChain to interact with `Anthropic` models.\n",
|
||||
":::caution\n",
|
||||
"You are currently on a page documenting the use of Anthropic legacy Claude 2 models as [text completion models](/docs/concepts/#llms). The latest and most popular Anthropic models are [chat completion models](/docs/concepts/#chat-models).\n",
|
||||
"\n",
|
||||
"NOTE: AnthropicLLM only supports legacy Claude 2 models. To use the newest Claude 3 models, please use [`ChatAnthropic`](/docs/integrations/chat/anthropic) instead.\n",
|
||||
"You are probably looking for [this page instead](/docs/integrations/chat/anthropic/).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"This example goes over how to use LangChain to interact with `Anthropic` models.\n",
|
||||
"\n",
|
||||
"## Installation"
|
||||
]
|
||||
|
||||
@@ -12,6 +12,17 @@
|
||||
"This example goes over how to use LangChain to interact with [Anyscale Endpoint](https://app.endpoints.anyscale.com/). "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "134bd228",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"##Installing the langchain packages needed to use the integration\n",
|
||||
"%pip install -qU langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
|
||||
@@ -18,6 +18,17 @@
|
||||
"To use, you should have the `aphrodite-engine` python package installed."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4dba1074",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"##Installing the langchain packages needed to use the integration\n",
|
||||
"%pip install -qU langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
|
||||
@@ -8,6 +8,16 @@
|
||||
"This notebook demonstrates how to use the `Arcee` class for generating text using Arcee's Domain Adapted Language Models (DALMs)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"##Installing the langchain packages needed to use the integration\n",
|
||||
"%pip install -qU langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
|
||||
@@ -11,6 +11,16 @@
|
||||
"This notebook goes over how to use an LLM hosted on an `Azure ML Online Endpoint`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"##Installing the langchain packages needed to use the integration\n",
|
||||
"%pip install -qU langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
|
||||
@@ -7,7 +7,13 @@
|
||||
"source": [
|
||||
"# Azure OpenAI\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use Langchain with [Azure OpenAI](https://aka.ms/azure-openai).\n",
|
||||
":::caution\n",
|
||||
"You are currently on a page documenting the use of Azure OpenAI [text completion models](/docs/concepts/#llms). The latest and most popular Azure OpenAI models are [chat completion models](/docs/concepts/#chat-models).\n",
|
||||
"\n",
|
||||
"Unless you are specifically using `gpt-3.5-turbo-instruct`, you are probably looking for [this page instead](/docs/integrations/chat/azure_chat_openai/).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"This page goes over how to use LangChain with [Azure OpenAI](https://aka.ms/azure-openai).\n",
|
||||
"\n",
|
||||
"The Azure OpenAI API is compatible with OpenAI's API. The `openai` Python package makes it easy to use both OpenAI and Azure OpenAI. You can call Azure OpenAI the same way you call OpenAI with the exceptions noted below.\n",
|
||||
"\n",
|
||||
|
||||
@@ -8,6 +8,16 @@
|
||||
"Baichuan Inc. (https://www.baichuan-ai.com/) is a Chinese startup in the era of AGI, dedicated to addressing fundamental human needs: Efficiency, Health, and Happiness."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"##Installing the langchain packages needed to use the integration\n",
|
||||
"%pip install -qU langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
|
||||
@@ -45,6 +45,16 @@
|
||||
"- AquilaChat-7B"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"##Installing the langchain packages needed to use the integration\n",
|
||||
"%pip install -qU langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
|
||||
@@ -12,6 +12,16 @@
|
||||
"This example goes over how to use LangChain to interact with Banana models"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"##Installing the langchain packages needed to use the integration\n",
|
||||
"%pip install -qU langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
|
||||
@@ -45,6 +45,16 @@
|
||||
"In this example, we'll work with Mistral 7B. [Deploy Mistral 7B here](https://app.baseten.co/explore/mistral_7b_instruct) and follow along with the deployed model's ID, found in the model dashboard."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"##Installing the langchain packages needed to use the integration\n",
|
||||
"%pip install -qU langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
|
||||
@@ -11,6 +11,12 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::caution\n",
|
||||
"You are currently on a page documenting the use of Amazon Bedrock models as [text completion models](/docs/concepts/#llms). Many popular models available on Bedrock are [chat completion models](/docs/concepts/#chat-models).\n",
|
||||
"\n",
|
||||
"You may be looking for [this page instead](/docs/integrations/chat/bedrock/).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
">[Amazon Bedrock](https://aws.amazon.com/bedrock/) is a fully managed service that offers a choice of \n",
|
||||
"> high-performing foundation models (FMs) from leading AI companies like `AI21 Labs`, `Anthropic`, `Cohere`, \n",
|
||||
"> `Meta`, `Stability AI`, and `Amazon` via a single API, along with a broad set of capabilities you need to \n",
|
||||
|
||||
@@ -7,6 +7,12 @@
|
||||
"source": [
|
||||
"# Cohere\n",
|
||||
"\n",
|
||||
":::caution\n",
|
||||
"You are currently on a page documenting the use of Cohere models as [text completion models](/docs/concepts/#llms). Many popular Cohere models are [chat completion models](/docs/concepts/#chat-models).\n",
|
||||
"\n",
|
||||
"You may be looking for [this page instead](/docs/integrations/chat/cohere/).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
">[Cohere](https://cohere.ai/about) is a Canadian startup that provides natural language processing models that help companies improve human-machine interactions.\n",
|
||||
"\n",
|
||||
"Head to the [API reference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.cohere.Cohere.html) for detailed documentation of all attributes and methods."
|
||||
@@ -193,7 +199,7 @@
|
||||
"id": "39198f7d-6fc8-4662-954a-37ad38c4bec4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also easily combine with a prompt template for easy structuring of user input. We can do this using [LCEL](/docs/concepts#langchain-expression-language)"
|
||||
"You can also easily combine with a prompt template for easy structuring of user input. We can do this using [LCEL](/docs/concepts#langchain-expression-language-lcel)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -7,6 +7,12 @@
|
||||
"source": [
|
||||
"# Fireworks\n",
|
||||
"\n",
|
||||
":::caution\n",
|
||||
"You are currently on a page documenting the use of Fireworks models as [text completion models](/docs/concepts/#llms). Many popular Fireworks models are [chat completion models](/docs/concepts/#chat-models).\n",
|
||||
"\n",
|
||||
"You may be looking for [this page instead](/docs/integrations/chat/fireworks/).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
">[Fireworks](https://app.fireworks.ai/) accelerates product development on generative AI by creating an innovative AI experiment and production platform. \n",
|
||||
"\n",
|
||||
"This example goes over how to use LangChain to interact with `Fireworks` models."
|
||||
|
||||
@@ -25,6 +25,12 @@
|
||||
"id": "bead5ede-d9cc-44b9-b062-99c90a10cf40",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::caution\n",
|
||||
"You are currently on a page documenting the use of Google models as [text completion models](/docs/concepts/#llms). Many popular Google models are [chat completion models](/docs/concepts/#chat-models).\n",
|
||||
"\n",
|
||||
"You may be looking for [this page instead](/docs/integrations/chat/google_generative_ai/).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"A guide on using [Google Generative AI](https://developers.generativeai.google/) models with Langchain. Note: It's separate from Google Cloud Vertex AI [integration](/docs/integrations/llms/google_vertex_ai_palm)."
|
||||
]
|
||||
},
|
||||
|
||||
@@ -15,6 +15,12 @@
|
||||
"source": [
|
||||
"# Google Cloud Vertex AI\n",
|
||||
"\n",
|
||||
":::caution\n",
|
||||
"You are currently on a page documenting the use of Google Vertex [text completion models](/docs/concepts/#llms). Many Google models are [chat completion models](/docs/concepts/#chat-models).\n",
|
||||
"\n",
|
||||
"You may be looking for [this page instead](/docs/integrations/chat/google_vertex_ai_palm/).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"**Note:** This is separate from the `Google Generative AI` integration, it exposes [Vertex AI Generative API](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/overview) on `Google Cloud`.\n",
|
||||
"\n",
|
||||
"VertexAI exposes all foundational models available in google cloud:\n",
|
||||
@@ -328,7 +334,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also easily combine with a prompt template for easy structuring of user input. We can do this using [LCEL](/docs/concepts#langchain-expression-language)"
|
||||
"You can also easily combine with a prompt template for easy structuring of user input. We can do this using [LCEL](/docs/concepts#langchain-expression-language-lcel)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -121,6 +121,28 @@
|
||||
"print(chain.invoke({\"question\": question}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b4a31db5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To get response without prompt, you can bind `skip_prompt=True` with LLM."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5e4aaad2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = prompt | hf.bind(skip_prompt=True)\n",
|
||||
"\n",
|
||||
"question = \"What is electroencephalography?\"\n",
|
||||
"\n",
|
||||
"print(chain.invoke({\"question\": question}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dbbc3a37",
|
||||
|
||||
@@ -6,6 +6,12 @@
|
||||
"source": [
|
||||
"# Ollama\n",
|
||||
"\n",
|
||||
":::caution\n",
|
||||
"You are currently on a page documenting the use of Ollama models as [text completion models](/docs/concepts/#llms). Many popular Ollama models are [chat completion models](/docs/concepts/#chat-models).\n",
|
||||
"\n",
|
||||
"You may be looking for [this page instead](/docs/integrations/chat/ollama/).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"[Ollama](https://ollama.ai/) allows you to run open-source large language models, such as Llama 2, locally.\n",
|
||||
"\n",
|
||||
"Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. \n",
|
||||
|
||||
@@ -7,6 +7,12 @@
|
||||
"source": [
|
||||
"# OpenAI\n",
|
||||
"\n",
|
||||
":::caution\n",
|
||||
"You are currently on a page documenting the use of OpenAI [text completion models](/docs/concepts/#llms). The latest and most popular OpenAI models are [chat completion models](/docs/concepts/#chat-models).\n",
|
||||
"\n",
|
||||
"Unless you are specifically using `gpt-3.5-turbo-instruct`, you are probably looking for [this page instead](/docs/integrations/chat/openai/).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"[OpenAI](https://platform.openai.com/docs/introduction) offers a spectrum of models with different levels of power suitable for different tasks.\n",
|
||||
"\n",
|
||||
"This example goes over how to use LangChain to interact with `OpenAI` [models](https://platform.openai.com/docs/models)"
|
||||
|
||||
@@ -31,7 +31,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade-strategy eager \"optimum[openvino,nncf]\" --quiet"
|
||||
"%pip install --upgrade-strategy eager \"optimum[openvino,nncf]\" langchain-huggingface --quiet"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -130,6 +130,28 @@
|
||||
"print(chain.invoke({\"question\": question}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "446a01e0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To get response without prompt, you can bind `skip_prompt=True` with LLM."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e3baeab2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = prompt | ov_llm.bind(skip_prompt=True)\n",
|
||||
"\n",
|
||||
"question = \"What is electroencephalography?\"\n",
|
||||
"\n",
|
||||
"print(chain.invoke({\"question\": question}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "12524837-e9ab-455a-86be-66b95f4f893a",
|
||||
@@ -243,7 +265,8 @@
|
||||
" skip_prompt=True,\n",
|
||||
" skip_special_tokens=True,\n",
|
||||
")\n",
|
||||
"ov_llm.pipeline._forward_params = {\"streamer\": streamer, \"max_new_tokens\": 100}\n",
|
||||
"pipeline_kwargs = {\"pipeline_kwargs\": {\"streamer\": streamer, \"max_new_tokens\": 100}}\n",
|
||||
"chain = prompt | ov_llm.bind(**pipeline_kwargs)\n",
|
||||
"\n",
|
||||
"t1 = Thread(target=chain.invoke, args=({\"question\": question},))\n",
|
||||
"t1.start()\n",
|
||||
|
||||
@@ -7,6 +7,12 @@
|
||||
"source": [
|
||||
"# Together AI\n",
|
||||
"\n",
|
||||
":::caution\n",
|
||||
"You are currently on a page documenting the use of Together AI models as [text completion models](/docs/concepts/#llms). Many popular Together AI models are [chat completion models](/docs/concepts/#chat-models).\n",
|
||||
"\n",
|
||||
"You may be looking for [this page instead](/docs/integrations/chat/together/).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"[Together AI](https://www.together.ai/) offers an API to query [50+ leading open-source models](https://docs.together.ai/docs/inference-models) in a couple lines of code.\n",
|
||||
"\n",
|
||||
"This example goes over how to use LangChain to interact with Together AI models."
|
||||
|
||||
245 docs/docs/integrations/memory/kafka_chat_message_history.ipynb (new file)
@@ -0,0 +1,245 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c21deb80-9cf7-4185-8205-a38110152d2c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Kafka\n",
|
||||
"\n",
|
||||
"[Kafka](https://github.com/apache/kafka) is a distributed messaging system that is used to publish and subscribe to streams of records. \n",
|
||||
"This demo shows how to use `KafkaChatMessageHistory` to store and retrieve chat messages from a Kafka cluster."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c7c4fc02-18ac-4285-b8d6-507357e2aa13",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"A running Kafka cluster is required to run the demo. You can follow this [instruction](https://developer.confluent.io/get-started/python) to create a Kafka cluster locally."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f09f3b45-c4ff-4e59-bf79-238cc85d6465",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_message_histories import KafkaChatMessageHistory\n",
|
||||
"\n",
|
||||
"chat_session_id = \"chat-message-history-kafka\"\n",
|
||||
"bootstrap_servers = \"localhost:64797\" # host:port. `localhost:Plaintext Ports` if setup Kafka cluster locally\n",
|
||||
"history = KafkaChatMessageHistory(\n",
|
||||
" chat_session_id,\n",
|
||||
" bootstrap_servers,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "109812d2-85c5-4a65-a8a0-2d16eb80347b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Optional parameters to construct `KafkaChatMessageHistory`:\n",
|
||||
" - `ttl_ms`: Time to live in milliseconds for the chat messages.\n",
|
||||
" - `partition`: Number of partition of the topic to store the chat messages.\n",
|
||||
" - `replication_factor`: Replication factor of the topic to store the chat messages."
|
||||
]
|
||||
},
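{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal construction sketch using the optional parameters above. It assumes they are accepted as keyword arguments, and the values shown are illustrative rather than recommendations:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"history_with_options = KafkaChatMessageHistory(\n",
"    chat_session_id,\n",
"    bootstrap_servers,\n",
"    ttl_ms=3600000,  # keep chat messages for one hour\n",
"    partition=3,  # number of partitions for the backing topic\n",
"    replication_factor=1,  # single broker in a local cluster\n",
")"
]
},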
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c8fba39f-650b-4192-94ea-1a2a89f5348d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`KafkaChatMessageHistory` internally uses Kafka consumer to read chat messages, and it has the ability to mark the consumed position persistently. It has following methods to retrieve chat messages:\n",
|
||||
"- `messages`: continue consuming chat messages from last one.\n",
|
||||
"- `messages_from_beginning`: reset the consumer to the beginning of the history and consume messages. Optional parameters:\n",
|
||||
" 1. `max_message_count`: maximum number of messages to read.\n",
|
||||
" 2. `max_time_sec`: maximum time in seconds to read messages.\n",
|
||||
"- `messages_from_latest`: reset the consumer to the end of the chat history and try consuming messages. Optional parameters same as above.\n",
|
||||
"- `messages_from_last_consumed`: return messages continuing from the last consumed message, similar to `messages`, but with optional parameters.\n",
|
||||
"\n",
|
||||
"`max_message_count` and `max_time_sec` are used to avoid blocking indefinitely when retrieving messages.\n",
|
||||
"As a result, `messages` and other methods to retrieve messages may not return all messages in the chat history. You will need to specify `max_message_count` and `max_time_sec` to retrieve all chat history in a single batch.\n"
|
||||
]
|
||||
},
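{
"cell_type": "markdown",
"metadata": {},
"source": [
"For example, a bounded read continuing from the last consumed message might look like the sketch below (assuming the optional parameters are passed as keyword arguments; the limits shown are arbitrary):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Read at most 10 messages, waiting at most 5 seconds, continuing after\n",
"# the last consumed message.\n",
"history.messages_from_last_consumed(max_message_count=10, max_time_sec=5)"
]
},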
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "caf2176b-db7a-451a-a292-3d9fde585ded",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Add messages and retrieve."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "7e52a70d-3921-4614-b8cd-53b8d3c2deb4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='hi!'), AIMessage(content='whats up?')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"history.add_user_message(\"hi!\")\n",
|
||||
"history.add_ai_message(\"whats up?\")\n",
|
||||
"\n",
|
||||
"history.messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "874ce388-da8f-4796-b9ca-3ac114195b10",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Calling `messages` again returns an empty list because the consumer is at the end of the chat history."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "f863618e-7da1-4f46-9182-7a1387b93b16",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"history.messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e108255b-c240-44f7-9ecc-52bf04cd15b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Add new messages and continue consuming."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "31aa7403-5392-4ad4-ba43-226020a274e3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='hi again!'), AIMessage(content='whats up again?')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"history.add_user_message(\"hi again!\")\n",
|
||||
"history.add_ai_message(\"whats up again?\")\n",
|
||||
"history.messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5062fabc-c605-40dd-933b-c68de2727874",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To reset the consumer and read from beginning:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "005816ae-c8ed-4e41-9ecd-b6432578c8f1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='hi again!'),\n",
|
||||
" AIMessage(content='whats up again?'),\n",
|
||||
" HumanMessage(content='hi!'),\n",
|
||||
" AIMessage(content='whats up?')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"history.messages_from_beginning()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "42cc7bed-5cd7-417f-94fd-fe2e511cc9c6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Set the consumer to the end of the chat history, add a couple of new messages, and consume:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "d8b4f1cd-fa47-461b-b1b6-278ad54e9ac5",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='HI!'), AIMessage(content='WHATS UP?')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"history.messages_from_latest()\n",
|
||||
"history.add_user_message(\"HI!\")\n",
|
||||
"history.add_ai_message(\"WHATS UP?\")\n",
|
||||
"history.messages"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.18"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -14,6 +14,18 @@ pip install -U langchain-anthropic
|
||||
You need to set the `ANTHROPIC_API_KEY` environment variable.
You can get an Anthropic API key [here](https://console.anthropic.com/settings/keys).
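
One way to set it for the current process (a sketch for interactive use; any secure way of supplying the key works just as well):

```python
import getpass
import os

# Prompt for the key so it never gets hard-coded in a file.
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass("Anthropic API key: ")
```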
|
||||
|
||||
## Chat Models
|
||||
|
||||
### ChatAnthropic
|
||||
|
||||
See a [usage example](/docs/integrations/chat/anthropic).
|
||||
|
||||
```python
|
||||
from langchain_anthropic import ChatAnthropic
|
||||
|
||||
model = ChatAnthropic(model='claude-3-opus-20240229')
|
||||
```
|
||||
|
||||
## LLMs
|
||||
|
||||
### [Legacy] AnthropicLLM
|
||||
@@ -28,17 +40,3 @@ from langchain_anthropic import AnthropicLLM
|
||||
|
||||
model = AnthropicLLM(model='claude-2.1')
|
||||
```
|
||||
|
||||
## Chat Models
|
||||
|
||||
### ChatAnthropic
|
||||
|
||||
See a [usage example](/docs/integrations/chat/anthropic).
|
||||
|
||||
```python
|
||||
from langchain_anthropic import ChatAnthropic
|
||||
|
||||
model = ChatAnthropic(model='claude-3-opus-20240229')
|
||||
```
|
||||
|
||||
|
||||
|
||||