Mirror of https://github.com/hwchase17/langchain.git
synced 2026-02-06 09:10:27 +00:00
Compare commits
281 Commits
cc/bind_to ... eugene/rat
| Author | SHA1 | Date |
|---|---|---|
| | de339acbc1 | |
| | c929c0e38d | |
| | 2ec1b96527 | |
| | 0fd685fb7d | |
| | be89bf47e8 | |
| | b3419a3018 | |
| | 98cbfb9643 | |
| | 5776402f92 | |
| | f5e36246e3 | |
| | 7dd6b32991 | |
| | 4b1b7959a2 | |
| | afee851645 | |
| | a73e2222d4 | |
| | e160b669c8 | |
| | d59c656ea5 | |
| | 2394807033 | |
| | acfce30017 | |
| | 8f3c052db1 | |
| | 29a3b3a711 | |
| | 20fe4deea0 | |
| | 3a55f4bfe9 | |
| | fea9ff3831 | |
| | b55f6105c6 | |
| | 4585eaef1b | |
| | f337f3ed36 | |
| | 22175738ac | |
| | 12c3454fd9 | |
| | e271965d1e | |
| | b9bea36dd4 | |
| | da06d4d7af | |
| | 5f73c836a6 | |
| | 597be7d501 | |
| | 379803751e | |
| | ad18afc3ec | |
| | 464a525a5a | |
| | 0f45ac4088 | |
| | ac41c97d21 | |
| | aaf788b7cb | |
| | 47ae06698f | |
| | 03881c6743 | |
| | 2d6b0bf3e3 | |
| | ee3955c68c | |
| | 325068bb53 | |
| | bff6ca78a2 | |
| | 6878bc39b5 | |
| | 55e66aa40c | |
| | 9b7db08184 | |
| | 8691a5a37f | |
| | 4919d5d6df | |
| | 918e1c8a93 | |
| | 58def6e34d | |
| | e787532479 | |
| | e80b0932ee | |
| | 9e06991aae | |
| | a14e02ab33 | |
| | 378db2e1a5 | |
| | a197a8e184 | |
| | 0bb54ab9f0 | |
| | f47b4edcc2 | |
| | 837a3d400b | |
| | 20b72a044c | |
| | 70c71efcab | |
| | a5a3d28776 | |
| | 2a70a07aad | |
| | 5ac936a284 | |
| | 3c4652c906 | |
| | 2c6b9e8771 | |
| | 1639ccfd15 | |
| | ab036c1a4c | |
| | 3dce2e1d35 | |
| | c48e99e7f2 | |
| | 8a140ee77c | |
| | df357f82ca | |
| | 236e957abb | |
| | 199e64d372 | |
| | 1f01c0fd98 | |
| | 884f76e05a | |
| | a45337ea07 | |
| | 1318d534af | |
| | 10e3982b59 | |
| | 721f709dec | |
| | 02f0a29293 | |
| | dcba7df2fe | |
| | 0f7569ddbc | |
| | 5ade0187d0 | |
| | 0f6737cbfe | |
| | 7ab82eb8cc | |
| | 37b89fb7fc | |
| | 40c02cedaf | |
| | cecd875cdc | |
| | 0c6a3fdd6b | |
| | d98b830e4b | |
| | 6b08a33fa4 | |
| | 947628311b | |
| | c1d1fc13c2 | |
| | 74e3d796f1 | |
| | 7b28359719 | |
| | 5e48f35fba | |
| | 838464de25 | |
| | f4ee3c8a22 | |
| | 50cb0a03bc | |
| | 842065a9cc | |
| | 27ad6a4bb3 | |
| | dda9438e87 | |
| | 604dfe2d99 | |
| | f101c759ed | |
| | 372c27f2e5 | |
| | 6a45bf9554 | |
| | f5856680fe | |
| | 07715f815b | |
| | 020cc1cf3e | |
| | 9aae8ef416 | |
| | 06f47678ae | |
| | 9c3da11910 | |
| | 5affbada61 | |
| | f9d64d22e5 | |
| | 3691701d58 | |
| | ef049769f0 | |
| | cd19ba9a07 | |
| | 83f3d95ffa | |
| | b5acb91080 | |
| | f99369a54c | |
| | 242b085be7 | |
| | c3308f31bc | |
| | c50dd79512 | |
| | aade9bfde5 | |
| | 0ee6ed76ca | |
| | 62b6965d2a | |
| | ef22ebe431 | |
| | f62b323108 | |
| | b2bc15e640 | |
| | 61ea7bf60b | |
| | 4c651ba13a | |
| | 334fc1ed1c | |
| | ba74341eee | |
| | 3adf710f1d | |
| | 07c5c60f63 | |
| | aade1550c6 | |
| | 63c60a31f0 | |
| | 242de9aa5e | |
| | 916b813107 | |
| | 1c65529fd7 | |
| | 6182a402f1 | |
| | 0dec72cab0 | |
| | 570566b858 | |
| | f9baaae3ec | |
| | 4da1df568a | |
| | 96ccba9c27 | |
| | a4c101ae97 | |
| | c5a07e2dd8 | |
| | 80f3d48195 | |
| | 7d83189b19 | |
| | eb26b5535a | |
| | 96bac8e20d | |
| | 034a8c7c1b | |
| | a402de3dae | |
| | a47f69a120 | |
| | cc2cbfabfc | |
| | 9e4a0e76f6 | |
| | 81639243e2 | |
| | 61976a4147 | |
| | b5360e2e5f | |
| | 4cf67084d3 | |
| | bcb5f354ad | |
| | 24e9b48d15 | |
| | cf28708e7b | |
| | 2a3288b15d | |
| | 1792684e8f | |
| | e60ad12521 | |
| | fc41730e28 | |
| | 47ed7f766a | |
| | 80e7cd6cff | |
| | 6c3e65a878 | |
| | 616196c620 | |
| | dd7938ace8 | |
| | ef07308c30 | |
| | 049bc37111 | |
| | 5ccf8ebfac | |
| | 1e9cc02ed8 | |
| | dc42279eb5 | |
| | e38bf08139 | |
| | 5caa381177 | |
| | c59e663365 | |
| | 0c1889c713 | |
| | 5fcf2ef7ca | |
| | 77dd327282 | |
| | f5a38772a8 | |
| | 5f2dea2b20 | |
| | 69b1603173 | |
| | d83164f837 | |
| | 198b85334f | |
| | 7aeaa1974d | |
| | 1c753d1e81 | |
| | 6716379f0c | |
| | 58fdb070fa | |
| | 1d7a3ae7ce | |
| | d2f671271e | |
| | a3c10fc6ce | |
| | 5171ffc026 | |
| | 6c7d9f93b9 | |
| | 8f4620f4b8 | |
| | 9d97de34ae | |
| | 56cca23745 | |
| | 66bebeb76a | |
| | b0aa915dea | |
| | d93ae756e6 | |
| | 1244e66bd4 | |
| | a001037319 | |
| | 20151384d7 | |
| | d895614d19 | |
| | 9d0c1d2dc9 | |
| | a7296bddc2 | |
| | c9473367b1 | |
| | f77659463a | |
| | ccdaf14eff | |
| | cacdf96f9c | |
| | 36ee083753 | |
| | e8a21146d3 | |
| | a0958c0607 | |
| | 620b118c70 | |
| | 888fbc07b5 | |
| | ab2d7821a7 | |
| | 6fc7610b1c | |
| | 0da5078cad | |
| | d0728b0ba0 | |
| | 9224027e45 | |
| | 5c3e2612da | |
| | 65321bf975 | |
| | 2b7d1cdd2f | |
| | a653b209ba | |
| | f071581aea | |
| | f0a7581b50 | |
| | 474b88326f | |
| | bdc03997c9 | |
| | 3f1cf00d97 | |
| | 6b47c7361e | |
| | 7677ceea60 | |
| | aee55eda39 | |
| | d09dda5a08 | |
| | 12950cc602 | |
| | e8ee781a42 | |
| | 02e71cebed | |
| | 259d4d2029 | |
| | 3aed74a6fc | |
| | 13b0d7ec8f | |
| | 71cd6e6feb | |
| | 99054e19eb | |
| | 7a1321e2f9 | |
| | cb5031f22f | |
| | f1618ec540 | |
| | 8d82a0d483 | |
| | 0a1e475a30 | |
| | 6166ea67a8 | |
| | d77d9bfc00 | |
| | aa3e3cfa40 | |
| | 14ba1d4b45 | |
| | 18da9f5e59 | |
| | d3a2b9fae0 | |
| | 7014d07cab | |
| | 35784d1c33 | |
| | 8858846607 | |
| | ffe6ca986e | |
| | 7790d67f94 | |
| | 1132fb801b | |
| | 1d37aa8403 | |
| | cb95198398 | |
| | d002fa902f | |
| | 8d100c58de | |
| | 5fd1e67808 | |
| | eeb996034b | |
| | 03fba07d15 | |
| | c481a2715d | |
| | 8ee8ca7c83 | |
| | 4121d4151f | |
| | bd18faa2a0 | |
| | f1f1f75782 | |
| | 4ba14adec6 | |
| | 457677c1b7 | |
| | 8327925ab7 | |
| | 122e80e04d | |
| | c4417ea93c | |
```diff
@@ -5,10 +5,10 @@ services:
       dockerfile: libs/langchain/dev.Dockerfile
       context: ..
     volumes:
-      # Update this to wherever you want VS Code to mount the folder of your project
+      # Update this to wherever you want VS Code to mount the folder of your project
       - ..:/workspaces/langchain:cached
     networks:
-      - langchain-network
+      - langchain-network
     # environment:
     #   MONGO_ROOT_USERNAME: root
     #   MONGO_ROOT_PASSWORD: example123
@@ -28,5 +28,3 @@ services:
 networks:
   langchain-network:
     driver: bridge
-
-
```
34 .github/scripts/check_diff.py (vendored)

```diff
@@ -6,6 +6,7 @@ import sys
 import tomllib
 from collections import defaultdict
 from typing import Dict, List, Set
+from pathlib import Path


 LANGCHAIN_DIRS = [
@@ -26,17 +27,48 @@ def all_package_dirs() -> Set[str]:


 def dependents_graph() -> dict:
     """
     Construct a mapping of package -> dependents, such that we can
     run tests on all dependents of a package when a change is made.
     """
     dependents = defaultdict(set)

     for path in glob.glob("./libs/**/pyproject.toml", recursive=True):
         if "template" in path:
             continue

+        # load regular and test deps from pyproject.toml
         with open(path, "rb") as f:
             pyproject = tomllib.load(f)["tool"]["poetry"]

         pkg_dir = "libs" + "/".join(path.split("libs")[1].split("/")[:-1])
-        for dep in pyproject["dependencies"]:
+        for dep in [
+            *pyproject["dependencies"].keys(),
+            *pyproject["group"]["test"]["dependencies"].keys(),
+        ]:
             if "langchain" in dep:
                 dependents[dep].add(pkg_dir)
+                continue
+
+        # load extended deps from extended_testing_deps.txt
+        package_path = Path(path).parent
+        extended_requirement_path = package_path / "extended_testing_deps.txt"
+        if extended_requirement_path.exists():
+            with open(extended_requirement_path, "r") as f:
+                extended_deps = f.read().splitlines()
+                for depline in extended_deps:
+                    if depline.startswith("-e "):
+                        # editable dependency
+                        assert depline.startswith(
+                            "-e ../partners/"
+                        ), "Extended test deps should only editable install partner packages"
+                        partner = depline.split("partners/")[1]
+                        dep = f"langchain-{partner}"
+                    else:
+                        dep = depline.split("==")[0]
+
+                    if "langchain" in dep:
+                        dependents[dep].add(pkg_dir)
     return dependents
```
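For orientation, here is a minimal sketch of how the mapping built by `dependents_graph` is shaped and how it can drive test selection. The toy package names and directories below are hypothetical; the real graph is derived from each package's pyproject.toml and extended_testing_deps.txt as shown in the diff above.

```python
from collections import defaultdict

# Hypothetical graph for a toy monorepo (illustrative values only).
dependents = defaultdict(set)
dependents["langchain-core"] |= {"libs/langchain", "libs/community"}
dependents["langchain"] |= {"libs/experimental"}

# When a package changes, test its own directory plus every dependent's.
changed = "langchain-core"
dirs_to_test = {"libs/core"} | dependents[changed]
print(sorted(dirs_to_test))  # ['libs/community', 'libs/core', 'libs/langchain']
```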
35 .github/scripts/check_prerelease_dependencies.py (new file)

@@ -0,0 +1,35 @@

```python
import sys
import tomllib

if __name__ == "__main__":
    # Get the TOML file path from the command line argument
    toml_file = sys.argv[1]

    # read toml file
    with open(toml_file, "rb") as file:
        toml_data = tomllib.load(file)

    # see if we're releasing an rc
    version = toml_data["tool"]["poetry"]["version"]
    releasing_rc = "rc" in version

    # if not, iterate through dependencies and make sure none allow prereleases
    if not releasing_rc:
        dependencies = toml_data["tool"]["poetry"]["dependencies"]
        for lib in dependencies:
            dep_version = dependencies[lib]
            dep_version_string = (
                dep_version["version"] if isinstance(dep_version, dict) else dep_version
            )

            if "rc" in dep_version_string:
                raise ValueError(
                    f"Dependency {lib} has a prerelease version. Please remove this."
                )

            if isinstance(dep_version, dict) and dep_version.get(
                "allow-prereleases", False
            ):
                raise ValueError(
                    f"Dependency {lib} has allow-prereleases set to true. Please remove this."
                )
```
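To illustrate what the new check rejects, here is the same logic applied to an in-memory pyproject fragment; the package and version values are made up for the example.

```python
# Hypothetical pyproject data: the package itself is a stable release,
# but one dependency pins a prerelease, which the script forbids.
toml_data = {
    "tool": {
        "poetry": {
            "version": "0.2.0",
            "dependencies": {
                "langchain-core": {"version": "^0.2.0rc1"},
            },
        }
    }
}

version = toml_data["tool"]["poetry"]["version"]
if "rc" not in version:
    for lib, dep_version in toml_data["tool"]["poetry"]["dependencies"].items():
        dep_version_string = (
            dep_version["version"] if isinstance(dep_version, dict) else dep_version
        )
        if "rc" in dep_version_string:
            # The real script raises ValueError here.
            print(f"Dependency {lib} has a prerelease version. Please remove this.")
```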
20 .github/scripts/get_min_versions.py (vendored)

```diff
@@ -1,6 +1,11 @@
 import sys

-import tomllib
+if sys.version_info >= (3, 11):
+    import tomllib
+else:
+    # for python 3.10 and below, which doesnt have stdlib tomllib
+    import tomli as tomllib

 from packaging.version import parse as parse_version
 import re

@@ -9,8 +14,11 @@ MIN_VERSION_LIBS = [
     "langchain-community",
     "langchain",
     "langchain-text-splitters",
+    "SQLAlchemy",
 ]

+SKIP_IF_PULL_REQUEST = ["langchain-core"]
+

 def get_min_version(version: str) -> str:
     # base regex for x.x.x with cases for rc/post/etc
@@ -37,7 +45,7 @@ def get_min_version(version: str) -> str:
     raise ValueError(f"Unrecognized version format: {version}")


-def get_min_version_from_toml(toml_path: str):
+def get_min_version_from_toml(toml_path: str, versions_for: str):
     # Parse the TOML file
     with open(toml_path, "rb") as file:
         toml_data = tomllib.load(file)
@@ -50,6 +58,10 @@ def get_min_version_from_toml(toml_path: str):
     # Iterate over the libs in MIN_VERSION_LIBS
     for lib in MIN_VERSION_LIBS:
+        if versions_for == "pull_request" and lib in SKIP_IF_PULL_REQUEST:
+            # some libs only get checked on release because of simultaneous
+            # changes
+            continue
         # Check if the lib is present in the dependencies
         if lib in dependencies:
             # Get the version string
@@ -70,8 +82,10 @@ def get_min_version_from_toml(toml_path: str):
 if __name__ == "__main__":
     # Get the TOML file path from the command line argument
     toml_file = sys.argv[1]
+    versions_for = sys.argv[2]
+    assert versions_for in ["release", "pull_request"]

     # Call the function to get the minimum versions
-    min_versions = get_min_version_from_toml(toml_file)
+    min_versions = get_min_version_from_toml(toml_file, versions_for)

     print(" ".join([f"{lib}=={version}" for lib, version in min_versions.items()]))
```
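The body of `get_min_version` is elided in the hunk above (only its final `raise` is visible). As a rough, hedged sketch of the mapping it performs, a caret, tilde, or `>=` specifier reduces to its lower bound; the regex and examples below are illustrative, not the script's exact code.

```python
import re


def min_version_sketch(spec: str) -> str:
    # Illustrative only: strip ^ / ~ / >= and return the lower bound.
    # The real get_min_version also handles rc/post/etc suffixes.
    match = re.match(r"^[\^~]?>?=?\s*(\d+(?:\.\d+){0,2})", spec)
    if match is None:
        raise ValueError(f"Unrecognized version format: {spec}")
    return match.group(1)


assert min_version_sketch("^0.1.17") == "0.1.17"
assert min_version_sketch(">=0.5.14,<0.6") == "0.5.14"
assert min_version_sketch("~2.0") == "2.0"
```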
```diff
@@ -21,14 +21,6 @@ jobs:
       run:
         working-directory: ${{ inputs.working-directory }}
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version:
-          - "3.8"
-          - "3.9"
-          - "3.10"
-          - "3.11"
-          - "3.12"
     name: "poetry run pytest -m compile tests/integration_tests #${{ inputs.python-version }}"
     steps:
       - uses: actions/checkout@v4
```
10 .github/workflows/_release.yml (vendored)

```diff
@@ -189,7 +189,7 @@ jobs:
             --extra-index-url https://test.pypi.org/simple/ \
             "$PKG_NAME==$VERSION" || \
           ( \
-            sleep 5 && \
+            sleep 15 && \
             poetry run pip install \
             --extra-index-url https://test.pypi.org/simple/ \
             "$PKG_NAME==$VERSION" \
@@ -221,12 +221,17 @@ jobs:
         run: make tests
         working-directory: ${{ inputs.working-directory }}

+      - name: Check for prerelease versions
+        working-directory: ${{ inputs.working-directory }}
+        run: |
+          poetry run python $GITHUB_WORKSPACE/.github/scripts/check_prerelease_dependencies.py pyproject.toml
+
       - name: Get minimum versions
         working-directory: ${{ inputs.working-directory }}
         id: min-version
         run: |
           poetry run pip install packaging
-          min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml)"
+          min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml release)"
           echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
           echo "min-versions=$min_versions"

@@ -285,6 +290,7 @@ jobs:
           VOYAGE_API_KEY: ${{ secrets.VOYAGE_API_KEY }}
           UPSTAGE_API_KEY: ${{ secrets.UPSTAGE_API_KEY }}
           FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
+          UNSTRUCTURED_API_KEY: ${{ secrets.UNSTRUCTURED_API_KEY }}
         run: make integration_tests
         working-directory: ${{ inputs.working-directory }}
```
19 .github/workflows/_test.yml (vendored)

```diff
@@ -65,3 +65,22 @@ jobs:
           # grep will exit non-zero if the target message isn't found,
           # and `set -e` above will cause the step to fail.
           echo "$STATUS" | grep 'nothing to commit, working tree clean'
+
+      - name: Get minimum versions
+        working-directory: ${{ inputs.working-directory }}
+        id: min-version
+        run: |
+          poetry run pip install packaging tomli
+          min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml pull_request)"
+          echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
+          echo "min-versions=$min_versions"
+
+      # Temporarily disabled until we can get the minimum versions working
+      # - name: Run unit tests with minimum dependency versions
+      #   if: ${{ steps.min-version.outputs.min-versions != '' }}
+      #   env:
+      #     MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
+      #   run: |
+      #     poetry run pip install --force-reinstall $MIN_VERSIONS --editable .
+      #     make tests
+      #     working-directory: ${{ inputs.working-directory }}
```
4 .github/workflows/_test_doc_imports.yml (vendored)

```diff
@@ -14,10 +14,6 @@ env:
 jobs:
   build:
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version:
-          - "3.12"
     name: "check doc imports #${{ inputs.python-version }}"
     steps:
       - uses: actions/checkout@v4
```
```diff
@@ -64,7 +64,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"! pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)"
+"! pip install -U langchain openai langchain-chroma langchain-experimental # (newest versions required for multi-modal)"
 ]
 },
 {
@@ -355,7 +355,7 @@
 "\n",
 "from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
 "from langchain.storage import InMemoryStore\n",
-"from langchain_community.vectorstores import Chroma\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_core.documents import Document\n",
 "from langchain_openai import OpenAIEmbeddings\n",
 "\n",
```

```diff
@@ -37,7 +37,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"%pip install -U --quiet langchain langchain_community openai chromadb langchain-experimental\n",
+"%pip install -U --quiet langchain langchain-chroma langchain-community openai langchain-experimental\n",
 "%pip install --quiet \"unstructured[all-docs]\" pypdf pillow pydantic lxml pillow matplotlib chromadb tiktoken"
 ]
 },
@@ -344,8 +344,8 @@
 "\n",
 "from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
 "from langchain.storage import InMemoryStore\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_community.embeddings import VertexAIEmbeddings\n",
-"from langchain_community.vectorstores import Chroma\n",
 "from langchain_core.documents import Document\n",
 "\n",
 "\n",
```

```diff
@@ -7,7 +7,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"pip install -U langchain umap-learn scikit-learn langchain_community tiktoken langchain-openai langchainhub chromadb langchain-anthropic"
+"pip install -U langchain umap-learn scikit-learn langchain_community tiktoken langchain-openai langchainhub langchain-chroma langchain-anthropic"
 ]
 },
 {
@@ -645,7 +645,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain_community.vectorstores import Chroma\n",
+"from langchain_chroma import Chroma\n",
 "\n",
 "# Initialize all_texts with leaf_texts\n",
 "all_texts = leaf_texts.copy()\n",
```

```diff
@@ -57,4 +57,6 @@ Notebook | Description
 [two_agent_debate_tools.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/two_agent_debate_tools.ipynb) | Simulate multi-agent dialogues where the agents can utilize various tools.
 [two_player_dnd.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/two_player_dnd.ipynb) | Simulate a two-player dungeons & dragons game, where a dialogue simulator class is used to coordinate the dialogue between the protagonist and the dungeon master.
 [wikibase_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/wikibase_agent.ipynb) | Create a simple wikibase agent that utilizes sparql generation, with testing done on http://wikidata.org.
-[oracleai_demo.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/oracleai_demo.ipynb) | This guide outlines how to utilize Oracle AI Vector Search alongside Langchain for an end-to-end RAG pipeline, providing step-by-step examples. The process includes loading documents from various sources using OracleDocLoader, summarizing them either within or outside the database with OracleSummary, and generating embeddings similarly through OracleEmbeddings. It also covers chunking documents according to specific requirements using Advanced Oracle Capabilities from OracleTextSplitter, and finally, storing and indexing these documents in a Vector Store for querying with OracleVS.
+[oracleai_demo.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/oracleai_demo.ipynb) | This guide outlines how to utilize Oracle AI Vector Search alongside Langchain for an end-to-end RAG pipeline, providing step-by-step examples. The process includes loading documents from various sources using OracleDocLoader, summarizing them either within or outside the database with OracleSummary, and generating embeddings similarly through OracleEmbeddings. It also covers chunking documents according to specific requirements using Advanced Oracle Capabilities from OracleTextSplitter, and finally, storing and indexing these documents in a Vector Store for querying with OracleVS.
+[rag-locally-on-intel-cpu.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/rag-locally-on-intel-cpu.ipynb) | Perform Retrieval-Augmented-Generation (RAG) on locally downloaded open-source models using langchain and open source tools and execute it on Intel Xeon CPU. We showed an example of how to apply RAG on Llama 2 model and enable it to answer the queries related to Intel Q1 2024 earnings release.
+[visual_RAG_vdms.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/visual_RAG_vdms.ipynb) | Performs Visual Retrieval-Augmented-Generation (RAG) using videos and scene descriptions generated by open source models.
```

```diff
@@ -39,7 +39,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"! pip install langchain unstructured[all-docs] pydantic lxml langchainhub"
+"! pip install langchain langchain-chroma unstructured[all-docs] pydantic lxml langchainhub"
 ]
 },
 {
@@ -320,7 +320,7 @@
 "\n",
 "from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
 "from langchain.storage import InMemoryStore\n",
-"from langchain_community.vectorstores import Chroma\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_core.documents import Document\n",
 "from langchain_openai import OpenAIEmbeddings\n",
 "\n",
```

```diff
@@ -59,7 +59,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"! pip install langchain unstructured[all-docs] pydantic lxml"
+"! pip install langchain langchain-chroma unstructured[all-docs] pydantic lxml"
 ]
 },
 {
@@ -375,7 +375,7 @@
 "\n",
 "from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
 "from langchain.storage import InMemoryStore\n",
-"from langchain_community.vectorstores import Chroma\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_core.documents import Document\n",
 "from langchain_openai import OpenAIEmbeddings\n",
 "\n",
```

```diff
@@ -59,7 +59,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"! pip install langchain unstructured[all-docs] pydantic lxml"
+"! pip install langchain langchain-chroma unstructured[all-docs] pydantic lxml"
 ]
 },
 {
@@ -378,8 +378,8 @@
 "\n",
 "from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
 "from langchain.storage import InMemoryStore\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_community.embeddings import GPT4AllEmbeddings\n",
-"from langchain_community.vectorstores import Chroma\n",
 "from langchain_core.documents import Document\n",
 "\n",
 "# The vectorstore to use to index the child chunks\n",
```

```diff
@@ -19,7 +19,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"! pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)"
+"! pip install -U langchain openai langchain_chroma langchain-experimental # (newest versions required for multi-modal)"
 ]
 },
 {
@@ -132,7 +132,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain_community.vectorstores import Chroma\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_openai import OpenAIEmbeddings\n",
 "\n",
 "baseline = Chroma.from_texts(\n",
```

```diff
@@ -28,7 +28,7 @@
 "outputs": [],
 "source": [
 "from langchain.chains import RetrievalQA\n",
-"from langchain_community.vectorstores import Chroma\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_openai import OpenAI, OpenAIEmbeddings\n",
 "from langchain_text_splitters import CharacterTextSplitter\n",
 "\n",
```

```diff
@@ -14,7 +14,7 @@
 }
 ],
 "source": [
-"%pip install -qU langchain-airbyte"
+"%pip install -qU langchain-airbyte langchain_chroma"
 ]
 },
 {
@@ -123,7 +123,7 @@
 "outputs": [],
 "source": [
 "import tiktoken\n",
-"from langchain_community.vectorstores import Chroma\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_openai import OpenAIEmbeddings\n",
 "\n",
 "enc = tiktoken.get_encoding(\"cl100k_base\")\n",
```

```diff
@@ -39,7 +39,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"! pip install langchain docugami==0.0.8 dgml-utils==0.3.0 pydantic langchainhub chromadb hnswlib --upgrade --quiet"
+"! pip install langchain docugami==0.0.8 dgml-utils==0.3.0 pydantic langchainhub langchain-chroma hnswlib --upgrade --quiet"
 ]
 },
 {
@@ -547,7 +547,7 @@
 "\n",
 "from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
 "from langchain.storage import InMemoryStore\n",
-"from langchain_community.vectorstores.chroma import Chroma\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_core.documents import Document\n",
 "from langchain_openai import OpenAIEmbeddings\n",
 "\n",
```

```diff
@@ -84,7 +84,7 @@
 }
 ],
 "source": [
-"%pip install --quiet pypdf chromadb tiktoken openai \n",
+"%pip install --quiet pypdf langchain-chroma tiktoken openai \n",
 "%pip uninstall -y langchain-fireworks\n",
 "%pip install --editable /mnt/disks/data/langchain/libs/partners/fireworks"
 ]
@@ -138,7 +138,7 @@
 "all_splits = text_splitter.split_documents(data)\n",
 "\n",
 "# Add to vectorDB\n",
-"from langchain_community.vectorstores import Chroma\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_fireworks.embeddings import FireworksEmbeddings\n",
 "\n",
 "vectorstore = Chroma.from_documents(\n",
```

```diff
@@ -170,7 +170,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain_community.vectorstores import Chroma\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_text_splitters import CharacterTextSplitter\n",
 "\n",
 "with open(\"../../state_of_the_union.txt\") as f:\n",
```

603 cookbook/img-to_img-search_CLIP_ChromaDB.ipynb (new file)
File diff suppressed because one or more lines are too long

```diff
@@ -7,7 +7,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"! pip install langchain_community tiktoken langchain-openai langchainhub chromadb langchain langgraph"
+"! pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph"
 ]
 },
 {
@@ -30,8 +30,8 @@
 "outputs": [],
 "source": [
 "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_community.document_loaders import WebBaseLoader\n",
-"from langchain_community.vectorstores import Chroma\n",
 "from langchain_openai import OpenAIEmbeddings\n",
 "\n",
 "urls = [\n",
```

```diff
@@ -7,7 +7,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"! pip install langchain_community tiktoken langchain-openai langchainhub chromadb langchain langgraph tavily-python"
+"! pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph tavily-python"
 ]
 },
 {
@@ -77,8 +77,8 @@
 "outputs": [],
 "source": [
 "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_community.document_loaders import WebBaseLoader\n",
-"from langchain_community.vectorstores import Chroma\n",
 "from langchain_openai import OpenAIEmbeddings\n",
 "\n",
 "urls = [\n",
@@ -180,8 +180,8 @@
 "from langchain.output_parsers.openai_tools import PydanticToolsParser\n",
 "from langchain.prompts import PromptTemplate\n",
 "from langchain.schema import Document\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_community.tools.tavily_search import TavilySearchResults\n",
-"from langchain_community.vectorstores import Chroma\n",
 "from langchain_core.messages import BaseMessage, FunctionMessage\n",
 "from langchain_core.output_parsers import StrOutputParser\n",
 "from langchain_core.pydantic_v1 import BaseModel, Field\n",
```

```diff
@@ -7,7 +7,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"! pip install langchain_community tiktoken langchain-openai langchainhub chromadb langchain langgraph"
+"! pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph"
 ]
 },
 {
@@ -86,8 +86,8 @@
 "outputs": [],
 "source": [
 "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_community.document_loaders import WebBaseLoader\n",
-"from langchain_community.vectorstores import Chroma\n",
 "from langchain_openai import OpenAIEmbeddings\n",
 "\n",
 "urls = [\n",
@@ -188,7 +188,7 @@
 "from langchain.output_parsers import PydanticOutputParser\n",
 "from langchain.output_parsers.openai_tools import PydanticToolsParser\n",
 "from langchain.prompts import PromptTemplate\n",
-"from langchain_community.vectorstores import Chroma\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_core.messages import BaseMessage, FunctionMessage\n",
 "from langchain_core.output_parsers import StrOutputParser\n",
 "from langchain_core.pydantic_v1 import BaseModel, Field\n",
```

```diff
@@ -58,7 +58,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"! pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)"
+"! pip install -U langchain openai langchain-chroma langchain-experimental # (newest versions required for multi-modal)"
 ]
 },
 {
@@ -187,7 +187,7 @@
 "\n",
 "import chromadb\n",
 "import numpy as np\n",
-"from langchain_community.vectorstores import Chroma\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_experimental.open_clip import OpenCLIPEmbeddings\n",
 "from PIL import Image as _PILImage\n",
 "\n",
```

```diff
@@ -58,7 +58,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"! pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain"
+"! pip install -U langchain-nomic langchain-chroma langchain-community tiktoken langchain-openai langchain"
 ]
 },
 {
@@ -167,7 +167,7 @@
 "source": [
 "import os\n",
 "\n",
-"from langchain_community.vectorstores import Chroma\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_core.output_parsers import StrOutputParser\n",
 "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
 "from langchain_nomic import NomicEmbeddings\n",
```

```diff
@@ -56,7 +56,7 @@
 },
 "outputs": [],
 "source": [
-"! pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain # (newest versions required for multi-modal)"
+"! pip install -U langchain-nomic langchain-chroma langchain-community tiktoken langchain-openai langchain # (newest versions required for multi-modal)"
 ]
 },
 {
@@ -194,7 +194,7 @@
 "\n",
 "import chromadb\n",
 "import numpy as np\n",
-"from langchain_community.vectorstores import Chroma\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_nomic import NomicEmbeddings\n",
 "from PIL import Image as _PILImage\n",
 "\n",
```

```diff
@@ -20,8 +20,8 @@
 "outputs": [],
 "source": [
 "from langchain.chains import RetrievalQA\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_community.document_loaders import TextLoader\n",
-"from langchain_community.vectorstores import Chroma\n",
 "from langchain_openai import OpenAIEmbeddings\n",
 "from langchain_text_splitters import CharacterTextSplitter"
 ]
```

```diff
@@ -80,7 +80,7 @@
 "outputs": [],
 "source": [
 "from langchain.schema import Document\n",
-"from langchain_community.vectorstores import Chroma\n",
+"from langchain_chroma import Chroma\n",
 "from langchain_openai import OpenAIEmbeddings\n",
 "\n",
 "embeddings = OpenAIEmbeddings()"
 ]
```
756
cookbook/rag-locally-on-intel-cpu.ipynb
Normal file
756
cookbook/rag-locally-on-intel-cpu.ipynb
Normal file
@@ -0,0 +1,756 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "10f50955-be55-422f-8c62-3a32f8cf02ed",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# RAG application running locally on Intel Xeon CPU using langchain and open-source models"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "48113be6-44bb-4aac-aed3-76a1365b9561",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Author - Pratool Bharti (pratool.bharti@intel.com)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8b10b54b-1572-4ea1-9c1e-1d29fcc3dcd9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In this cookbook, we use langchain tools and open source models to execute locally on CPU. This notebook has been validated to run on Intel Xeon 8480+ CPU. Here we implement a RAG pipeline for Llama2 model to answer questions about Intel Q1 2024 earnings release."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "acadbcec-3468-4926-8ce5-03b678041c0a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Create a conda or virtualenv environment with python >=3.10 and install following libraries**\n",
|
||||
"<br>\n",
|
||||
"\n",
|
||||
"`pip install --upgrade langchain langchain-community langchainhub langchain-chroma bs4 gpt4all pypdf pysqlite3-binary` <br>\n",
|
||||
"`pip install llama-cpp-python --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "84c392c8-700a-42ec-8e94-806597f22e43",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Load pysqlite3 in sys modules since ChromaDB requires sqlite3.**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "145cd491-b388-4ea7-bdc8-2f4995cac6fd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"__import__(\"pysqlite3\")\n",
|
||||
"import sys\n",
|
||||
"\n",
|
||||
"sys.modules[\"sqlite3\"] = sys.modules.pop(\"pysqlite3\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "14dde7e2-b236-49b9-b3a0-08c06410418c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Import essential components from langchain to load and split data**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "887643ba-249e-48d6-9aa7-d25087e8dfbf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain_community.document_loaders import PyPDFLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "922c0eba-8736-4de5-bd2f-3d0f00b16e43",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Download Intel Q1 2024 earnings release**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "2d6a2419-5338-4188-8615-a40a65ff8019",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"--2024-07-15 15:04:43-- https://d1io3yog0oux5.cloudfront.net/_11d435a500963f99155ee058df09f574/intel/db/887/9014/earnings_release/Q1+24_EarningsRelease_FINAL.pdf\n",
|
||||
"Resolving proxy-dmz.intel.com (proxy-dmz.intel.com)... 10.7.211.16\n",
|
||||
"Connecting to proxy-dmz.intel.com (proxy-dmz.intel.com)|10.7.211.16|:912... connected.\n",
|
||||
"Proxy request sent, awaiting response... 200 OK\n",
|
||||
"Length: 133510 (130K) [application/pdf]\n",
|
||||
"Saving to: ‘intel_q1_2024_earnings.pdf’\n",
|
||||
"\n",
|
||||
"intel_q1_2024_earni 100%[===================>] 130.38K --.-KB/s in 0.005s \n",
|
||||
"\n",
|
||||
"2024-07-15 15:04:44 (24.6 MB/s) - ‘intel_q1_2024_earnings.pdf’ saved [133510/133510]\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"!wget 'https://d1io3yog0oux5.cloudfront.net/_11d435a500963f99155ee058df09f574/intel/db/887/9014/earnings_release/Q1+24_EarningsRelease_FINAL.pdf' -O intel_q1_2024_earnings.pdf"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e3612627-e105-453d-8a50-bbd6e39dedb5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Loading earning release pdf document through PyPDFLoader**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "cac6278e-ebad-4224-a062-bf6daca24cb0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = PyPDFLoader(\"intel_q1_2024_earnings.pdf\")\n",
|
||||
"data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a7dca43b-1c62-41df-90c7-6ed2904f823d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Splitting entire document in several chunks with each chunk size is 500 tokens**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "4486adbe-0d0e-4685-8c08-c1774ed6e993",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n",
|
||||
"all_splits = text_splitter.split_documents(data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "af142346-e793-4a52-9a56-63e3be416b3d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Looking at the first split of the document**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "e4240fd1-898e-4bfc-a377-02c9bc25b56e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(metadata={'source': 'intel_q1_2024_earnings.pdf', 'page': 0}, page_content='Intel Corporation\\n2200 Mission College Blvd.\\nSanta Clara, CA 95054-1549\\n \\nNews Release\\n Intel Reports First -Quarter 2024 Financial Results\\nNEWS SUMMARY\\n▪First-quarter revenue of $12.7 billion , up 9% year over year (YoY).\\n▪First-quarter GAAP earnings (loss) per share (EPS) attributable to Intel was $(0.09) ; non-GAAP EPS \\nattributable to Intel was $0.18 .')"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"all_splits[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b88d2632-7c1b-49ef-a691-c0eb67d23e6a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**One of the major step in RAG is to convert each split of document into embeddings and store in a vector database such that searching relevant documents are efficient.** <br>\n",
|
||||
"**For that, importing Chroma vector database from langchain. Also, importing open source GPT4All for embedding models**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "9ff99dd7-9d47-4239-ba0a-d775792334ba",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.embeddings import GPT4AllEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b5d1f4dd-dd8d-4a20-95d1-2dbdd204375a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**In next step, we will download one of the most popular embedding model \"all-MiniLM-L6-v2\". Find more details of the model at this link https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "05db3494-5d8e-4a13-9941-26330a86f5e5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_name = \"all-MiniLM-L6-v2.gguf2.f16.gguf\"\n",
|
||||
"gpt4all_kwargs = {\"allow_download\": \"True\"}\n",
|
||||
"embeddings = GPT4AllEmbeddings(model_name=model_name, gpt4all_kwargs=gpt4all_kwargs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4e53999e-1983-46ac-8039-2783e194c3ae",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Store all the embeddings in the Chroma database**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "0922951a-9ddf-4761-973d-8e9a86f61284",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vectorstore = Chroma.from_documents(documents=all_splits, embedding=embeddings)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "29f94fa0-6c75-4a65-a1a3-debc75422479",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Now, let's find relevant splits from the documents related to the question**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "88c8152d-ec7a-4f0b-9d86-877789407537",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"4\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"question = \"What is Intel CCG revenue in Q1 2024\"\n",
|
||||
"docs = vectorstore.similarity_search(question)\n",
|
||||
"print(len(docs))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "53330c6b-cb0f-43f9-b379-2e57ac1e5335",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Look at the first retrieved document from the vector database**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "43a6d94f-b5c4-47b0-a353-2db4c3d24d9c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(metadata={'page': 1, 'source': 'intel_q1_2024_earnings.pdf'}, page_content='Client Computing Group (CCG) $7.5 billion up31%\\nData Center and AI (DCAI) $3.0 billion up5%\\nNetwork and Edge (NEX) $1.4 billion down 8%\\nTotal Intel Products revenue $11.9 billion up17%\\nIntel Foundry $4.4 billion down 10%\\nAll other:\\nAltera $342 million down 58%\\nMobileye $239 million down 48%\\nOther $194 million up17%\\nTotal all other revenue $775 million down 46%\\nIntersegment eliminations $(4.4) billion\\nTotal net revenue $12.7 billion up9%\\nIntel Products Highlights')"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "64ba074f-4b36-442e-b7e2-b26d6e2815c3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Download Lllama-2 model from Huggingface and store locally** <br>\n",
|
||||
"**You can download different quantization variant of Lllama-2 model from the link below. We are using Q8 version here (7.16GB).** <br>\n",
|
||||
"https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c8dd0811-6f43-4bc6-b854-2ab377639c9a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!huggingface-cli download TheBloke/Llama-2-7b-Chat-GGUF llama-2-7b-chat.Q8_0.gguf --local-dir . --local-dir-use-symlinks False"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3895b1f5-f51d-4539-abf0-af33d7ca48ea",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Import langchain components required to load downloaded LLMs model**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "fb087088-aa62-44c0-8356-061e9b9f1186",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks.manager import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"from langchain_community.llms import LlamaCpp"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5a8a111e-2614-4b70-b034-85cd3e7304cb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Loading the local Lllama-2 model using Llama-cpp library**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "fb917da2-c0d7-4995-b56d-26254276e0da",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"llama_model_loader: loaded meta data with 19 key-value pairs and 291 tensors from llama-2-7b-chat.Q8_0.gguf (version GGUF V2)\n",
|
||||
"llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n",
|
||||
"llama_model_loader: - kv 0: general.architecture str = llama\n",
|
||||
"llama_model_loader: - kv 1: general.name str = LLaMA v2\n",
|
||||
"llama_model_loader: - kv 2: llama.context_length u32 = 4096\n",
|
||||
"llama_model_loader: - kv 3: llama.embedding_length u32 = 4096\n",
|
||||
"llama_model_loader: - kv 4: llama.block_count u32 = 32\n",
|
||||
"llama_model_loader: - kv 5: llama.feed_forward_length u32 = 11008\n",
|
||||
"llama_model_loader: - kv 6: llama.rope.dimension_count u32 = 128\n",
|
||||
"llama_model_loader: - kv 7: llama.attention.head_count u32 = 32\n",
|
||||
"llama_model_loader: - kv 8: llama.attention.head_count_kv u32 = 32\n",
|
||||
"llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 = 0.000001\n",
|
||||
"llama_model_loader: - kv 10: general.file_type u32 = 7\n",
|
||||
"llama_model_loader: - kv 11: tokenizer.ggml.model str = llama\n",
|
||||
"llama_model_loader: - kv 12: tokenizer.ggml.tokens arr[str,32000] = [\"<unk>\", \"<s>\", \"</s>\", \"<0x00>\", \"<...\n",
|
||||
"llama_model_loader: - kv 13: tokenizer.ggml.scores arr[f32,32000] = [0.000000, 0.000000, 0.000000, 0.0000...\n",
|
||||
"llama_model_loader: - kv 14: tokenizer.ggml.token_type arr[i32,32000] = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...\n",
|
||||
"llama_model_loader: - kv 15: tokenizer.ggml.bos_token_id u32 = 1\n",
|
||||
"llama_model_loader: - kv 16: tokenizer.ggml.eos_token_id u32 = 2\n",
|
||||
"llama_model_loader: - kv 17: tokenizer.ggml.unknown_token_id u32 = 0\n",
|
||||
"llama_model_loader: - kv 18: general.quantization_version u32 = 2\n",
|
||||
"llama_model_loader: - type f32: 65 tensors\n",
|
||||
"llama_model_loader: - type q8_0: 226 tensors\n",
|
||||
"llm_load_vocab: special tokens cache size = 259\n",
|
||||
"llm_load_vocab: token to piece cache size = 0.1684 MB\n",
|
||||
"llm_load_print_meta: format = GGUF V2\n",
|
||||
"llm_load_print_meta: arch = llama\n",
|
||||
"llm_load_print_meta: vocab type = SPM\n",
|
||||
"llm_load_print_meta: n_vocab = 32000\n",
|
||||
"llm_load_print_meta: n_merges = 0\n",
|
||||
"llm_load_print_meta: vocab_only = 0\n",
|
||||
"llm_load_print_meta: n_ctx_train = 4096\n",
|
||||
"llm_load_print_meta: n_embd = 4096\n",
|
||||
"llm_load_print_meta: n_layer = 32\n",
|
||||
"llm_load_print_meta: n_head = 32\n",
|
||||
"llm_load_print_meta: n_head_kv = 32\n",
|
||||
"llm_load_print_meta: n_rot = 128\n",
|
||||
"llm_load_print_meta: n_swa = 0\n",
|
||||
"llm_load_print_meta: n_embd_head_k = 128\n",
|
||||
"llm_load_print_meta: n_embd_head_v = 128\n",
|
||||
"llm_load_print_meta: n_gqa = 1\n",
|
||||
"llm_load_print_meta: n_embd_k_gqa = 4096\n",
|
||||
"llm_load_print_meta: n_embd_v_gqa = 4096\n",
|
||||
"llm_load_print_meta: f_norm_eps = 0.0e+00\n",
|
||||
"llm_load_print_meta: f_norm_rms_eps = 1.0e-06\n",
|
||||
"llm_load_print_meta: f_clamp_kqv = 0.0e+00\n",
|
||||
"llm_load_print_meta: f_max_alibi_bias = 0.0e+00\n",
|
||||
"llm_load_print_meta: f_logit_scale = 0.0e+00\n",
|
||||
"llm_load_print_meta: n_ff = 11008\n",
|
||||
"llm_load_print_meta: n_expert = 0\n",
|
||||
"llm_load_print_meta: n_expert_used = 0\n",
|
||||
"llm_load_print_meta: causal attn = 1\n",
|
||||
"llm_load_print_meta: pooling type = 0\n",
|
||||
"llm_load_print_meta: rope type = 0\n",
|
||||
"llm_load_print_meta: rope scaling = linear\n",
|
||||
"llm_load_print_meta: freq_base_train = 10000.0\n",
|
||||
"llm_load_print_meta: freq_scale_train = 1\n",
|
||||
"llm_load_print_meta: n_ctx_orig_yarn = 4096\n",
|
||||
"llm_load_print_meta: rope_finetuned = unknown\n",
|
||||
"llm_load_print_meta: ssm_d_conv = 0\n",
|
||||
"llm_load_print_meta: ssm_d_inner = 0\n",
|
||||
"llm_load_print_meta: ssm_d_state = 0\n",
|
||||
"llm_load_print_meta: ssm_dt_rank = 0\n",
|
||||
"llm_load_print_meta: model type = 7B\n",
|
||||
"llm_load_print_meta: model ftype = Q8_0\n",
|
||||
"llm_load_print_meta: model params = 6.74 B\n",
|
||||
"llm_load_print_meta: model size = 6.67 GiB (8.50 BPW) \n",
|
||||
"llm_load_print_meta: general.name = LLaMA v2\n",
|
||||
"llm_load_print_meta: BOS token = 1 '<s>'\n",
|
||||
"llm_load_print_meta: EOS token = 2 '</s>'\n",
|
||||
"llm_load_print_meta: UNK token = 0 '<unk>'\n",
|
||||
"llm_load_print_meta: LF token = 13 '<0x0A>'\n",
|
||||
"llm_load_print_meta: max token length = 48\n",
|
||||
"llm_load_tensors: ggml ctx size = 0.14 MiB\n",
|
||||
"llm_load_tensors: CPU buffer size = 6828.64 MiB\n",
|
||||
"...................................................................................................\n",
|
||||
"llama_new_context_with_model: n_ctx = 2048\n",
|
||||
"llama_new_context_with_model: n_batch = 512\n",
|
||||
"llama_new_context_with_model: n_ubatch = 512\n",
|
||||
"llama_new_context_with_model: flash_attn = 0\n",
|
||||
"llama_new_context_with_model: freq_base = 10000.0\n",
|
||||
"llama_new_context_with_model: freq_scale = 1\n",
|
||||
"llama_kv_cache_init: CPU KV buffer size = 1024.00 MiB\n",
|
||||
"llama_new_context_with_model: KV self size = 1024.00 MiB, K (f16): 512.00 MiB, V (f16): 512.00 MiB\n",
|
||||
"llama_new_context_with_model: CPU output buffer size = 0.12 MiB\n",
|
||||
"llama_new_context_with_model: CPU compute buffer size = 164.01 MiB\n",
|
||||
"llama_new_context_with_model: graph nodes = 1030\n",
|
||||
"llama_new_context_with_model: graph splits = 1\n",
|
||||
"AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 0 | \n",
|
||||
"Model metadata: {'tokenizer.ggml.unknown_token_id': '0', 'tokenizer.ggml.eos_token_id': '2', 'general.architecture': 'llama', 'llama.context_length': '4096', 'general.name': 'LLaMA v2', 'llama.embedding_length': '4096', 'llama.feed_forward_length': '11008', 'llama.attention.layer_norm_rms_epsilon': '0.000001', 'llama.rope.dimension_count': '128', 'llama.attention.head_count': '32', 'tokenizer.ggml.bos_token_id': '1', 'llama.block_count': '32', 'llama.attention.head_count_kv': '32', 'general.quantization_version': '2', 'tokenizer.ggml.model': 'llama', 'general.file_type': '7'}\n",
|
||||
"Using fallback chat format: llama-2\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = LlamaCpp(\n",
|
||||
" model_path=\"llama-2-7b-chat.Q8_0.gguf\",\n",
|
||||
" n_gpu_layers=-1,\n",
|
||||
" n_batch=512,\n",
|
||||
" n_ctx=2048,\n",
|
||||
" f16_kv=True, # MUST set to True, otherwise you will run into problem after a couple of calls\n",
|
||||
" callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),\n",
|
||||
" verbose=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "43e06f56-ef97-451b-87d9-8465ea442aed",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Now let's ask the same question to Llama model without showing them the earnings release.**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "1033dd82-5532-437d-a548-27695e109589",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"?\n",
|
||||
"(NASDAQ:INTC)\n",
|
||||
"Intel's CCG (Client Computing Group) revenue for Q1 2024 was $9.6 billion, a decrease of 35% from the previous quarter and a decrease of 42% from the same period last year."
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"llama_print_timings: load time = 131.20 ms\n",
|
||||
"llama_print_timings: sample time = 16.05 ms / 68 runs ( 0.24 ms per token, 4236.76 tokens per second)\n",
|
||||
"llama_print_timings: prompt eval time = 131.14 ms / 16 tokens ( 8.20 ms per token, 122.01 tokens per second)\n",
|
||||
"llama_print_timings: eval time = 3225.00 ms / 67 runs ( 48.13 ms per token, 20.78 tokens per second)\n",
|
||||
"llama_print_timings: total time = 3466.40 ms / 83 tokens\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"?\\n(NASDAQ:INTC)\\nIntel's CCG (Client Computing Group) revenue for Q1 2024 was $9.6 billion, a decrease of 35% from the previous quarter and a decrease of 42% from the same period last year.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm.invoke(question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "75f5cb10-746f-4e37-9386-b85a4d2b84ef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**As you can see, model is giving wrong information. Correct asnwer is CCG revenue in Q1 2024 is $7.5B. Now let's apply RAG using the earning release document**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0f4150ec-5692-4756-b11a-22feb7ab88ff",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**in RAG, we modify the input prompt by adding relevent documents with the question. Here, we use one of the popular RAG prompt**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "226c14b0-f43e-4a1f-a1e4-04731d467ec4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['context', 'question'], template=\"You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.\\nQuestion: {question} \\nContext: {context} \\nAnswer:\"))]"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain import hub\n",
|
||||
"\n",
|
||||
"rag_prompt = hub.pull(\"rlm/rag-prompt\")\n",
|
||||
"rag_prompt.messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77deb6a0-0950-450a-916a-f2a029676c20",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Appending all retreived documents in a single document**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "2dbc3327-6ef3-4c1f-8797-0c71964b0921",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def format_docs(docs):\n",
|
||||
" return \"\\n\\n\".join(doc.page_content for doc in docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2e2d9f18-49d0-43a3-bea8-78746ffa86b7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**The last step is to create a chain using langchain tool that will create an e2e pipeline. It will take question and context as an input.**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "427379c2-51ff-4e0f-8278-a45221363299",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough, RunnablePick\n",
|
||||
"\n",
|
||||
"# Chain\n",
|
||||
"chain = (\n",
|
||||
" RunnablePassthrough.assign(context=RunnablePick(\"context\") | format_docs)\n",
|
||||
" | rag_prompt\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "095d6280-c949-4d00-8e32-8895a82d245f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Llama.generate: prefix-match hit\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" Based on the provided context, Intel CCG revenue in Q1 2024 was $7.5 billion up 31%."
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"llama_print_timings: load time = 131.20 ms\n",
|
||||
"llama_print_timings: sample time = 7.74 ms / 31 runs ( 0.25 ms per token, 4004.13 tokens per second)\n",
|
||||
"llama_print_timings: prompt eval time = 2529.41 ms / 674 tokens ( 3.75 ms per token, 266.46 tokens per second)\n",
|
||||
"llama_print_timings: eval time = 1542.94 ms / 30 runs ( 51.43 ms per token, 19.44 tokens per second)\n",
|
||||
"llama_print_timings: total time = 4123.68 ms / 704 tokens\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Based on the provided context, Intel CCG revenue in Q1 2024 was $7.5 billion up 31%.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"context\": docs, \"question\": question})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "638364b2-6bd2-4471-9961-d3a1d1b9d4ee",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Now we see the results are correct as it is mentioned in earnings release.** <br>\n",
|
||||
"**To further automate, we will create a chain that will take input as question and retriever so that we don't need to retrieve documents seperately**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "4654e5b7-635f-4767-8b31-4c430164cdd5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"qa_chain = (\n",
|
||||
" {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
|
||||
" | rag_prompt\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0979f393-fd0a-4e82-b844-68371c6ad68f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Now we only need to pass the question to the chain and it will fetch the contexts directly from the vector database to generate the answer**\n",
|
||||
"<br>\n",
|
||||
"**Let's try with another question**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"id": "3ea07b82-e6ec-4084-85f4-191373530172",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Llama.generate: prefix-match hit\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" According to the provided context, Intel DCAI revenue in Q1 2024 was $3.0 billion up 5%."
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"llama_print_timings: load time = 131.20 ms\n",
|
||||
"llama_print_timings: sample time = 6.28 ms / 31 runs ( 0.20 ms per token, 4937.88 tokens per second)\n",
|
||||
"llama_print_timings: prompt eval time = 2681.93 ms / 730 tokens ( 3.67 ms per token, 272.19 tokens per second)\n",
|
||||
"llama_print_timings: eval time = 1471.07 ms / 30 runs ( 49.04 ms per token, 20.39 tokens per second)\n",
|
||||
"llama_print_timings: total time = 4206.77 ms / 760 tokens\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' According to the provided context, Intel DCAI revenue in Q1 2024 was $3.0 billion up 5%.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"qa_chain.invoke(\"what is Intel DCAI revenue in Q1 2024?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9407f2a0-4a35-4315-8e96-02fcb80f210c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "rag-on-intel",
|
||||
"language": "python",
|
||||
"name": "rag-on-intel"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -36,10 +36,10 @@
|
||||
"from bs4 import BeautifulSoup as Soup\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"from langchain.storage import InMemoryByteStore, LocalFileStore\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.document_loaders.recursive_url_loader import (\n",
|
||||
" RecursiveUrlLoader,\n",
|
||||
")\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"\n",
|
||||
"# For our example, we'll load docs from the web\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
@@ -370,13 +370,14 @@
|
||||
],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"from langchain.llms.huggingface_pipeline import HuggingFacePipeline\n",
|
||||
"from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n",
|
||||
"from langchain_huggingface.llms import HuggingFacePipeline\n",
|
||||
"from optimum.intel.ipex import IPEXModelForCausalLM\n",
|
||||
"from transformers import AutoTokenizer, pipeline\n",
|
||||
"\n",
|
||||
"model_id = \"Intel/neural-chat-7b-v3-3\"\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(model_id)\n",
|
||||
"model = AutoModelForCausalLM.from_pretrained(\n",
|
||||
" model_id, device_map=\"auto\", torch_dtype=torch.bfloat16\n",
|
||||
"model = IPEXModelForCausalLM.from_pretrained(\n",
|
||||
" model_id, torch_dtype=torch.bfloat16, export=True\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"pipe = pipeline(\"text-generation\", model=model, tokenizer=tokenizer, max_new_tokens=100)\n",
|
||||
@@ -581,7 +582,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.18"
|
||||
"version": "3.10.14"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -740,7 +740,7 @@ Even this relatively large model will most likely fail to generate more complica
|
||||
|
||||
|
||||
```bash
|
||||
poetry run pip install pyyaml chromadb
|
||||
poetry run pip install pyyaml langchain_chroma
|
||||
import yaml
|
||||
```
|
||||
|
||||
@@ -994,7 +994,7 @@ from langchain.prompts import FewShotPromptTemplate, PromptTemplate
|
||||
from langchain.chains.sql_database.prompt import _sqlite_prompt, PROMPT_SUFFIX
|
||||
from langchain_huggingface import HuggingFaceEmbeddings
|
||||
from langchain.prompts.example_selector.semantic_similarity import SemanticSimilarityExampleSelector
|
||||
from langchain_community.vectorstores import Chroma
|
||||
from langchain_chroma import Chroma
|
||||
|
||||
example_prompt = PromptTemplate(
|
||||
input_variables=["table_info", "input", "sql_cmd", "sql_result", "answer"],
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install --quiet pypdf chromadb tiktoken openai langchain-together"
|
||||
"! pip install --quiet pypdf tiktoken openai langchain-chroma langchain-together"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -45,8 +45,8 @@
|
||||
"all_splits = text_splitter.split_documents(data)\n",
|
||||
"\n",
|
||||
"# Add to vectorDB\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"\n",
|
||||
"\"\"\"\n",
|
||||
"from langchain_together.embeddings import TogetherEmbeddings\n",
|
||||
|
||||
677
cookbook/visual_RAG_vdms.ipynb
Normal file
File diff suppressed because one or more lines are too long
@@ -13,7 +13,7 @@ OUTPUT_NEW_DOCS_DIR = $(OUTPUT_NEW_DIR)/docs
|
||||
|
||||
PYTHON = .venv/bin/python
|
||||
|
||||
PARTNER_DEPS_LIST := $(shell find ../libs/partners -mindepth 1 -maxdepth 1 -type d -exec test -e "{}/pyproject.toml" \; -print | grep -vE "airbyte|ibm" | tr '\n' ' ')
|
||||
PARTNER_DEPS_LIST := $(shell find ../libs/partners -mindepth 1 -maxdepth 1 -type d -exec test -e "{}/pyproject.toml" \; -print | grep -vE "airbyte|ibm|couchbase" | tr '\n' ' ')
|
||||
|
||||
PORT ?= 3001
|
||||
|
||||
@@ -38,6 +38,8 @@ generate-files:
|
||||
|
||||
$(PYTHON) scripts/model_feat_table.py $(INTERMEDIATE_DIR)
|
||||
|
||||
$(PYTHON) scripts/tool_feat_table.py $(INTERMEDIATE_DIR)
|
||||
|
||||
$(PYTHON) scripts/document_loader_feat_table.py $(INTERMEDIATE_DIR)
|
||||
|
||||
$(PYTHON) scripts/copy_templates.py $(INTERMEDIATE_DIR)
|
||||
|
||||
@@ -178,3 +178,10 @@ autosummary_generate = True
|
||||
|
||||
html_copy_source = False
|
||||
html_show_sourcelink = False
|
||||
|
||||
# Set canonical URL from the Read the Docs Domain
|
||||
html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "")
|
||||
|
||||
# Tell Jinja2 templates the build is running on Read the Docs
|
||||
if os.environ.get("READTHEDOCS", "") == "True":
|
||||
html_context["READTHEDOCS"] = True
|
||||
|
||||
@@ -78,7 +78,7 @@ def _load_module_members(module_path: str, namespace: str) -> ModuleMembers:
|
||||
continue
|
||||
|
||||
if inspect.isclass(type_):
|
||||
# The clasification of the class is used to select a template
|
||||
# The type of the class is used to select a template
|
||||
# for the object when rendering the documentation.
|
||||
# See `templates` directory for defined templates.
|
||||
# This is a hacky solution to distinguish between different
|
||||
|
||||
@@ -55,6 +55,7 @@ A developer platform that lets you debug, test, evaluate, and monitor LLM applic
|
||||
dark: useBaseUrl('/svg/langchain_stack_062024_dark.svg'),
|
||||
}}
|
||||
title="LangChain Framework Overview"
|
||||
style={{ width: "100%" }}
|
||||
/>
|
||||
|
||||
## LangChain Expression Language (LCEL)
|
||||
@@ -235,7 +236,7 @@ This is where information like log-probs and token usage may be stored.
|
||||
These represent a decision from an language model to call a tool. They are included as part of an `AIMessage` output.
|
||||
They can be accessed from there with the `.tool_calls` property.
|
||||
|
||||
This property returns a list of dictionaries. Each dictionary has the following keys:
|
||||
This property returns a list of `ToolCall`s. A `ToolCall` is a dictionary with the following arguments:
|
||||
|
||||
- `name`: The name of the tool that should be called.
|
||||
- `args`: The arguments to that tool.
|
||||
@@ -245,13 +246,18 @@ This property returns a list of dictionaries. Each dictionary has the following
|
||||
|
||||
This represents a system message, which tells the model how to behave. Not every model provider supports this.
|
||||
|
||||
#### FunctionMessage
|
||||
|
||||
This represents the result of a function call. In addition to `role` and `content`, this message has a `name` parameter which conveys the name of the function that was called to produce this result.
|
||||
|
||||
#### ToolMessage
|
||||
|
||||
This represents the result of a tool call. This is distinct from a FunctionMessage in order to match OpenAI's `function` and `tool` message types. In addition to `role` and `content`, this message has a `tool_call_id` parameter which conveys the id of the call to the tool that was called to produce this result.
|
||||
This represents the result of a tool call. In addition to `role` and `content`, this message has:
|
||||
|
||||
- a `tool_call_id` field which conveys the id of the call to the tool that was called to produce this result.
|
||||
- an `artifact` field which can be used to pass along arbitrary artifacts of the tool execution which are useful to track but which should not be sent to the model.
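
For illustration, constructing such a message directly might look like the following sketch (the id and payloads here are hypothetical):

```python
from langchain_core.messages import ToolMessage

msg = ToolMessage(
    content="3 rows matched",        # sent back to the model
    tool_call_id="call_abc123",      # id of the originating tool call
    artifact={"rows": [1, 2, 3]},    # kept for the application, not sent to the model
)
```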
|
||||
|
||||
#### (Legacy) FunctionMessage
|
||||
|
||||
This is a legacy message type, corresponding to OpenAI's legacy function-calling API. ToolMessage should be used instead to correspond to the updated tool-calling API.
|
||||
|
||||
This represents the result of a function call. In addition to `role` and `content`, this message has a `name` parameter which conveys the name of the function that was called to produce this result.
|
||||
|
||||
|
||||
### Prompt templates
|
||||
@@ -495,35 +501,87 @@ For specifics on how to use retrievers, see the [relevant how-to guides here](/d
|
||||
### Tools
|
||||
<span data-heading-keywords="tool,tools"></span>
|
||||
|
||||
Tools are interfaces that an agent, a chain, or a chat model / LLM can use to interact with the world.
|
||||
Tools are utilities designed to be called by a model: their inputs are designed to be generated by models, and their outputs are designed to be passed back to models.
|
||||
Tools are needed whenever you want a model to control parts of your code or call out to external APIs.
|
||||
|
||||
A tool consists of the following components:
|
||||
A tool consists of:
|
||||
|
||||
1. The name of the tool
|
||||
2. A description of what the tool does
|
||||
3. JSON schema of what the inputs to the tool are
|
||||
4. The function to call
|
||||
5. Whether the result of a tool should be returned directly to the user (only relevant for agents)
|
||||
1. The name of the tool.
|
||||
2. A description of what the tool does.
|
||||
3. A JSON schema defining the inputs to the tool.
|
||||
4. A function (and, optionally, an async variant of the function).
|
||||
|
||||
The name, description and JSON schema are provided as context
|
||||
to the LLM, allowing the LLM to determine how to use the tool
|
||||
appropriately.
|
||||
When a tool is bound to a model, the name, description and JSON schema are provided as context to the model.
|
||||
Given a list of tools and a set of instructions, a model can request to call one or more tools with specific inputs.
|
||||
Typical usage may look like the following:
|
||||
|
||||
Given a list of available tools and a prompt, an LLM can request
|
||||
that one or more tools be invoked with appropriate arguments.
|
||||
```python
|
||||
tools = [...] # Define a list of tools
|
||||
llm_with_tools = llm.bind_tools(tools)
|
||||
ai_msg = llm_with_tools.invoke("do xyz...") # AIMessage(tool_calls=[ToolCall(...), ...], ...)
|
||||
```
|
||||
|
||||
Generally, when designing tools to be used by a chat model or LLM, it is important to keep in mind the following:
|
||||
The `AIMessage` returned from the model MAY have `tool_calls` associated with it.
|
||||
Read [this guide](/docs/concepts/#aimessage) for more information on what the response type may look like.
|
||||
|
||||
- Chat models that have been fine-tuned for tool calling will be better at tool calling than non-fine-tuned models.
|
||||
- Non fine-tuned models may not be able to use tools at all, especially if the tools are complex or require multiple tool calls.
|
||||
- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas.
|
||||
- Simpler tools are generally easier for models to use than more complex tools.
|
||||
Once the chosen tools are invoked, the results can be passed back to the model so that it can complete whatever task
|
||||
it's performing.
|
||||
There are generally two different ways to invoke the tool and pass back the response:
|
||||
|
||||
For specifics on how to use tools, see the [relevant how-to guides here](/docs/how_to/#tools).
|
||||
#### Invoke with just the arguments
|
||||
|
||||
To use an existing pre-built tool, see [here](docs/integrations/tools/) for a list of pre-built tools.
|
||||
When you invoke a tool with just the arguments, you will get back the raw tool output (usually a string).
|
||||
This generally looks like:
|
||||
|
||||
```python
|
||||
# First, check that the LLM actually returned tool calls
|
||||
tool_call = ai_msg.tool_calls[0] # ToolCall(args={...}, id=..., ...)
|
||||
tool_output = tool.invoke(tool_call["args"])
|
||||
tool_message = ToolMessage(content=tool_output, tool_call_id=tool_call["id"], name=tool_call["name"])
|
||||
```
|
||||
|
||||
Note that the `content` field will generally be passed back to the model.
|
||||
If you do not want the raw tool response to be passed to the model, but you still want to keep it around,
|
||||
you can transform the tool output but also pass it as an artifact (read more about [`ToolMessage.artifact` here](/docs/concepts/#toolmessage))
|
||||
|
||||
```python
|
||||
... # Same code as above
|
||||
response_for_llm = transform(tool_output)
|
||||
tool_message = ToolMessage(content=response_for_llm, tool_call_id=tool_call["id"], name=tool_call["name"], artifact=tool_output)
|
||||
```
|
||||
|
||||
#### Invoke with `ToolCall`
|
||||
|
||||
The other way to invoke a tool is to call it with the full `ToolCall` that was generated by the model.
|
||||
When you do this, the tool will return a ToolMessage.
|
||||
The benefits of this are that you don't have to write the logic yourself to transform the tool output into a ToolMessage.
|
||||
This generally looks like:
|
||||
|
||||
```python
|
||||
tool_call = ai_msg.tool_calls[0] # ToolCall(args={...}, id=..., ...)
|
||||
tool_message = tool.invoke(tool_call)
|
||||
# -> ToolMessage(content="tool result foobar...", tool_call_id=..., name="tool_name")
|
||||
```
|
||||
|
||||
If you are invoking the tool this way and want to include an [artifact](/docs/concepts/#toolmessage) for the ToolMessage, you will need to have the tool return two things.
|
||||
Read more about [defining tools that return artifacts here](/docs/how_to/tool_artifacts/).
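
As a minimal sketch (assuming a `langchain-core` version with `response_format` support; the query logic here is hypothetical):

```python
from langchain_core.tools import tool


@tool(response_format="content_and_artifact")
def run_query(query: str) -> tuple[str, dict]:
    """Run a query and return a summary plus the raw payload."""
    payload = {"rows": [1, 2, 3]}  # hypothetical result
    # The first element (content) goes to the model; the second (artifact) does not.
    return f"{len(payload['rows'])} rows matched", payload
```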
|
||||
|
||||
#### Best practices
|
||||
|
||||
When designing tools to be used by a model, it is important to keep in mind that:
|
||||
|
||||
- Chat models that have explicit [tool-calling APIs](/docs/concepts/#functiontool-calling) will be better at tool calling than non-fine-tuned models.
|
||||
- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas. This is another form of prompt engineering.
|
||||
- Simple, narrowly scoped tools are easier for models to use than complex tools.
|
||||
|
||||
#### Related
|
||||
|
||||
For specifics on how to use tools, see the [tools how-to guides](/docs/how_to/#tools).
|
||||
|
||||
To use a pre-built tool, see the [tool integration docs](/docs/integrations/tools/).
|
||||
|
||||
### Toolkits
|
||||
<span data-heading-keywords="toolkit,toolkits"></span>
|
||||
|
||||
Toolkits are collections of tools that are designed to be used together for specific tasks. They have convenient loading methods.
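
Usage typically follows this pattern (a sketch; `ExampleToolkit` stands in for any concrete integration):

```python
# Initialize a hypothetical toolkit (e.g. an SQL or file-management toolkit)
toolkit = ExampleToolkit(...)

# Toolkits expose a get_tools() method that returns their list of tools
tools = toolkit.get_tools()
```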
|
||||
|
||||
@@ -821,7 +879,7 @@ We recommend this method as a starting point when working with structured output
|
||||
- If multiple underlying techniques are supported, you can supply a `method` parameter to
|
||||
[toggle which one is used](/docs/how_to/structured_output/#advanced-specifying-the-method-for-structuring-outputs), as sketched after this list.
|
||||
|
||||
You may want or need to use other techiniques if:
|
||||
You may want or need to use other techniques if:
|
||||
|
||||
- The chat model you are using does not support tool calling.
|
||||
- You are working with very complex schemas and the model is having trouble generating outputs that conform.
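
A minimal sketch of supplying `method`, assuming `langchain-openai` and a tool-calling model (the schema and model name are illustrative):

```python
from langchain_core.pydantic_v1 import BaseModel
from langchain_openai import ChatOpenAI


class Person(BaseModel):
    name: str
    age: int


llm = ChatOpenAI(model="gpt-4o", temperature=0)
# method toggles the underlying technique, e.g. tool calling vs. JSON mode
structured_llm = llm.with_structured_output(Person, method="function_calling")
structured_llm.invoke("Anna is 29 years old.")
```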
|
||||
|
||||
@@ -33,6 +33,8 @@ Some examples include:
|
||||
|
||||
- [Build a Simple LLM Application with LCEL](/docs/tutorials/llm_chain/)
|
||||
- [Build a Retrieval Augmented Generation (RAG) App](/docs/tutorials/rag/)
|
||||
|
||||
A good structural rule of thumb is to follow the structure of this [example from Numpy](https://numpy.org/numpy-tutorials/content/tutorial-svd.html).
|
||||
|
||||
Here are some high-level tips on writing a good tutorial:
|
||||
|
||||
|
||||
Binary file not shown.
@@ -153,7 +153,7 @@
|
||||
"\n",
|
||||
" def parse(self, text: str) -> List[str]:\n",
|
||||
" lines = text.strip().split(\"\\n\")\n",
|
||||
" return lines\n",
|
||||
" return list(filter(None, lines)) # Remove empty lines\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"output_parser = LineListOutputParser()\n",
|
||||
|
||||
342
docs/docs/how_to/callbacks_custom_events.ipynb
Normal file
@@ -0,0 +1,342 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to dispatch custom callback events\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [Callbacks](/docs/concepts/#callbacks)\n",
|
||||
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
|
||||
"- [Astream Events API](/docs/concepts/#astream_events) the `astream_events` method will surface custom callback events.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"In some situations, you may want to dipsatch a custom callback event from within a [Runnable](/docs/concepts/#runnable-interface) so it can be surfaced\n",
|
||||
"in a custom callback handler or via the [Astream Events API](/docs/concepts/#astream_events).\n",
|
||||
"\n",
|
||||
"For example, if you have a long running tool with multiple steps, you can dispatch custom events between the steps and use these custom events to monitor progress.\n",
|
||||
"You could also surface these custom events to an end user of your application to show them how the current task is progressing.\n",
|
||||
"\n",
|
||||
"To dispatch a custom event you need to decide on two attributes for the event: the `name` and the `data`.\n",
|
||||
"\n",
|
||||
"| Attribute | Type | Description |\n",
|
||||
"|-----------|------|----------------------------------------------------------------------------------------------------------|\n",
|
||||
"| name | str | A user defined name for the event. |\n",
|
||||
"| data | Any | The data associated with the event. This can be anything, though we suggest making it JSON serializable. |\n",
|
||||
"\n",
|
||||
"\n",
|
||||
":::{.callout-important}\n",
|
||||
"* Dispatching custom callback events requires `langchain-core>=0.2.15`.\n",
|
||||
"* Custom callback events can only be dispatched from within an existing `Runnable`.\n",
|
||||
"* If using `astream_events`, you must use `version='v2'` to see custom events.\n",
|
||||
"* Sending or rendering custom callbacks events in LangSmith is not yet supported.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"\n",
|
||||
":::caution COMPATIBILITY\n",
|
||||
"LangChain cannot automatically propagate configuration, including callbacks necessary for astream_events(), to child runnables if you are running async code in python<=3.10. This is a common reason why you may fail to see events being emitted from custom runnables or tools.\n",
|
||||
"\n",
|
||||
"If you are running python<=3.10, you will need to manually propagate the `RunnableConfig` object to the child runnable in async environments. For an example of how to manually propagate the config, see the implementation of the `bar` RunnableLambda below.\n",
|
||||
"\n",
|
||||
"If you are running python>=3.11, the `RunnableConfig` will automatically propagate to child runnables in async environment. However, it is still a good idea to propagate the `RunnableConfig` manually if your code may run in other Python versions.\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain-core"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Astream Events API\n",
|
||||
"\n",
|
||||
"The most useful way to consume custom events is via the [Astream Events API](/docs/concepts/#astream_events).\n",
|
||||
"\n",
|
||||
"We can use the `async` `adispatch_custom_event` API to emit custom events in an async setting. \n",
|
||||
"\n",
|
||||
"\n",
|
||||
":::{.callout-important}\n",
|
||||
"\n",
|
||||
"To see custom events via the astream events API, you need to use the newer `v2` API of `astream_events`.\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'event': 'on_chain_start', 'data': {'input': 'hello world'}, 'name': 'foo', 'tags': [], 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'metadata': {}, 'parent_ids': []}\n",
|
||||
"{'event': 'on_custom_event', 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'event1', 'tags': [], 'metadata': {}, 'data': {'x': 'hello world'}, 'parent_ids': []}\n",
|
||||
"{'event': 'on_custom_event', 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'event2', 'tags': [], 'metadata': {}, 'data': 5, 'parent_ids': []}\n",
|
||||
"{'event': 'on_chain_stream', 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'foo', 'tags': [], 'metadata': {}, 'data': {'chunk': 'hello world'}, 'parent_ids': []}\n",
|
||||
"{'event': 'on_chain_end', 'data': {'output': 'hello world'}, 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'foo', 'tags': [], 'metadata': {}, 'parent_ids': []}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.callbacks.manager import (\n",
|
||||
" adispatch_custom_event,\n",
|
||||
")\n",
|
||||
"from langchain_core.runnables import RunnableLambda\n",
|
||||
"from langchain_core.runnables.config import RunnableConfig\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@RunnableLambda\n",
|
||||
"async def foo(x: str) -> str:\n",
|
||||
" await adispatch_custom_event(\"event1\", {\"x\": x})\n",
|
||||
" await adispatch_custom_event(\"event2\", 5)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async for event in foo.astream_events(\"hello world\", version=\"v2\"):\n",
|
||||
" print(event)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In python <= 3.10, you must propagate the config manually!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'event': 'on_chain_start', 'data': {'input': 'hello world'}, 'name': 'bar', 'tags': [], 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'metadata': {}, 'parent_ids': []}\n",
|
||||
"{'event': 'on_custom_event', 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'event1', 'tags': [], 'metadata': {}, 'data': {'x': 'hello world'}, 'parent_ids': []}\n",
|
||||
"{'event': 'on_custom_event', 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'event2', 'tags': [], 'metadata': {}, 'data': 5, 'parent_ids': []}\n",
|
||||
"{'event': 'on_chain_stream', 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'bar', 'tags': [], 'metadata': {}, 'data': {'chunk': 'hello world'}, 'parent_ids': []}\n",
|
||||
"{'event': 'on_chain_end', 'data': {'output': 'hello world'}, 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'bar', 'tags': [], 'metadata': {}, 'parent_ids': []}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.callbacks.manager import (\n",
|
||||
" adispatch_custom_event,\n",
|
||||
")\n",
|
||||
"from langchain_core.runnables import RunnableLambda\n",
|
||||
"from langchain_core.runnables.config import RunnableConfig\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@RunnableLambda\n",
|
||||
"async def bar(x: str, config: RunnableConfig) -> str:\n",
|
||||
" \"\"\"An example that shows how to manually propagate config.\n",
|
||||
"\n",
|
||||
" You must do this if you're running python<=3.10.\n",
|
||||
" \"\"\"\n",
|
||||
" await adispatch_custom_event(\"event1\", {\"x\": x}, config=config)\n",
|
||||
" await adispatch_custom_event(\"event2\", 5, config=config)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async for event in bar.astream_events(\"hello world\", version=\"v2\"):\n",
|
||||
" print(event)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Async Callback Handler\n",
|
||||
"\n",
|
||||
"You can also consume the dispatched event via an async callback handler."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Received event event1 with data: {'x': 1}, with tags: ['foo', 'bar'], with metadata: {} and run_id: a62b84be-7afd-4829-9947-7165df1f37d9\n",
|
||||
"Received event event2 with data: 5, with tags: ['foo', 'bar'], with metadata: {} and run_id: a62b84be-7afd-4829-9947-7165df1f37d9\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"1"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Any, Dict, List, Optional\n",
|
||||
"from uuid import UUID\n",
|
||||
"\n",
|
||||
"from langchain_core.callbacks import AsyncCallbackHandler\n",
|
||||
"from langchain_core.callbacks.manager import (\n",
|
||||
" adispatch_custom_event,\n",
|
||||
")\n",
|
||||
"from langchain_core.runnables import RunnableLambda\n",
|
||||
"from langchain_core.runnables.config import RunnableConfig\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class AsyncCustomCallbackHandler(AsyncCallbackHandler):\n",
|
||||
" async def on_custom_event(\n",
|
||||
" self,\n",
|
||||
" name: str,\n",
|
||||
" data: Any,\n",
|
||||
" *,\n",
|
||||
" run_id: UUID,\n",
|
||||
" tags: Optional[List[str]] = None,\n",
|
||||
" metadata: Optional[Dict[str, Any]] = None,\n",
|
||||
" **kwargs: Any,\n",
|
||||
" ) -> None:\n",
|
||||
" print(\n",
|
||||
" f\"Received event {name} with data: {data}, with tags: {tags}, with metadata: {metadata} and run_id: {run_id}\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@RunnableLambda\n",
|
||||
"async def bar(x: str, config: RunnableConfig) -> str:\n",
|
||||
" \"\"\"An example that shows how to manually propagate config.\n",
|
||||
"\n",
|
||||
" You must do this if you're running python<=3.10.\n",
|
||||
" \"\"\"\n",
|
||||
" await adispatch_custom_event(\"event1\", {\"x\": x}, config=config)\n",
|
||||
" await adispatch_custom_event(\"event2\", 5, config=config)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async_handler = AsyncCustomCallbackHandler()\n",
|
||||
"await foo.ainvoke(1, {\"callbacks\": [async_handler], \"tags\": [\"foo\", \"bar\"]})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Sync Callback Handler\n",
|
||||
"\n",
|
||||
"Let's see how to emit custom events in a sync environment using `dispatch_custom_event`.\n",
|
||||
"\n",
|
||||
"You **must** call `dispatch_custom_event` from within an existing `Runnable`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Received event event1 with data: {'x': 1}, with tags: ['foo', 'bar'], with metadata: {} and run_id: 27b5ce33-dc26-4b34-92dd-08a89cb22268\n",
|
||||
"Received event event2 with data: {'x': 1}, with tags: ['foo', 'bar'], with metadata: {} and run_id: 27b5ce33-dc26-4b34-92dd-08a89cb22268\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"1"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Any, Dict, List, Optional\n",
|
||||
"from uuid import UUID\n",
|
||||
"\n",
|
||||
"from langchain_core.callbacks import BaseCallbackHandler\n",
|
||||
"from langchain_core.callbacks.manager import (\n",
|
||||
" dispatch_custom_event,\n",
|
||||
")\n",
|
||||
"from langchain_core.runnables import RunnableLambda\n",
|
||||
"from langchain_core.runnables.config import RunnableConfig\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class CustomHandler(BaseCallbackHandler):\n",
|
||||
" def on_custom_event(\n",
|
||||
" self,\n",
|
||||
" name: str,\n",
|
||||
" data: Any,\n",
|
||||
" *,\n",
|
||||
" run_id: UUID,\n",
|
||||
" tags: Optional[List[str]] = None,\n",
|
||||
" metadata: Optional[Dict[str, Any]] = None,\n",
|
||||
" **kwargs: Any,\n",
|
||||
" ) -> None:\n",
|
||||
" print(\n",
|
||||
" f\"Received event {name} with data: {data}, with tags: {tags}, with metadata: {metadata} and run_id: {run_id}\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@RunnableLambda\n",
|
||||
"def foo(x: int, config: RunnableConfig) -> int:\n",
|
||||
" dispatch_custom_event(\"event1\", {\"x\": x})\n",
|
||||
" dispatch_custom_event(\"event2\", {\"x\": x})\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"handler = CustomHandler()\n",
|
||||
"foo.invoke(1, {\"callbacks\": [handler], \"tags\": [\"foo\", \"bar\"]})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"You've seen how to emit custom events, you can check out the more in depth guide for [astream events](/docs/how_to/streaming/#using-stream-events) which is the easiest way to leverage custom events."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -15,6 +15,12 @@
|
||||
"\n",
|
||||
"Make sure you have the integration packages installed for any model providers you want to support. E.g. you should have `langchain-openai` installed to init an OpenAI model.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::info Requires ``langchain >= 0.2.8``\n",
|
||||
"\n",
|
||||
"This functionality was added in ``langchain-core == 0.2.8``. Please make sure your package is up to date.\n",
|
||||
"\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
@@ -25,7 +31,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain langchain-openai langchain-anthropic langchain-google-vertexai"
|
||||
"%pip install -qU langchain>=0.2.8 langchain-openai langchain-anthropic langchain-google-vertexai"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -76,32 +82,6 @@
|
||||
"print(\"Gemini 1.5: \" + gemini_15.invoke(\"what's your name\").content + \"\\n\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fff9a4c8-b6ee-4a1a-8d3d-0ecaa312d4ed",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Simple config example"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "75c25d39-bf47-4b51-a6c6-64d9c572bfd6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"user_config = {\n",
|
||||
" \"model\": \"...user-specified...\",\n",
|
||||
" \"model_provider\": \"...user-specified...\",\n",
|
||||
" \"temperature\": 0,\n",
|
||||
" \"max_tokens\": 1000,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"llm = init_chat_model(**user_config)\n",
|
||||
"llm.invoke(\"what's your name\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f811f219-5e78-4b62-b495-915d52a22532",
|
||||
@@ -125,12 +105,215 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "da07b5c0-d2e6-42e4-bfcd-2efcfaae6221",
|
||||
"cell_type": "markdown",
|
||||
"id": "476a44db-c50d-4846-951d-0f1c9ba8bbaa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"source": [
|
||||
"## Creating a configurable model\n",
|
||||
"\n",
|
||||
"You can also create a runtime-configurable model by specifying `configurable_fields`. If you don't specify a `model` value, then \"model\" and \"model_provider\" be configurable by default."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "6c037f27-12d7-4e83-811e-4245c0e3ba58",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"I'm an AI language model created by OpenAI, and I don't have a personal name. You can call me Assistant or any other name you prefer! How can I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 37, 'prompt_tokens': 11, 'total_tokens': 48}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_d576307f90', 'finish_reason': 'stop', 'logprobs': None}, id='run-5428ab5c-b5c0-46de-9946-5d4ca40dbdc8-0', usage_metadata={'input_tokens': 11, 'output_tokens': 37, 'total_tokens': 48})"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"configurable_model = init_chat_model(temperature=0)\n",
|
||||
"\n",
|
||||
"configurable_model.invoke(\n",
|
||||
" \"what's your name\", config={\"configurable\": {\"model\": \"gpt-4o\"}}\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "321e3036-abd2-4e1f-bcc6-606efd036954",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", response_metadata={'id': 'msg_012XvotUJ3kGLXJUWKBVxJUi', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-1ad1eefe-f1c6-4244-8bc6-90e2cb7ee554-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"configurable_model.invoke(\n",
|
||||
" \"what's your name\", config={\"configurable\": {\"model\": \"claude-3-5-sonnet-20240620\"}}\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7f3b3d4a-4066-45e4-8297-ea81ac8e70b7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Configurable model with default values\n",
|
||||
"\n",
|
||||
"We can create a configurable model with default model values, specify which parameters are configurable, and add prefixes to configurable params:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "814a2289-d0db-401e-b555-d5116112b413",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"I'm an AI language model created by OpenAI, and I don't have a personal name. You can call me Assistant or any other name you prefer! How can I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 37, 'prompt_tokens': 11, 'total_tokens': 48}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_ce0793330f', 'finish_reason': 'stop', 'logprobs': None}, id='run-3923e328-7715-4cd6-b215-98e4b6bf7c9d-0', usage_metadata={'input_tokens': 11, 'output_tokens': 37, 'total_tokens': 48})"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"first_llm = init_chat_model(\n",
|
||||
" model=\"gpt-4o\",\n",
|
||||
" temperature=0,\n",
|
||||
" configurable_fields=(\"model\", \"model_provider\", \"temperature\", \"max_tokens\"),\n",
|
||||
" config_prefix=\"first\", # useful when you have a chain with multiple models\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"first_llm.invoke(\"what's your name\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "6c8755ba-c001-4f5a-a497-be3f1db83244",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", response_metadata={'id': 'msg_01RyYR64DoMPNCfHeNnroMXm', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-22446159-3723-43e6-88df-b84797e7751d-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"first_llm.invoke(\n",
|
||||
" \"what's your name\",\n",
|
||||
" config={\n",
|
||||
" \"configurable\": {\n",
|
||||
" \"first_model\": \"claude-3-5-sonnet-20240620\",\n",
|
||||
" \"first_temperature\": 0.5,\n",
|
||||
" \"first_max_tokens\": 100,\n",
|
||||
" }\n",
|
||||
" },\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0072b1a3-7e44-4b4e-8b07-efe1ba91a689",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using a configurable model declaratively\n",
|
||||
"\n",
|
||||
"We can call declarative operations like `bind_tools`, `with_structured_output`, `with_configurable`, etc. on a configurable model and chain a configurable model in the same way that we would a regularly instantiated chat model object."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "067dabee-1050-4110-ae24-c48eba01e13b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'GetPopulation',\n",
|
||||
" 'args': {'location': 'Los Angeles, CA'},\n",
|
||||
" 'id': 'call_sYT3PFMufHGWJD32Hi2CTNUP'},\n",
|
||||
" {'name': 'GetPopulation',\n",
|
||||
" 'args': {'location': 'New York, NY'},\n",
|
||||
" 'id': 'call_j1qjhxRnD3ffQmRyqjlI1Lnk'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class GetWeather(BaseModel):\n",
|
||||
" \"\"\"Get the current weather in a given location\"\"\"\n",
|
||||
"\n",
|
||||
" location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class GetPopulation(BaseModel):\n",
|
||||
" \"\"\"Get the current population in a given location\"\"\"\n",
|
||||
"\n",
|
||||
" location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm = init_chat_model(temperature=0)\n",
|
||||
"llm_with_tools = llm.bind_tools([GetWeather, GetPopulation])\n",
|
||||
"\n",
|
||||
"llm_with_tools.invoke(\n",
|
||||
" \"what's bigger in 2024 LA or NYC\", config={\"configurable\": {\"model\": \"gpt-4o\"}}\n",
|
||||
").tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "e57dfe9f-cd24-4e37-9ce9-ccf8daf78f89",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'GetPopulation',\n",
|
||||
" 'args': {'location': 'Los Angeles, CA'},\n",
|
||||
" 'id': 'toolu_01CxEHxKtVbLBrvzFS7GQ5xR'},\n",
|
||||
" {'name': 'GetPopulation',\n",
|
||||
" 'args': {'location': 'New York City, NY'},\n",
|
||||
" 'id': 'toolu_013A79qt5toWSsKunFBDZd5S'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_tools.invoke(\n",
|
||||
" \"what's bigger in 2024 LA or NYC\",\n",
|
||||
" config={\"configurable\": {\"model\": \"claude-3-5-sonnet-20240620\"}},\n",
|
||||
").tool_calls"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -149,7 +332,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
"\n",
|
||||
"Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls.\n",
|
||||
"\n",
|
||||
"This guide requires `langchain-openai >= 0.1.8`."
|
||||
"This guide requires `langchain-openai >= 0.1.9`."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -153,7 +153,7 @@
|
||||
"\n",
|
||||
"#### OpenAI\n",
|
||||
"\n",
|
||||
"For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.8` and can be enabled by setting `stream_usage=True`. This attribute can also be set when `ChatOpenAI` is instantiated.\n",
|
||||
"For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.9` and can be enabled by setting `stream_usage=True`. This attribute can also be set when `ChatOpenAI` is instantiated.\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
":::note\n",
|
||||
|
||||
@@ -300,7 +300,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 2,
|
||||
"id": "ac9295d3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -312,10 +312,8 @@
|
||||
"\n",
|
||||
"## Quick Install\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"# Hopefully this code block isn't split\n",
|
||||
"pip install langchain\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"As an open-source project in a rapidly developing field, we are extremely open to contributions.\n",
|
||||
"\"\"\""
|
||||
@@ -323,7 +321,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 3,
|
||||
"id": "3a0cb17a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -332,15 +330,14 @@
|
||||
"text/plain": [
|
||||
"[Document(page_content='# 🦜️🔗 LangChain'),\n",
|
||||
" Document(page_content='⚡ Building applications with LLMs through composability ⚡'),\n",
|
||||
" Document(page_content='## Quick Install\\n\\n```bash'),\n",
|
||||
" Document(page_content='## Quick Install'),\n",
|
||||
" Document(page_content=\"# Hopefully this code block isn't split\"),\n",
|
||||
" Document(page_content='pip install langchain'),\n",
|
||||
" Document(page_content='```'),\n",
|
||||
" Document(page_content='As an open-source project in a rapidly developing field, we'),\n",
|
||||
" Document(page_content='are extremely open to contributions.')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -742,7 +739,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -48,20 +48,10 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": null,
|
||||
"id": "40ed76a2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[33mWARNING: You are using pip version 22.0.4; however, version 24.0 is available.\n",
|
||||
"You should consider upgrading via the '/Users/jacoblee/.pyenv/versions/3.10.5/bin/python -m pip install --upgrade pip' command.\u001b[0m\u001b[33m\n",
|
||||
"\u001b[0mNote: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain langchain-openai\n",
|
||||
"\n",
|
||||
|
||||
@@ -220,6 +220,57 @@
|
||||
"pretty_print_docs(compressed_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "14002ec8-7ee5-4f91-9315-dd21c3808776",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### `LLMListwiseRerank`\n",
|
||||
"\n",
|
||||
"[LLMListwiseRerank](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html) uses [zero-shot listwise document reranking](https://arxiv.org/pdf/2305.02156) and functions similarly to `LLMChainFilter` as a robust but more expensive option. It is recommended to use a more powerful LLM.\n",
|
||||
"\n",
|
||||
"Note that `LLMListwiseRerank` requires a model with the [with_structured_output](/docs/integrations/chat/) method implemented."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "4ab9ee9f-917e-4d6f-9344-eb7f01533228",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Document 1:\n",
|
||||
"\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.retrievers.document_compressors import LLMListwiseRerank\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
|
||||
"\n",
|
||||
"_filter = LLMListwiseRerank.from_llm(llm, top_n=1)\n",
|
||||
"compression_retriever = ContextualCompressionRetriever(\n",
|
||||
" base_compressor=_filter, base_retriever=retriever\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"compressed_docs = compression_retriever.invoke(\n",
|
||||
" \"What did the president say about Ketanji Jackson Brown\"\n",
|
||||
")\n",
|
||||
"pretty_print_docs(compressed_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7194da42",
|
||||
@@ -295,7 +346,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 8,
|
||||
"id": "617a1756",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "9a8bceb3-95bd-4496-bb9e-57655136e070",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to use Runnables as Tools\n",
|
||||
"# How to convert Runnables as Tools\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
@@ -180,7 +180,7 @@
|
||||
"id": "32b1a992-8997-4c98-8eb2-c9fe9431b799",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Alternatively, we can add typing information via [Runnable.with_types](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_types):"
|
||||
"Alternatively, the schema can be fully specified by directly passing the desired [args_schema](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool.args_schema) for the tool:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -190,10 +190,18 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"as_tool = runnable.with_types(input_type=Args).as_tool(\n",
|
||||
" name=\"My tool\",\n",
|
||||
" description=\"Explanation of when to use tool.\",\n",
|
||||
")"
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class GSchema(BaseModel):\n",
|
||||
" \"\"\"Apply a function to an integer and list of integers.\"\"\"\n",
|
||||
"\n",
|
||||
" a: int = Field(..., description=\"Integer\")\n",
|
||||
" b: List[int] = Field(..., description=\"List of ints\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"runnable = RunnableLambda(g)\n",
|
||||
"as_tool = runnable.as_tool(GSchema)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -259,9 +267,9 @@
|
||||
"We first instantiate a chat model that supports [tool calling](/docs/how_to/tool_calling/):\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"<ChatModelTabs\n",
|
||||
" customVarName=\"llm\"\n",
|
||||
"/>\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs customVarName=\"llm\" />\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -131,7 +131,7 @@
|
||||
"source": [
|
||||
"## Base Chat Model\n",
|
||||
"\n",
|
||||
"Let's implement a chat model that echoes back the first `n` characetrs of the last message in the prompt!\n",
|
||||
"Let's implement a chat model that echoes back the first `n` characters of the last message in the prompt!\n",
|
||||
"\n",
|
||||
"To do so, we will inherit from `BaseChatModel` and we'll need to implement the following:\n",
|
||||
"\n",
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "5436020b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to create custom tools\n",
|
||||
"# How to create tools\n",
|
||||
"\n",
|
||||
"When constructing an agent, you will need to provide it with a list of `Tool`s that it can use. Besides the actual function that is called, the Tool consists of several components:\n",
|
||||
"\n",
|
||||
@@ -16,13 +16,15 @@
|
||||
"| args_schema | Pydantic BaseModel | Optional but recommended, can be used to provide more information (e.g., few-shot examples) or validation for expected parameters |\n",
|
||||
"| return_direct | boolean | Only relevant for agents. When True, after invoking the given tool, the agent will stop and return the result direcly to the user. |\n",
|
||||
"\n",
|
||||
"LangChain provides 3 ways to create tools:\n",
|
||||
"LangChain supports the creation of tools from:\n",
|
||||
"\n",
|
||||
"1. Using [@tool decorator](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html#langchain_core.tools.tool) -- the simplest way to define a custom tool.\n",
|
||||
"2. Using [StructuredTool.from_function](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html#langchain_core.tools.StructuredTool.from_function) class method -- this is similar to the `@tool` decorator, but allows more configuration and specification of both sync and async implementations.\n",
|
||||
"1. Functions;\n",
|
||||
"2. LangChain [Runnables](/docs/concepts#runnable-interface);\n",
|
||||
"3. By sub-classing from [BaseTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html) -- This is the most flexible method, it provides the largest degree of control, at the expense of more effort and code.\n",
|
||||
"\n",
|
||||
"The `@tool` or the `StructuredTool.from_function` class method should be sufficient for most use cases.\n",
|
||||
"Creating tools from functions may be sufficient for most use cases, and can be done via a simple [@tool decorator](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html#langchain_core.tools.tool). If more configuration is needed-- e.g., specification of both sync and async implementations-- one can also use the [StructuredTool.from_function](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html#langchain_core.tools.StructuredTool.from_function) class method.\n",
|
||||
"\n",
|
||||
"In this guide we provide an overview of these methods.\n",
|
||||
"\n",
|
||||
":::{.callout-tip}\n",
|
||||
"\n",
|
||||
@@ -35,7 +37,9 @@
|
||||
"id": "c7326b23",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## @tool decorator\n",
|
||||
"## Creating tools from functions\n",
|
||||
"\n",
|
||||
"### @tool decorator\n",
|
||||
"\n",
|
||||
"This `@tool` decorator is the simplest way to define a custom tool. The decorator uses the function name as the tool name by default, but this can be overridden by passing a string as the first argument. Additionally, the decorator will use the function's docstring as the tool's description - so a docstring MUST be provided. "
|
||||
]
|
||||
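A minimal sketch of both behaviors (the `multiply` name here is just an example):

```python
from langchain_core.tools import tool


@tool("multiplication-tool")  # optional: override the default tool name
def multiply(a: int, b: int) -> int:
    """Multiply two numbers."""  # the docstring becomes the tool's description
    return a * b


print(multiply.name)         # multiplication-tool
print(multiply.description)  # Multiply two numbers.
```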
@@ -51,7 +55,7 @@
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"multiply\n",
|
||||
"multiply(a: int, b: int) -> int - Multiply two numbers.\n",
|
||||
"Multiply two numbers.\n",
|
||||
"{'a': {'title': 'A', 'type': 'integer'}, 'b': {'title': 'B', 'type': 'integer'}}\n"
|
||||
]
|
||||
}
|
||||
@@ -96,6 +100,57 @@
|
||||
" return a * b"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8f0edc51-c586-414c-8941-c8abe779943f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that `@tool` supports parsing of annotations, nested schemas, and other features:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "5626423f-053e-4a66-adca-1d794d835397",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'multiply_by_maxSchema',\n",
|
||||
" 'description': 'Multiply a by the maximum of b.',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'a': {'title': 'A',\n",
|
||||
" 'description': 'scale factor',\n",
|
||||
" 'type': 'string'},\n",
|
||||
" 'b': {'title': 'B',\n",
|
||||
" 'description': 'list of ints over which to take maximum',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'integer'}}},\n",
|
||||
" 'required': ['a', 'b']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Annotated, List\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply_by_max(\n",
|
||||
" a: Annotated[str, \"scale factor\"],\n",
|
||||
" b: Annotated[List[int], \"list of ints over which to take maximum\"],\n",
|
||||
") -> int:\n",
|
||||
" \"\"\"Multiply a by the maximum of b.\"\"\"\n",
|
||||
" return a * max(b)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"multiply_by_max.args_schema.schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "98d6eee9",
|
||||
@@ -106,7 +161,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 4,
|
||||
"id": "9216d03a-f6ea-4216-b7e1-0661823a4c0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -115,7 +170,7 @@
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"multiplication-tool\n",
|
||||
"multiplication-tool(a: int, b: int) -> int - Multiply two numbers.\n",
|
||||
"Multiply two numbers.\n",
|
||||
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n",
|
||||
"True\n"
|
||||
]
|
||||
@@ -145,17 +200,82 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b63fcc3b",
|
||||
"id": "33a9e94d-0b60-48f3-a4c2-247dce096e66",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## StructuredTool\n",
|
||||
"\n",
|
||||
"The `StrurcturedTool.from_function` class method provides a bit more configurability than the `@tool` decorator, without requiring much additional code."
|
||||
"#### Docstring parsing"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6d0cb586-93d4-4ff1-9779-71df7853cb68",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`@tool` can optionally parse [Google Style docstrings](https://google.github.io/styleguide/pyguide.html#383-functions-and-methods) and associate the docstring components (such as arg descriptions) to the relevant parts of the tool schema. To toggle this behavior, specify `parse_docstring`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 5,
|
||||
"id": "336f5538-956e-47d5-9bde-b732559f9e61",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'fooSchema',\n",
|
||||
" 'description': 'The foo.',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'bar': {'title': 'Bar',\n",
|
||||
" 'description': 'The bar.',\n",
|
||||
" 'type': 'string'},\n",
|
||||
" 'baz': {'title': 'Baz', 'description': 'The baz.', 'type': 'integer'}},\n",
|
||||
" 'required': ['bar', 'baz']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"@tool(parse_docstring=True)\n",
|
||||
"def foo(bar: str, baz: int) -> str:\n",
|
||||
" \"\"\"The foo.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" bar: The bar.\n",
|
||||
" baz: The baz.\n",
|
||||
" \"\"\"\n",
|
||||
" return bar\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"foo.args_schema.schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f18a2503-5393-421b-99fa-4a01dd824d0e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::{.callout-caution}\n",
|
||||
"By default, `@tool(parse_docstring=True)` will raise `ValueError` if the docstring does not parse correctly. See [API Reference](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) for detail and examples.\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b63fcc3b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### StructuredTool\n",
|
||||
"\n",
|
||||
"The `StructuredTool.from_function` class method provides a bit more configurability than the `@tool` decorator, without requiring much additional code."
|
||||
]
|
||||
},
|
||||
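A sketch of that pattern, reusing the `multiply` example from above (the async variant is the kind of extra configuration `from_function` enables):

```python
from langchain_core.tools import StructuredTool


def multiply(a: int, b: int) -> int:
    """Multiply two numbers."""
    return a * b


async def amultiply(a: int, b: int) -> int:
    """Multiply two numbers."""
    return a * b


# from_function can register sync and async implementations together
calculator = StructuredTool.from_function(func=multiply, coroutine=amultiply)

print(calculator.invoke({"a": 2, "b": 3}))  # 6
```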
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "564fbe6f-11df-402d-b135-ef6ff25e1e63",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -198,7 +318,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 7,
|
||||
"id": "6bc055d4-1fbe-4db5-8881-9c382eba6b1b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -208,7 +328,7 @@
|
||||
"text": [
|
||||
"6\n",
|
||||
"Calculator\n",
|
||||
"Calculator(a: int, b: int) -> int - multiply numbers\n",
|
||||
"multiply numbers\n",
|
||||
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n"
|
||||
]
|
||||
}
|
||||
@@ -239,6 +359,63 @@
|
||||
"print(calculator.args)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5517995d-54e3-449b-8fdb-03561f5e4647",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Creating tools from Runnables\n",
|
||||
"\n",
|
||||
"LangChain [Runnables](/docs/concepts#runnable-interface) that accept string or `dict` input can be converted to tools using the [as_tool](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.as_tool) method, which allows for the specification of names, descriptions, and additional schema information for arguments.\n",
|
||||
"\n",
|
||||
"Example usage:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "8ef593c5-cf72-4c10-bfc9-7d21874a0c24",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'answer_style': {'title': 'Answer Style', 'type': 'string'}}"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.language_models import GenericFakeChatModel\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"human\", \"Hello. Please respond in the style of {answer_style}.\")]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Placeholder LLM\n",
|
||||
"llm = GenericFakeChatModel(messages=iter([\"hello matey\"]))\n",
|
||||
"\n",
|
||||
"chain = prompt | llm | StrOutputParser()\n",
|
||||
"\n",
|
||||
"as_tool = chain.as_tool(\n",
|
||||
" name=\"Style responder\", description=\"Description of when to use tool.\"\n",
|
||||
")\n",
|
||||
"as_tool.args"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0521b787-a146-45a6-8ace-ae1ac4669dd7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"See [this guide](/docs/how_to/convert_runnable_to_tool) for more detail."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b840074b-9c10-4ca0-aed8-626c52b2398f",
|
||||
@@ -251,7 +428,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"execution_count": 10,
|
||||
"id": "1dad8f8e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -300,7 +477,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 11,
|
||||
"id": "bb551c33",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -351,7 +528,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 12,
|
||||
"id": "6615cb77-fd4c-4676-8965-f92cc71d4944",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -383,7 +560,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 13,
|
||||
"id": "bb2af583-eadd-41f4-a645-bf8748bd3dcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -428,7 +605,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 14,
|
||||
"id": "4ad0932c-8610-4278-8c57-f9218f654c8a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -473,7 +650,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 15,
|
||||
"id": "7094c0e8-6192-4870-a942-aad5b5ae48fd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -496,7 +673,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 16,
|
||||
"id": "b4d22022-b105-4ccc-a15b-412cb9ea3097",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -506,7 +683,7 @@
|
||||
"'Error: There is no city by the name of foobar.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -530,7 +707,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": 17,
|
||||
"id": "3fad1728-d367-4e1b-9b54-3172981271cf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -540,7 +717,7 @@
|
||||
"\"There is no such city, but it's probably above 0K there!\""
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -564,7 +741,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"execution_count": 18,
|
||||
"id": "ebfe7c1f-318d-4e58-99e1-f31e69473c46",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -574,7 +751,7 @@
|
||||
"'The following errors occurred during tool execution: `Error: There is no city by the name of foobar.`'"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -591,13 +768,189 @@
|
||||
"\n",
|
||||
"get_weather_tool.invoke({\"city\": \"foobar\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1a8d8383-11b3-445e-956f-df4e96995e00",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Returning artifacts of Tool execution\n",
|
||||
"\n",
|
||||
"Sometimes there are artifacts of a tool's execution that we want to make accessible to downstream components in our chain or agent, but that we don't want to expose to the model itself. For example if a tool returns custom objects like Documents, we may want to pass some view or metadata about this output to the model without passing the raw output to the model. At the same time, we may want to be able to access this full output elsewhere, for example in downstream tools.\n",
|
||||
"\n",
|
||||
"The Tool and [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html) interfaces make it possible to distinguish between the parts of the tool output meant for the model (this is the ToolMessage.content) and those parts which are meant for use outside the model (ToolMessage.artifact).\n",
|
||||
"\n",
|
||||
":::info Requires ``langchain-core >= 0.2.19``\n",
|
||||
"\n",
|
||||
"This functionality was added in ``langchain-core == 0.2.19``. Please make sure your package is up to date.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"If we want our tool to distinguish between message content and other artifacts, we need to specify `response_format=\"content_and_artifact\"` when defining our tool and make sure that we return a tuple of (content, artifact):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "14905425-0334-43a0-9de9-5bcf622ede0e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import random\n",
|
||||
"from typing import List, Tuple\n",
|
||||
"\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(response_format=\"content_and_artifact\")\n",
|
||||
"def generate_random_ints(min: int, max: int, size: int) -> Tuple[str, List[int]]:\n",
|
||||
" \"\"\"Generate size random ints in the range [min, max].\"\"\"\n",
|
||||
" array = [random.randint(min, max) for _ in range(size)]\n",
|
||||
" content = f\"Successfully generated array of {size} random ints in [{min}, {max}].\"\n",
|
||||
" return content, array"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "49f057a6-8938-43ea-8faf-ae41e797ceb8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we invoke our tool directly with the tool arguments, we'll get back just the content part of the output:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "0f2e1528-404b-46e6-b87c-f0957c4b9217",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Successfully generated array of 10 random ints in [0, 9].'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"generate_random_ints.invoke({\"min\": 0, \"max\": 9, \"size\": 10})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1e62ebba-1737-4b97-b61a-7313ade4e8c2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we invoke our tool with a ToolCall (like the ones generated by tool-calling models), we'll get back a ToolMessage that contains both the content and artifact generated by the Tool:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "cc197777-26eb-46b3-a83b-c2ce116c6311",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ToolMessage(content='Successfully generated array of 10 random ints in [0, 9].', name='generate_random_ints', tool_call_id='123', artifact=[1, 4, 2, 5, 3, 9, 0, 4, 7, 7])"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"generate_random_ints.invoke(\n",
|
||||
" {\n",
|
||||
" \"name\": \"generate_random_ints\",\n",
|
||||
" \"args\": {\"min\": 0, \"max\": 9, \"size\": 10},\n",
|
||||
" \"id\": \"123\", # required\n",
|
||||
" \"type\": \"tool_call\", # required\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dfdc1040-bf25-4790-b4c3-59452db84e11",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can do the same when subclassing BaseTool:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "fe1a09d1-378b-4b91-bb5e-0697c3d7eb92",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import BaseTool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class GenerateRandomFloats(BaseTool):\n",
|
||||
" name: str = \"generate_random_floats\"\n",
|
||||
" description: str = \"Generate size random floats in the range [min, max].\"\n",
|
||||
" response_format: str = \"content_and_artifact\"\n",
|
||||
"\n",
|
||||
" ndigits: int = 2\n",
|
||||
"\n",
|
||||
" def _run(self, min: float, max: float, size: int) -> Tuple[str, List[float]]:\n",
|
||||
" range_ = max - min\n",
|
||||
" array = [\n",
|
||||
" round(min + (range_ * random.random()), ndigits=self.ndigits)\n",
|
||||
" for _ in range(size)\n",
|
||||
" ]\n",
|
||||
" content = f\"Generated {size} floats in [{min}, {max}], rounded to {self.ndigits} decimals.\"\n",
|
||||
" return content, array\n",
|
||||
"\n",
|
||||
" # Optionally define an equivalent async method\n",
|
||||
"\n",
|
||||
" # async def _arun(self, min: float, max: float, size: int) -> Tuple[str, List[float]]:\n",
|
||||
" # ..."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "8c3d16f6-1c4a-48ab-b05a-38547c592e79",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ToolMessage(content='Generated 3 floats in [0.1, 3.3333], rounded to 4 decimals.', name='generate_random_floats', tool_call_id='123', artifact=[1.4277, 0.7578, 2.4871])"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"rand_gen = GenerateRandomFloats(ndigits=4)\n",
|
||||
"\n",
|
||||
"rand_gen.invoke(\n",
|
||||
" {\n",
|
||||
" \"name\": \"generate_random_floats\",\n",
|
||||
" \"args\": {\"min\": 0.1, \"max\": 3.3333, \"size\": 3},\n",
|
||||
" \"id\": \"123\",\n",
|
||||
" \"type\": \"tool_call\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"display_name": "poetry-venv-311",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
"name": "poetry-venv-311"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -609,7 +962,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
"version": "3.11.9"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
@@ -67,15 +67,16 @@ If you'd prefer not to set an environment variable you can pass the key in direc
|
||||
```python
|
||||
from langchain_cohere import CohereEmbeddings
|
||||
|
||||
embeddings_model = CohereEmbeddings(cohere_api_key="...")
|
||||
embeddings_model = CohereEmbeddings(cohere_api_key="...", model='embed-english-v3.0')
|
||||
```
|
||||
|
||||
Otherwise you can initialize without any params:
|
||||
Otherwise you can initialize it as shown below:
|
||||
```python
|
||||
from langchain_cohere import CohereEmbeddings
|
||||
|
||||
embeddings_model = CohereEmbeddings()
|
||||
embeddings_model = CohereEmbeddings(model='embed-english-v3.0')
|
||||
```
|
||||
Note that the `model` parameter is mandatory when initializing the `CohereEmbeddings` class.
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="huggingface" label="Hugging Face">
|
||||
|
||||
@@ -9,11 +9,13 @@
|
||||
"source": [
|
||||
"# Hybrid Search\n",
|
||||
"\n",
|
||||
"The standard search in LangChain is done by vector similarity. However, a number of vectorstores implementations (Astra DB, ElasticSearch, Neo4J, AzureSearch, ...) also support more advanced search combining vector similarity search and other search techniques (full-text, BM25, and so on). This is generally referred to as \"Hybrid\" search.\n",
|
||||
"The standard search in LangChain is done by vector similarity. However, a number of vectorstores implementations (Astra DB, ElasticSearch, Neo4J, AzureSearch, Qdrant...) also support more advanced search combining vector similarity search and other search techniques (full-text, BM25, and so on). This is generally referred to as \"Hybrid\" search.\n",
|
||||
"\n",
|
||||
"**Step 1: Make sure the vectorstore you are using supports hybrid search**\n",
|
||||
"\n",
|
||||
"At the moment, there is no unified way to perform hybrid search in LangChain. Each vectorstore may have their own way to do it. This is generally exposed as a keyword argument that is passed in during `similarity_search`. By reading the documentation or source code, figure out whether the vectorstore you are using supports hybrid search, and, if so, how to use it.\n",
|
||||
"At the moment, there is no unified way to perform hybrid search in LangChain. Each vectorstore may have their own way to do it. This is generally exposed as a keyword argument that is passed in during `similarity_search`.\n",
|
||||
"\n",
|
||||
"By reading the documentation or source code, figure out whether the vectorstore you are using supports hybrid search, and, if so, how to use it.\n",
|
||||
"\n",
|
||||
"**Step 2: Add that parameter as a configurable field for the chain**\n",
|
||||
"\n",
|
||||
|
||||
@@ -44,6 +44,7 @@ This highlights functionality that is core to using LangChain.
|
||||
- [How to: inspect runnables](/docs/how_to/inspect)
|
||||
- [How to: add fallbacks to a runnable](/docs/how_to/fallbacks)
|
||||
- [How to: migrate chains to LCEL](/docs/how_to/migrate_chains)
|
||||
- [How to: pass runtime secrets to a runnable](/docs/how_to/runnable_runtime_secrets)
|
||||
|
||||
## Components
|
||||
|
||||
@@ -80,11 +81,10 @@ These are the core building blocks you can use when building applications.
|
||||
- [How to: stream a response back](/docs/how_to/chat_streaming)
|
||||
- [How to: track token usage](/docs/how_to/chat_token_usage_tracking)
|
||||
- [How to: track response metadata across providers](/docs/how_to/response_metadata)
|
||||
- [How to: let your end users choose their model](/docs/how_to/chat_models_universal_init/)
|
||||
- [How to: use chat model to call tools](/docs/how_to/tool_calling)
|
||||
- [How to: stream tool calls](/docs/how_to/tool_streaming)
|
||||
- [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot)
|
||||
- [How to: bind model-specific formated tools](/docs/how_to/tools_model_specific)
|
||||
- [How to: bind model-specific formatted tools](/docs/how_to/tools_model_specific)
|
||||
- [How to: force a specific tool call](/docs/how_to/tool_choice)
|
||||
- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/)
|
||||
|
||||
@@ -185,17 +185,21 @@ Indexing is the process of keeping your vectorstore in-sync with the underlying
|
||||
|
||||
LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call. Refer [here](/docs/integrations/tools/) for a list of pre-built tools.
|
||||
|
||||
- [How to: create custom tools](/docs/how_to/custom_tools)
|
||||
- [How to: use built-in tools and built-in toolkits](/docs/how_to/tools_builtin)
|
||||
- [How to: convert Runnables to tools](/docs/how_to/convert_runnable_to_tool)
|
||||
- [How to: use chat model to call tools](/docs/how_to/tool_calling)
|
||||
- [How to: pass tool results back to model](/docs/how_to/tool_results_pass_to_model)
|
||||
- [How to: add ad-hoc tool calling capability to LLMs and chat models](/docs/how_to/tools_prompting)
|
||||
- [How to: create tools](/docs/how_to/custom_tools)
|
||||
- [How to: use built-in tools and toolkits](/docs/how_to/tools_builtin)
|
||||
- [How to: use chat models to call tools](/docs/how_to/tool_calling)
|
||||
- [How to: pass tool outputs to chat models](/docs/how_to/tool_results_pass_to_model)
|
||||
- [How to: pass run time values to tools](/docs/how_to/tool_runtime)
|
||||
- [How to: add a human in the loop to tool usage](/docs/how_to/tools_human)
|
||||
- [How to: handle errors when calling tools](/docs/how_to/tools_error)
|
||||
- [How to: disable parallel tool calling](/docs/how_to/tool_choice)
|
||||
- [How to: stream events from within a tool](/docs/how_to/tool_stream_events)
|
||||
- [How to: add a human-in-the-loop for tools](/docs/how_to/tools_human)
|
||||
- [How to: handle tool errors](/docs/how_to/tools_error)
|
||||
- [How to: force models to call a tool](/docs/how_to/tool_choice)
|
||||
- [How to: disable parallel tool calling](/docs/how_to/tool_calling_parallel)
|
||||
- [How to: access the `RunnableConfig` from a tool](/docs/how_to/tool_configure)
|
||||
- [How to: stream events from a tool](/docs/how_to/tool_stream_events)
|
||||
- [How to: return artifacts from a tool](/docs/how_to/tool_artifacts/)
|
||||
- [How to: convert Runnables to tools](/docs/how_to/convert_runnable_to_tool)
|
||||
- [How to: add ad-hoc tool calling capability to models](/docs/how_to/tools_prompting)
|
||||
- [How to: pass in runtime secrets](/docs/how_to/runnable_runtime_secrets)
|
||||
|
||||
### Multimodal
|
||||
|
||||
@@ -223,6 +227,7 @@ For in depth how-to guides for agents, please check out [LangGraph](https://lang
|
||||
- [How to: pass callbacks into a module constructor](/docs/how_to/callbacks_constructor)
|
||||
- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
|
||||
- [How to: use callbacks in async environments](/docs/how_to/callbacks_async)
|
||||
- [How to: dispatch custom callback events](/docs/how_to/callbacks_custom_events)
|
||||
|
||||
### Custom
|
||||
|
||||
@@ -235,6 +240,7 @@ All of LangChain components can easily be extended to support your own versions.
|
||||
- [How to: write a custom output parser class](/docs/how_to/output_parser_custom)
|
||||
- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
|
||||
- [How to: define a custom tool](/docs/how_to/custom_tools)
|
||||
- [How to: dispatch custom callback events](/docs/how_to/callbacks_custom_events)
|
||||
|
||||
### Serialization
|
||||
- [How to: save and load LangChain objects](/docs/how_to/serialization)
|
||||
|
||||
@@ -60,7 +60,7 @@
|
||||
" * document addition by id (`add_documents` method with `ids` argument)\n",
|
||||
" * delete by id (`delete` method with `ids` argument)\n",
|
||||
"\n",
|
||||
"Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `AzureCosmosDBNoSqlVectorSearch`, `AzureCosmosDBVectorSearch`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SingleStoreDB`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
|
||||
"Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `AzureCosmosDBNoSqlVectorSearch`, `AzureCosmosDBVectorSearch`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MongoDBAtlasVectorSearch`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SingleStoreDB`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
|
||||
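For reference, a minimal sketch of indexing against one of these stores (Chroma and fake embeddings here are just stand-ins for your own setup):

```python
from langchain.indexes import SQLRecordManager, index
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document

vectorstore = Chroma(
    collection_name="indexing_demo", embedding_function=FakeEmbeddings(size=32)
)

# The record manager tracks what has been indexed between runs
record_manager = SQLRecordManager(
    "chroma/indexing_demo", db_url="sqlite:///record_manager_cache.sql"
)
record_manager.create_schema()

docs = [Document(page_content="hello", metadata={"source": "a.txt"})]

# Re-running this call is idempotent: unchanged docs are skipped,
# and stale docs from the same source are cleaned up.
index(docs, record_manager, vectorstore, cleanup="incremental", source_id_key="source")
```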
" \n",
|
||||
"## Caution\n",
|
||||
"\n",
|
||||
|
||||
@@ -15,7 +15,23 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": null,
|
||||
"id": "25b0b0fa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_openai langchain_community\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
|
||||
"# Please manually enter OpenAI Key"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "0aa6d335",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -23,13 +39,14 @@
|
||||
"from langchain.globals import set_llm_cache\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"# To make the caching really obvious, lets use a slower model.\n",
|
||||
"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", n=2, best_of=2)"
|
||||
"# To make the caching really obvious, lets use a slower and older model.\n",
|
||||
"# Caching supports newer chat models as well.\n",
|
||||
"llm = OpenAI(model=\"gpt-3.5-turbo-instruct\", n=2, best_of=2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 3,
|
||||
"id": "f168ff0d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -37,17 +54,17 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 13.7 ms, sys: 6.54 ms, total: 20.2 ms\n",
|
||||
"Wall time: 330 ms\n"
|
||||
"CPU times: user 546 ms, sys: 379 ms, total: 925 ms\n",
|
||||
"Wall time: 1.11 s\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"\\n\\nWhy couldn't the bicycle stand up by itself? Because it was two-tired!\""
|
||||
"\"\\nWhy don't scientists trust atoms?\\n\\nBecause they make up everything!\""
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -59,12 +76,12 @@
|
||||
"set_llm_cache(InMemoryCache())\n",
|
||||
"\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
"llm.predict(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": 4,
|
||||
"id": "ce7620fb",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -72,17 +89,17 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 436 µs, sys: 921 µs, total: 1.36 ms\n",
|
||||
"Wall time: 1.36 ms\n"
|
||||
"CPU times: user 192 µs, sys: 77 µs, total: 269 µs\n",
|
||||
"Wall time: 270 µs\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"\\n\\nWhy couldn't the bicycle stand up by itself? Because it was two-tired!\""
|
||||
"\"\\nWhy don't scientists trust atoms?\\n\\nBecause they make up everything!\""
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -90,7 +107,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The second time it is, so it goes faster\n",
|
||||
"llm.predict(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -103,7 +120,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 5,
|
||||
"id": "2e65de83",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -113,7 +130,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 6,
|
||||
"id": "0be83715",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -126,7 +143,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 7,
|
||||
"id": "9b427ce7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -134,17 +151,17 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 29.3 ms, sys: 17.3 ms, total: 46.7 ms\n",
|
||||
"Wall time: 364 ms\n"
|
||||
"CPU times: user 10.6 ms, sys: 4.21 ms, total: 14.8 ms\n",
|
||||
"Wall time: 851 ms\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\nWhy did the tomato turn red?\\n\\nBecause it saw the salad dressing!'"
|
||||
"\"\\n\\nWhy don't scientists trust atoms?\\n\\nBecause they make up everything!\""
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -152,12 +169,12 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
"llm.predict(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 8,
|
||||
"id": "87f52611",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -165,17 +182,17 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 4.58 ms, sys: 2.23 ms, total: 6.8 ms\n",
|
||||
"Wall time: 4.68 ms\n"
|
||||
"CPU times: user 59.7 ms, sys: 63.6 ms, total: 123 ms\n",
|
||||
"Wall time: 134 ms\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\nWhy did the tomato turn red?\\n\\nBecause it saw the salad dressing!'"
|
||||
"\"\\n\\nWhy don't scientists trust atoms?\\n\\nBecause they make up everything!\""
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -183,7 +200,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The second time it is, so it goes faster\n",
|
||||
"llm.predict(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -211,7 +228,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -63,6 +63,38 @@
|
||||
"Notice that if the contents of one of the messages to merge is a list of content blocks then the merged message will have a list of content blocks. And if both messages to merge have string contents then those are concatenated with a newline character."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "11f7e8d3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The `merge_message_runs` utility also works with messages composed together using the overloaded `+` operation:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b51855c5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"messages = (\n",
|
||||
" SystemMessage(\"you're a good assistant.\")\n",
|
||||
" + SystemMessage(\"you always respond with a joke.\")\n",
|
||||
" + HumanMessage([{\"type\": \"text\", \"text\": \"i wonder why it's called langchain\"}])\n",
|
||||
" + HumanMessage(\"and who is harrison chasing anyways\")\n",
|
||||
" + AIMessage(\n",
|
||||
" 'Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!'\n",
|
||||
" )\n",
|
||||
" + AIMessage(\n",
|
||||
" \"Why, he's probably chasing after the last cup of coffee in the office!\"\n",
|
||||
" )\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"merged = merge_message_runs(messages)\n",
|
||||
"print(\"\\n\\n\".join([repr(x) for x in merged]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b2eee74-71c8-4168-b968-bca580c25d18",
|
||||
|
||||
@@ -284,17 +284,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 1,
|
||||
"id": "173e1a9c-2a18-4669-b0de-136f39197786",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Arr, matey! I be sailin' the high seas with me crew, searchin' for buried treasure and adventure! How be ye doin' on this fine day?\""
|
||||
"\"Arrr, I be doin' well, me heartie! Just sailin' the high seas in search of treasure and adventure. How be ye?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -316,14 +316,20 @@
|
||||
"\n",
|
||||
"history = InMemoryChatMessageHistory()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_history():\n",
|
||||
" return history\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain = prompt | ChatOpenAI() | StrOutputParser()\n",
|
||||
"\n",
|
||||
"wrapped_chain = RunnableWithMessageHistory(chain, lambda x: history)\n",
|
||||
"wrapped_chain = RunnableWithMessageHistory(\n",
|
||||
" chain,\n",
|
||||
" get_history,\n",
|
||||
" history_messages_key=\"chat_history\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"wrapped_chain.invoke(\n",
|
||||
" {\"input\": \"how are you?\"},\n",
|
||||
" config={\"configurable\": {\"session_id\": \"42\"}},\n",
|
||||
")"
|
||||
"wrapped_chain.invoke({\"input\": \"how are you?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -340,17 +346,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 2,
|
||||
"id": "4e05994f-1fbc-4699-bf2e-62cb0e4deeb8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"Ahoy there! What be ye wantin' from this old pirate?\", response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 29, 'total_tokens': 44}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-1846d5f5-0dda-43b6-bb49-864e541f9c29-0', usage_metadata={'input_tokens': 29, 'output_tokens': 15, 'total_tokens': 44})"
|
||||
"'Ahoy matey! What can this old pirate do for ye today?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -370,9 +376,16 @@
|
||||
"\n",
|
||||
"chain = prompt | ChatOpenAI() | StrOutputParser()\n",
|
||||
"\n",
|
||||
"wrapped_chain = RunnableWithMessageHistory(chain, get_session_history)\n",
|
||||
"wrapped_chain = RunnableWithMessageHistory(\n",
|
||||
" chain,\n",
|
||||
" get_session_history,\n",
|
||||
" history_messages_key=\"chat_history\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"wrapped_chain.invoke(\"Hello!\", config={\"configurable\": {\"session_id\": \"abc123\"}})"
|
||||
"wrapped_chain.invoke(\n",
|
||||
" {\"input\": \"Hello!\"},\n",
|
||||
" config={\"configurable\": {\"session_id\": \"abc123\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -790,7 +803,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.5"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
78
docs/docs/how_to/runnable_runtime_secrets.ipynb
Normal file
@@ -0,0 +1,78 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6fcd2994-0092-4fa3-9bb1-c9c84babadc5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to pass runtime secrets to runnables\n",
|
||||
"\n",
|
||||
":::info Requires `langchain-core >= 0.2.22`\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"We can pass in secrets to our runnables at runtime using the `RunnableConfig`. Specifically we can pass in secrets with a `__` prefix to the `configurable` field. This will ensure that these secrets aren't traced as part of the invocation:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "92e42e91-c277-49de-aa7a-dfb5c993c817",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"7"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.runnables import RunnableConfig\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def foo(x: int, config: RunnableConfig) -> int:\n",
|
||||
" \"\"\"Sum x and a secret int\"\"\"\n",
|
||||
" return x + config[\"configurable\"][\"__top_secret_int\"]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"foo.invoke({\"x\": 5}, {\"configurable\": {\"__top_secret_int\": 2, \"traced_key\": \"bar\"}})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ae3a4fb9-2ce7-46b2-b654-35dff0ae7197",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Looking at the LangSmith trace for this run, we can see that \"traced_key\" was recorded (as part of Metadata) while our secret int was not: https://smith.langchain.com/public/aa7e3289-49ca-422d-a408-f6b927210170/r"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-311",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-311"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -452,7 +452,7 @@
|
||||
"source": [
|
||||
"#### Generator Functions\n",
|
||||
"\n",
|
||||
"Le'ts fix the streaming using a generator function that can operate on the **input stream**.\n",
|
||||
"Let's fix the streaming using a generator function that can operate on the **input stream**.\n",
|
||||
"\n",
|
||||
":::{.callout-tip}\n",
|
||||
"A generator function (a function that uses `yield`) allows writing code that operates on **input streams**\n",
|
||||
|
||||
396
docs/docs/how_to/tool_artifacts.ipynb
Normal file
@@ -0,0 +1,396 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "503e36ae-ca62-4f8a-880c-4fe78ff5df93",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to return artifacts from a tool\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [ToolMessage](/docs/concepts/#toolmessage)\n",
|
||||
"- [Tools](/docs/concepts/#tools)\n",
|
||||
"- [Function/tool calling](/docs/concepts/#functiontool-calling)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Tools are utilities that can be called by a model, and whose outputs are designed to be fed back to a model. Sometimes, however, there are artifacts of a tool's execution that we want to make accessible to downstream components in our chain or agent, but that we don't want to expose to the model itself. For example if a tool returns a custom object, a dataframe or an image, we may want to pass some metadata about this output to the model without passing the actual output to the model. At the same time, we may want to be able to access this full output elsewhere, for example in downstream tools.\n",
|
||||
"\n",
|
||||
"The Tool and [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html) interfaces make it possible to distinguish between the parts of the tool output meant for the model (this is the ToolMessage.content) and those parts which are meant for use outside the model (ToolMessage.artifact).\n",
|
||||
"\n",
|
||||
":::info Requires ``langchain-core >= 0.2.19``\n",
|
||||
"\n",
|
||||
"This functionality was added in ``langchain-core == 0.2.19``. Please make sure your package is up to date.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Defining the tool\n",
|
||||
"\n",
|
||||
"If we want our tool to distinguish between message content and other artifacts, we need to specify `response_format=\"content_and_artifact\"` when defining our tool and make sure that we return a tuple of (content, artifact):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "762b9199-885f-4946-9c98-cc54d72b0d76",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU \"langchain-core>=0.2.19\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "b9eb179d-1f41-4748-9866-b3d3e8c73cd0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import random\n",
|
||||
"from typing import List, Tuple\n",
|
||||
"\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(response_format=\"content_and_artifact\")\n",
|
||||
"def generate_random_ints(min: int, max: int, size: int) -> Tuple[str, List[int]]:\n",
|
||||
" \"\"\"Generate size random ints in the range [min, max].\"\"\"\n",
|
||||
" array = [random.randint(min, max) for _ in range(size)]\n",
|
||||
" content = f\"Successfully generated array of {size} random ints in [{min}, {max}].\"\n",
|
||||
" return content, array"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0ab05d25-af4a-4e5a-afe2-f090416d7ee7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invoking the tool with ToolCall\n",
|
||||
"\n",
|
||||
"If we directly invoke our tool with just the tool arguments, you'll notice that we only get back the content part of the Tool output:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "5e7d5e77-3102-4a59-8ade-e4e699dd1817",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Successfully generated array of 10 random ints in [0, 9].'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Failed to batch ingest runs: LangSmithRateLimitError('Rate limit exceeded for https://api.smith.langchain.com/runs/batch. HTTPError(\\'429 Client Error: Too Many Requests for url: https://api.smith.langchain.com/runs/batch\\', \\'{\"detail\":\"Monthly unique traces usage limit exceeded\"}\\')')\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"generate_random_ints.invoke({\"min\": 0, \"max\": 9, \"size\": 10})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "30db7228-f04c-489e-afda-9a572eaa90a1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In order to get back both the content and the artifact, we need to invoke our model with a ToolCall (which is just a dictionary with \"name\", \"args\", \"id\" and \"type\" keys), which has additional info needed to generate a ToolMessage like the tool call ID:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "da1d939d-a900-4b01-92aa-d19011a6b034",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ToolMessage(content='Successfully generated array of 10 random ints in [0, 9].', name='generate_random_ints', tool_call_id='123', artifact=[2, 8, 0, 6, 0, 0, 1, 5, 0, 0])"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"generate_random_ints.invoke(\n",
|
||||
" {\n",
|
||||
" \"name\": \"generate_random_ints\",\n",
|
||||
" \"args\": {\"min\": 0, \"max\": 9, \"size\": 10},\n",
|
||||
" \"id\": \"123\", # required\n",
|
||||
" \"type\": \"tool_call\", # required\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a3cfc03d-020b-42c7-b0f8-c824af19e45e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using with a model\n",
|
||||
"\n",
|
||||
"With a [tool-calling model](/docs/how_to/tool_calling/), we can easily use a model to call our Tool and generate ToolMessages:\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs\n",
|
||||
" customVarName=\"llm\"\n",
|
||||
"/>\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "74de0286-b003-4b48-9cdd-ecab435515ca",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | echo: false\n",
|
||||
"# | output: false\n",
|
||||
"\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"llm = ChatAnthropic(model=\"claude-3-5-sonnet-20240620\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "8a67424b-d19c-43df-ac7b-690bca42146c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'generate_random_ints',\n",
|
||||
" 'args': {'min': 1, 'max': 24, 'size': 6},\n",
|
||||
" 'id': 'toolu_01EtALY3Wz1DVYhv1TLvZGvE',\n",
|
||||
" 'type': 'tool_call'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_tools = llm.bind_tools([generate_random_ints])\n",
|
||||
"\n",
|
||||
"ai_msg = llm_with_tools.invoke(\"generate 6 positive ints less than 25\")\n",
|
||||
"ai_msg.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "00c4e906-3ca8-41e8-a0be-65cb0db7d574",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ToolMessage(content='Successfully generated array of 6 random ints in [1, 24].', name='generate_random_ints', tool_call_id='toolu_01EtALY3Wz1DVYhv1TLvZGvE', artifact=[2, 20, 23, 8, 1, 15])"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"generate_random_ints.invoke(ai_msg.tool_calls[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ddef2690-70de-4542-ab20-2337f77f3e46",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we just pass in the tool call args, we'll only get back the content:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "f4a6c9a6-0ffc-4b0e-a59f-f3c3d69d824d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Successfully generated array of 6 random ints in [1, 24].'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"generate_random_ints.invoke(ai_msg.tool_calls[0][\"args\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "98d6443b-ff41-4d91-8523-b6274fc74ee5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we wanted to declaratively create a chain, we could do this:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "eb55ec23-95a4-464e-b886-d9679bf3aaa2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[ToolMessage(content='Successfully generated array of 1 random ints in [1, 5].', name='generate_random_ints', tool_call_id='toolu_01FwYhnkwDPJPbKdGq4ng6uD', artifact=[5])]"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from operator import attrgetter\n",
|
||||
"\n",
|
||||
"chain = llm_with_tools | attrgetter(\"tool_calls\") | generate_random_ints.map()\n",
|
||||
"\n",
|
||||
"chain.invoke(\"give me a random number between 1 and 5\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4df46be2-babb-4bfe-a641-91cd3d03ffaf",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Creating from BaseTool class\n",
|
||||
"\n",
|
||||
"If you want to create a BaseTool object directly, instead of decorating a function with `@tool`, you can do so like this:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "9a9129e1-6aee-4a10-ad57-62ef3bf0276c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import BaseTool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class GenerateRandomFloats(BaseTool):\n",
|
||||
" name: str = \"generate_random_floats\"\n",
|
||||
" description: str = \"Generate size random floats in the range [min, max].\"\n",
|
||||
" response_format: str = \"content_and_artifact\"\n",
|
||||
"\n",
|
||||
" ndigits: int = 2\n",
|
||||
"\n",
|
||||
" def _run(self, min: float, max: float, size: int) -> Tuple[str, List[float]]:\n",
|
||||
" range_ = max - min\n",
|
||||
" array = [\n",
|
||||
" round(min + (range_ * random.random()), ndigits=self.ndigits)\n",
|
||||
" for _ in range(size)\n",
|
||||
" ]\n",
|
||||
" content = f\"Generated {size} floats in [{min}, {max}], rounded to {self.ndigits} decimals.\"\n",
|
||||
" return content, array\n",
|
||||
"\n",
|
||||
" # Optionally define an equivalent async method\n",
|
||||
"\n",
|
||||
" # async def _arun(self, min: float, max: float, size: int) -> Tuple[str, List[float]]:\n",
|
||||
" # ..."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "d7322619-f420-4b29-8ee5-023e693d0179",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Generated 3 floats in [0.1, 3.3333], rounded to 4 decimals.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"rand_gen = GenerateRandomFloats(ndigits=4)\n",
|
||||
"rand_gen.invoke({\"min\": 0.1, \"max\": 3.3333, \"size\": 3})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "0892f277-23a6-4bb8-a0e9-59f533ac9750",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ToolMessage(content='Generated 3 floats in [0.1, 3.3333], rounded to 4 decimals.', name='generate_random_floats', tool_call_id='123', artifact=[1.5789, 2.464, 2.2719])"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"rand_gen.invoke(\n",
|
||||
" {\n",
|
||||
" \"name\": \"generate_random_floats\",\n",
|
||||
" \"args\": {\"min\": 0.1, \"max\": 3.3333, \"size\": 3},\n",
|
||||
" \"id\": \"123\",\n",
|
||||
" \"type\": \"tool_call\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-311",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-311"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -17,7 +17,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to use a model to call tools\n",
"# How to use chat models to call tools\n",
"\n",
":::info Prerequisites\n",
"\n",
@@ -82,30 +82,24 @@
"## Passing tools to chat models\n",
"\n",
"Chat models that support tool calling features implement a `.bind_tools` method, which \n",
"receives a list of LangChain [tool objects](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool) \n",
"receives a list of functions, Pydantic models, or LangChain [tool objects](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool) \n",
"and binds them to the chat model in its expected format. Subsequent invocations of the \n",
"chat model will include tool schemas in its calls to the LLM.\n",
"\n",
"For example, we can define the schema for custom tools using the `@tool` decorator \n",
"on Python functions:"
"For example, below we implement simple tools for arithmetic:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def add(a: int, b: int) -> int:\n",
"    \"\"\"Adds a and b.\"\"\"\n",
"    return a + b\n",
"\n",
"\n",
"@tool\n",
"def multiply(a: int, b: int) -> int:\n",
"    \"\"\"Multiplies a and b.\"\"\"\n",
"    return a * b\n",
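For reference, a minimal sketch of binding the tools above and inspecting the resulting tool calls. This is illustrative, not part of the diff: it assumes `langchain-openai` is installed and `OPENAI_API_KEY` is set, and the model name is only an example.

```python
# Minimal sketch: bind the `add` and `multiply` tools to a chat model and
# inspect the parsed tool calls on the response message.
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
llm_with_tools = llm.bind_tools([add, multiply])

ai_msg = llm_with_tools.invoke("What is 3 * 12?")
# Each entry has "name", "args", and "id" keys, e.g.
# [{'name': 'multiply', 'args': {'a': 3, 'b': 12}, 'id': '...'}]
print(ai_msg.tool_calls)
```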
@@ -118,12 +112,14 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Or below, we define the schema using [Pydantic](https://docs.pydantic.dev):"
"LangChain also implements a `@tool` decorator that allows for further control of the tool schema, such as tool names and argument descriptions. See the how-to guide [here](/docs/how_to/custom_tools/#creating-tools-from-functions) for detail.\n",
"\n",
"We can also define the schema using [Pydantic](https://docs.pydantic.dev):"
]
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
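The Pydantic cell itself is elided by the diff. A sketch of what such schema definitions typically look like (illustrative, not the notebook's exact cell):

```python
# Illustrative sketch of Pydantic tool schemas; the docstrings and Field
# descriptions become the schema that is passed to the model.
from langchain_core.pydantic_v1 import BaseModel, Field


class Add(BaseModel):
    """Adds a and b."""

    a: int = Field(..., description="First integer")
    b: int = Field(..., description="Second integer")


class Multiply(BaseModel):
    """Multiplies a and b."""

    a: int = Field(..., description="First integer")
    b: int = Field(..., description="Second integer")
```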
@@ -343,7 +339,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.4"
}
},
"nbformat": 4,

@@ -4,7 +4,13 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Disabling parallel tool calling (OpenAI only)\n",
"# How to disable parallel tool calling\n",
"\n",
":::info OpenAI-specific\n",
"\n",
"This API is currently only supported by OpenAI.\n",
"\n",
":::\n",
"\n",
"OpenAI tool calling performs tool calling in parallel by default. That means that if we ask a question like \"What is the weather in Tokyo, New York, and Chicago?\" and we have a tool for getting the weather, it will call the tool 3 times in parallel. We can force it to call a single tool at most once by using the ``parallel_tool_calls`` parameter."
]
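A sketch of the behavior just described, with a hypothetical `get_weather` tool (the guide's actual tool definition is not shown in this hunk). It assumes `langchain-openai`, whose `bind_tools` accepts a `parallel_tool_calls` keyword:

```python
# Sketch: disable parallel tool calling so the model emits at most one
# tool call per turn. `get_weather` is a hypothetical stand-in tool.
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


@tool
def get_weather(city: str) -> str:
    """Get the weather for a city."""
    return f"It is sunny in {city}."


llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
llm_with_tools = llm.bind_tools([get_weather], parallel_tool_calls=False)

ai_msg = llm_with_tools.invoke("What is the weather in Tokyo, New York, and Chicago?")
print(len(ai_msg.tool_calls))  # 1 -- only a single tool call is generated
```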
@@ -99,10 +105,24 @@
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"name": "python"
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

@@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to force tool calling behavior\n",
"# How to force models to call a tool\n",
"\n",
":::info Prerequisites\n",
"\n",
@@ -125,10 +125,24 @@
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"name": "python"
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

132 docs/docs/how_to/tool_configure.ipynb Normal file
@@ -0,0 +1,132 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to access the RunnableConfig from a tool\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [LangChain Tools](/docs/concepts/#tools)\n",
"- [Custom tools](/docs/how_to/custom_tools)\n",
"- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language-lcel)\n",
"- [Configuring runnable behavior](/docs/how_to/configure/)\n",
"\n",
":::\n",
"\n",
"If you have a tool that calls chat models, retrievers, or other runnables, you may want to access internal events from those runnables or configure them with additional properties. This guide shows you how to manually pass parameters properly so that you can do this using the `astream_events()` method.\n",
"\n",
"Tools are runnables, and you can treat them the same way as any other runnable at the interface level - you can call `invoke()`, `batch()`, and `stream()` on them as normal. However, when writing custom tools, you may want to invoke other runnables like chat models or retrievers. In order to properly trace and configure those sub-invocations, you'll need to manually access and pass in the tool's current [`RunnableConfig`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.config.RunnableConfig.html) object. This guide shows you some examples of how to do that.\n",
"\n",
":::caution Compatibility\n",
"\n",
"This guide requires `langchain-core>=0.2.16`.\n",
"\n",
":::\n",
"\n",
"## Inferring by parameter type\n",
"\n",
"To reference the active config object from your custom tool, you'll need to add a parameter to your tool's signature typed as `RunnableConfig`. When you invoke your tool, LangChain will inspect your tool's signature, look for a parameter typed as `RunnableConfig`, and if it exists, populate that parameter with the correct value.\n",
"\n",
"**Note:** The actual name of the parameter doesn't matter, only the typing.\n",
"\n",
"To illustrate this, define a custom tool that takes two parameters - one typed as a string, the other typed as `RunnableConfig`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain_core"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.runnables import RunnableConfig\n",
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"async def reverse_tool(text: str, special_config_param: RunnableConfig) -> str:\n",
"    \"\"\"A test tool that combines input text with a configurable parameter.\"\"\"\n",
"    return (text + special_config_param[\"configurable\"][\"additional_field\"])[::-1]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Then, if we invoke the tool with a `config` containing a `configurable` field, we can see that `additional_field` is passed through correctly:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'321cba'"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await reverse_tool.ainvoke(\n",
"    {\"text\": \"abc\"}, config={\"configurable\": {\"additional_field\": \"123\"}}\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"You've now seen how to configure and stream events from within a tool. Next, check out the following guides for more on using tools:\n",
"\n",
"- [Stream events from child runs within a custom tool](/docs/how_to/tool_stream_events/)\n",
"- Pass [tool results back to a model](/docs/how_to/tool_results_pass_to_model)\n",
"\n",
"You can also check out some more specific uses of tool calling:\n",
"\n",
"- Building [tool-using chains and agents](/docs/how_to#tools)\n",
"- Getting [structured outputs](/docs/how_to/structured_output/) from models"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

@@ -4,14 +4,22 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to pass tool outputs to the model\n",
"# How to pass tool outputs to chat models\n",
"\n",
"If we're using the model-generated tool invocations to actually call tools and want to pass the tool results back to the model, we can do so using `ToolMessage`s. First, let's define our tools and our model."
":::info Prerequisites\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Tools](/docs/concepts/#tools)\n",
"- [Function/tool calling](/docs/concepts/#functiontool-calling)\n",
"\n",
":::\n",
"\n",
"If we're using the model-generated tool invocations to actually call tools and want to pass the tool results back to the model, we can do so using `ToolMessage`s and `ToolCall`s. First, let's define our tools and our model."
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
@@ -35,7 +43,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
@@ -54,25 +62,32 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we can use ``ToolMessage`` to pass back the output of the tool calls to the model."
"The nice thing about Tools is that if we invoke them with a ToolCall, we'll automatically get back a ToolMessage that can be fed back to the model:\n",
"\n",
":::info Requires ``langchain-core >= 0.2.19``\n",
"\n",
"This functionality was added in ``langchain-core == 0.2.19``. Please make sure your package is up to date.\n",
"\n",
":::"
]
},
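As a point of reference, here is a minimal single-call sketch of what the cell below does in a loop. It assumes the `multiply` tool defined earlier in this guide; the tool call id is hypothetical:

```python
# Illustrative sketch (not part of the diff): invoking a tool with a ToolCall
# dict returns a ToolMessage directly (requires langchain-core >= 0.2.19).
tool_call = {
    "name": "multiply",
    "args": {"a": 3, "b": 12},
    "id": "call_abc123",  # hypothetical tool call id
    "type": "tool_call",
}
tool_msg = multiply.invoke(tool_call)
print(tool_msg)
# ToolMessage(content='36', name='multiply', tool_call_id='call_abc123')
```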
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[HumanMessage(content='What is 3 * 12? Also, what is 11 + 49?'),\n",
" AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_svc2GLSxNFALbaCAbSjMI9J8', 'function': {'arguments': '{\"a\": 3, \"b\": 12}', 'name': 'Multiply'}, 'type': 'function'}, {'id': 'call_r8jxte3zW6h3MEGV3zH2qzFh', 'function': {'arguments': '{\"a\": 11, \"b\": 49}', 'name': 'Add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 50, 'prompt_tokens': 105, 'total_tokens': 155}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_d9767fc5b9', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-a79ad1dd-95f1-4a46-b688-4c83f327a7b3-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_svc2GLSxNFALbaCAbSjMI9J8'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_r8jxte3zW6h3MEGV3zH2qzFh'}]),\n",
" ToolMessage(content='36', tool_call_id='call_svc2GLSxNFALbaCAbSjMI9J8'),\n",
" ToolMessage(content='60', tool_call_id='call_r8jxte3zW6h3MEGV3zH2qzFh')]"
" AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Smg3NHJNxrKfAmd4f9GkaYn3', 'function': {'arguments': '{\"a\": 3, \"b\": 12}', 'name': 'multiply'}, 'type': 'function'}, {'id': 'call_55K1C0DmH6U5qh810gW34xZ0', 'function': {'arguments': '{\"a\": 11, \"b\": 49}', 'name': 'add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 49, 'prompt_tokens': 88, 'total_tokens': 137}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-56657feb-96dd-456c-ab8e-1857eab2ade0-0', tool_calls=[{'name': 'multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_Smg3NHJNxrKfAmd4f9GkaYn3', 'type': 'tool_call'}, {'name': 'add', 'args': {'a': 11, 'b': 49}, 'id': 'call_55K1C0DmH6U5qh810gW34xZ0', 'type': 'tool_call'}], usage_metadata={'input_tokens': 88, 'output_tokens': 49, 'total_tokens': 137}),\n",
" ToolMessage(content='36', name='multiply', tool_call_id='call_Smg3NHJNxrKfAmd4f9GkaYn3'),\n",
" ToolMessage(content='60', name='add', tool_call_id='call_55K1C0DmH6U5qh810gW34xZ0')]"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "display_data"
"output_type": "execute_result"
}
],
"source": [
@@ -85,24 +100,25 @@
"messages.append(ai_msg)\n",
"for tool_call in ai_msg.tool_calls:\n",
"    selected_tool = {\"add\": add, \"multiply\": multiply}[tool_call[\"name\"].lower()]\n",
"    tool_output = selected_tool.invoke(tool_call[\"args\"])\n",
"    messages.append(ToolMessage(tool_output, tool_call_id=tool_call[\"id\"]))\n",
"    tool_msg = selected_tool.invoke(tool_call)\n",
"    messages.append(tool_msg)\n",
"messages"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='3 * 12 is 36 and 11 + 49 is 60.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 171, 'total_tokens': 189}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_d9767fc5b9', 'finish_reason': 'stop', 'logprobs': None}, id='run-20b52149-e00d-48ea-97cf-f8de7a255f8c-0')"
"AIMessage(content='3 * 12 is 36 and 11 + 49 is 60.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 153, 'total_tokens': 171}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-ba5032f0-f773-406d-a408-8314e66511d0-0', usage_metadata={'input_tokens': 153, 'output_tokens': 18, 'total_tokens': 171})"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "display_data"
"output_type": "execute_result"
}
],
"source": [
@@ -118,10 +134,24 @@
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv-311",
"language": "python",
"name": "poetry-venv-311"
},
"language_info": {
"name": "python"
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

@@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to pass run time values to a tool\n",
"# How to pass run time values to tools\n",
"\n",
":::info Prerequisites\n",
"\n",
@@ -15,26 +15,25 @@
"- [How to use a model to call tools](/docs/how_to/tool_calling)\n",
":::\n",
"\n",
":::{.callout-info} Supported models\n",
"\n",
"This how-to guide uses models with native tool calling capability.\n",
"You can find a [list of all models that support tool calling](/docs/integrations/chat/).\n",
"\n",
":::\n",
"\n",
":::{.callout-info} Using with LangGraph\n",
":::info Using with LangGraph\n",
"\n",
"If you're using LangGraph, please refer to [this how-to guide](https://langchain-ai.github.io/langgraph/how-tos/pass-run-time-values-to-tools/)\n",
"which shows how to create an agent that keeps track of a given user's favorite pets.\n",
":::\n",
"\n",
":::caution Added in `langchain-core==0.2.21`\n",
"\n",
"Must have `langchain-core>=0.2.21` to use this functionality.\n",
"\n",
":::\n",
"\n",
"You may need to bind values to a tool that are only known at runtime. For example, the tool logic may require using the ID of the user who made the request.\n",
"\n",
"Most of the time, such values should not be controlled by the LLM. In fact, allowing the LLM to control the user ID may lead to a security risk.\n",
"\n",
"Instead, the LLM should only control the parameters of the tool that are meant to be controlled by the LLM, while other parameters (such as user ID) should be fixed by the application logic.\n",
"\n",
"This how-to guide shows a simple design pattern that creates the tool dynamically at run time and binds to them appropriate values."
"This how-to guide shows you how to prevent the model from generating certain tool arguments and injecting them in directly at runtime."
]
},
{
@@ -57,23 +56,12 @@
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython -m pip install --upgrade pip\u001b[0m\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_openai\n",
"# %pip install -qU langchain langchain_openai\n",
"\n",
"import os\n",
"from getpass import getpass\n",
@@ -90,10 +78,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# Passing request time information\n",
"## Hiding arguments from the model\n",
"\n",
"The idea is to create the tool dynamically at request time, and bind to it the appropriate information. For example,\n",
"this information may be the user ID as resolved from the request itself."
"We can use the `InjectedToolArg` annotation to mark certain parameters of our tool, like `user_id`, as being injected at runtime, meaning they shouldn't be generated by the model:"
]
},
{
@@ -104,46 +91,88 @@
"source": [
"from typing import List\n",
"\n",
"from langchain_core.output_parsers import JsonOutputParser\n",
"from langchain_core.tools import BaseTool, tool"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.tools import InjectedToolArg, tool\n",
"from typing_extensions import Annotated\n",
"\n",
"user_to_pets = {}\n",
"\n",
"\n",
"def generate_tools_for_user(user_id: str) -> List[BaseTool]:\n",
"    \"\"\"Generate a set of tools that have a user id associated with them.\"\"\"\n",
"@tool(parse_docstring=True)\n",
"def update_favorite_pets(\n",
"    pets: List[str], user_id: Annotated[str, InjectedToolArg]\n",
") -> None:\n",
"    \"\"\"Add the list of favorite pets.\n",
"\n",
"    @tool\n",
"    def update_favorite_pets(pets: List[str]) -> None:\n",
"        \"\"\"Add the list of favorite pets.\"\"\"\n",
"        user_to_pets[user_id] = pets\n",
"    Args:\n",
"        pets: List of favorite pets to set.\n",
"        user_id: User's ID.\n",
"    \"\"\"\n",
"    user_to_pets[user_id] = pets\n",
"\n",
"    @tool\n",
"    def delete_favorite_pets() -> None:\n",
"        \"\"\"Delete the list of favorite pets.\"\"\"\n",
"        if user_id in user_to_pets:\n",
"            del user_to_pets[user_id]\n",
"\n",
"    @tool\n",
"    def list_favorite_pets() -> None:\n",
"        \"\"\"List favorite pets if any.\"\"\"\n",
"        return user_to_pets.get(user_id, [])\n",
"@tool(parse_docstring=True)\n",
"def delete_favorite_pets(user_id: Annotated[str, InjectedToolArg]) -> None:\n",
"    \"\"\"Delete the list of favorite pets.\n",
"\n",
"    return [update_favorite_pets, delete_favorite_pets, list_favorite_pets]"
"    Args:\n",
"        user_id: User's ID.\n",
"    \"\"\"\n",
"    if user_id in user_to_pets:\n",
"        del user_to_pets[user_id]\n",
"\n",
"\n",
"@tool(parse_docstring=True)\n",
"def list_favorite_pets(user_id: Annotated[str, InjectedToolArg]) -> None:\n",
"    \"\"\"List favorite pets if any.\n",
"\n",
"    Args:\n",
"        user_id: User's ID.\n",
"    \"\"\"\n",
"    return user_to_pets.get(user_id, [])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Verify that the tools work correctly"
"If we look at the input schemas for these tools, we'll see that user_id is still listed:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'update_favorite_petsSchema',\n",
" 'description': 'Add the list of favorite pets.',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'description': 'List of favorite pets to set.',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}},\n",
" 'user_id': {'title': 'User Id',\n",
" 'description': \"User's ID.\",\n",
" 'type': 'string'}},\n",
" 'required': ['pets', 'user_id']}"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"update_favorite_pets.get_input_schema().schema()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"But if we look at the tool call schema, which is what is passed to the model for tool-calling, user_id has been removed:"
]
},
{
@@ -152,46 +181,60 @@
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eugene': ['cat', 'dog']}\n",
"['cat', 'dog']\n"
]
"data": {
"text/plain": [
"{'title': 'update_favorite_pets',\n",
" 'description': 'Add the list of favorite pets.',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'description': 'List of favorite pets to set.',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}}},\n",
" 'required': ['pets']}"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"update_pets, delete_pets, list_pets = generate_tools_for_user(\"eugene\")\n",
"update_pets.invoke({\"pets\": [\"cat\", \"dog\"]})\n",
"print(user_to_pets)\n",
"print(list_pets.invoke({}))"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"\n",
"def handle_run_time_request(user_id: str, query: str):\n",
"    \"\"\"Handle run time request.\"\"\"\n",
"    tools = generate_tools_for_user(user_id)\n",
"    llm_with_tools = llm.bind_tools(tools)\n",
"    prompt = ChatPromptTemplate.from_messages(\n",
"        [(\"system\", \"You are a helpful assistant.\")],\n",
"    )\n",
"    chain = prompt | llm_with_tools\n",
"    return llm_with_tools.invoke(query)"
"update_favorite_pets.tool_call_schema.schema()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This code will allow the LLM to invoke the tools, but the LLM is **unaware** of the fact that a **user ID** even exists!"
"So when we invoke our tool, we need to pass in user_id:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'123': ['lizard', 'dog']}\n",
"['lizard', 'dog']\n"
]
}
],
"source": [
"user_id = \"123\"\n",
"update_favorite_pets.invoke({\"pets\": [\"lizard\", \"dog\"], \"user_id\": user_id})\n",
"print(user_to_pets)\n",
"print(list_favorite_pets.invoke({\"user_id\": user_id}))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"But when the model calls the tool, no user_id argument will be generated:"
]
},
{
@@ -204,7 +247,8 @@
"text/plain": [
"[{'name': 'update_favorite_pets',\n",
" 'args': {'pets': ['cats', 'parrots']},\n",
" 'id': 'call_jJvjPXsNbFO5MMgW0q84iqCN'}]"
" 'id': 'call_W3cn4lZmJlyk8PCrKN4PRwqB',\n",
" 'type': 'tool_call'}]"
]
},
"execution_count": 6,
@@ -213,30 +257,349 @@
}
],
"source": [
"ai_message = handle_run_time_request(\n",
"    \"eugene\", \"my favorite animals are cats and parrots.\"\n",
")\n",
"ai_message.tool_calls"
"tools = [\n",
"    update_favorite_pets,\n",
"    delete_favorite_pets,\n",
"    list_favorite_pets,\n",
"]\n",
"llm_with_tools = llm.bind_tools(tools)\n",
"ai_msg = llm_with_tools.invoke(\"my favorite animals are cats and parrots\")\n",
"ai_msg.tool_calls"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
":::{.callout-important}\n",
"## Injecting arguments at runtime"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If we want to actually execute our tools using the model-generated tool call, we'll need to inject the user_id ourselves:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'update_favorite_pets',\n",
" 'args': {'pets': ['cats', 'parrots'], 'user_id': '123'},\n",
" 'id': 'call_W3cn4lZmJlyk8PCrKN4PRwqB',\n",
" 'type': 'tool_call'}]"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from copy import deepcopy\n",
"\n",
"Chat models only output requests to invoke tools, they don't actually invoke the underlying tools.\n",
"from langchain_core.runnables import chain\n",
"\n",
"To see how to invoke the tools, please refer to [how to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling).\n",
":::"
"\n",
"@chain\n",
"def inject_user_id(ai_msg):\n",
"    tool_calls = []\n",
"    for tool_call in ai_msg.tool_calls:\n",
"        tool_call_copy = deepcopy(tool_call)\n",
"        tool_call_copy[\"args\"][\"user_id\"] = user_id\n",
"        tool_calls.append(tool_call_copy)\n",
"    return tool_calls\n",
"\n",
"\n",
"inject_user_id.invoke(ai_msg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And now we can chain together our model, injection code, and the actual tools to create a tool-executing chain:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[ToolMessage(content='null', name='update_favorite_pets', tool_call_id='call_HUyF6AihqANzEYxQnTUKxkXj')]"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool_map = {tool.name: tool for tool in tools}\n",
"\n",
"\n",
"@chain\n",
"def tool_router(tool_call):\n",
"    return tool_map[tool_call[\"name\"]]\n",
"\n",
"\n",
"chain = llm_with_tools | inject_user_id | tool_router.map()\n",
"chain.invoke(\"my favorite animals are cats and parrots\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Looking at the user_to_pets dict, we can see that it's been updated to include cats and parrots:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'123': ['cats', 'parrots']}"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"user_to_pets"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Other ways of annotating args\n",
"\n",
"Here are a few other ways of annotating our tool args:"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'UpdateFavoritePetsSchema',\n",
" 'description': 'Update list of favorite pets',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'description': 'List of favorite pets to set.',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}},\n",
" 'user_id': {'title': 'User Id',\n",
" 'description': \"User's ID.\",\n",
" 'type': 'string'}},\n",
" 'required': ['pets', 'user_id']}"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_core.tools import BaseTool\n",
"\n",
"\n",
"class UpdateFavoritePetsSchema(BaseModel):\n",
"    \"\"\"Update list of favorite pets\"\"\"\n",
"\n",
"    pets: List[str] = Field(..., description=\"List of favorite pets to set.\")\n",
"    user_id: Annotated[str, InjectedToolArg] = Field(..., description=\"User's ID.\")\n",
"\n",
"\n",
"@tool(args_schema=UpdateFavoritePetsSchema)\n",
"def update_favorite_pets(pets, user_id):\n",
"    user_to_pets[user_id] = pets\n",
"\n",
"\n",
"update_favorite_pets.get_input_schema().schema()"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'update_favorite_pets',\n",
" 'description': 'Update list of favorite pets',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'description': 'List of favorite pets to set.',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}}},\n",
" 'required': ['pets']}"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"update_favorite_pets.tool_call_schema.schema()"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'UpdateFavoritePetsSchema',\n",
" 'description': 'Update list of favorite pets',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'description': 'List of favorite pets to set.',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}},\n",
" 'user_id': {'title': 'User Id',\n",
" 'description': \"User's ID.\",\n",
" 'type': 'string'}},\n",
" 'required': ['pets', 'user_id']}"
]
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Optional, Type\n",
"\n",
"\n",
"class UpdateFavoritePets(BaseTool):\n",
"    name: str = \"update_favorite_pets\"\n",
"    description: str = \"Update list of favorite pets\"\n",
"    args_schema: Optional[Type[BaseModel]] = UpdateFavoritePetsSchema\n",
"\n",
"    def _run(self, pets, user_id):\n",
"        user_to_pets[user_id] = pets\n",
"\n",
"\n",
"UpdateFavoritePets().get_input_schema().schema()"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'update_favorite_pets',\n",
" 'description': 'Update list of favorite pets',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'description': 'List of favorite pets to set.',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}}},\n",
" 'required': ['pets']}"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"UpdateFavoritePets().tool_call_schema.schema()"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'update_favorite_petsSchema',\n",
" 'description': 'Use the tool.\\n\\nAdd run_manager: Optional[CallbackManagerForToolRun] = None\\nto child implementations to enable tracing.',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}},\n",
" 'user_id': {'title': 'User Id', 'type': 'string'}},\n",
" 'required': ['pets', 'user_id']}"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"class UpdateFavoritePets2(BaseTool):\n",
"    name: str = \"update_favorite_pets\"\n",
"    description: str = \"Update list of favorite pets\"\n",
"\n",
"    def _run(self, pets: List[str], user_id: Annotated[str, InjectedToolArg]) -> None:\n",
"        user_to_pets[user_id] = pets\n",
"\n",
"\n",
"UpdateFavoritePets2().get_input_schema().schema()"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'update_favorite_pets',\n",
" 'description': 'Update list of favorite pets',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}}},\n",
" 'required': ['pets']}"
]
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"UpdateFavoritePets2().tool_call_schema.schema()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "poetry-venv-311",
"language": "python",
"name": "python3"
"name": "poetry-venv-311"
},
"language_info": {
"codemirror_mode": {
@@ -248,7 +611,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.11.9"
}
},
"nbformat": 4,

@@ -4,25 +4,32 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to stream events from within a tool\n",
"# How to stream events from a tool\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"- [LangChain Tools](/docs/concepts/#tools)\n",
"- [Custom tools](/docs/how_to/custom_tools)\n",
"- [Using stream events](/docs/how_to/streaming/#using-stream-events)\n",
"- [Accessing RunnableConfig within a custom tool](/docs/how_to/tool_configure/)\n",
"\n",
":::\n",
"\n",
"If you have tools that call LLMs, retrievers, or other runnables, you may want to access internal events from those runnables. This guide shows you a few ways you can do this using the `astream_events()` method.\n",
"If you have tools that call chat models, retrievers, or other runnables, you may want to access internal events from those runnables or configure them with additional properties. This guide shows you how to manually pass parameters properly so that you can do this using the `astream_events()` method.\n",
"\n",
":::caution\n",
"LangChain cannot automatically propagate callbacks to child runnables if you are running async code in python<=3.10.\n",
" \n",
"This is a common reason why you may fail to see events being emitted from custom runnables or tools.\n",
":::caution Compatibility\n",
"\n",
"LangChain cannot automatically propagate configuration, including callbacks necessary for `astream_events()`, to child runnables if you are running `async` code in `python<=3.10`. This is a common reason why you may fail to see events being emitted from custom runnables or tools.\n",
"\n",
"If you are running python<=3.10, you will need to manually propagate the `RunnableConfig` object to the child runnable in async environments. For an example of how to manually propagate the config, see the implementation of the `bar` RunnableLambda below.\n",
"\n",
"If you are running python>=3.11, the `RunnableConfig` will automatically propagate to child runnables in async environments. However, it is still a good idea to propagate the `RunnableConfig` manually if your code may run in older Python versions.\n",
"\n",
"This guide also requires `langchain-core>=0.2.16`.\n",
":::\n",
"\n",
"We'll define a custom tool below that calls a chain that summarizes its input in a special way by prompting an LLM to return only 10 words, then reversing the output:\n",
"Say you have a custom tool that calls a chain that condenses its input by prompting a chat model to return only 10 words, then reversing the output. First, define it in a naive way:\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
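The `bar` RunnableLambda referenced in the callout above is not shown in this hunk. As an illustration only — names here are hypothetical stand-ins, not the guide's elided code — manual config propagation might look like:

```python
# Illustrative sketch of manually propagating RunnableConfig on python<=3.10.
# `bar` is a stand-in, not the guide's actual implementation.
from langchain_core.runnables import RunnableConfig, RunnableLambda


async def bar(text: str, config: RunnableConfig) -> str:
    child = RunnableLambda(lambda s: s.upper())
    # Forward the config explicitly so callbacks reach the child run,
    # which python<=3.10 will not do automatically in async code.
    return await child.ainvoke(text, config=config)
```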
@@ -40,7 +47,7 @@
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_anthropic\n",
"%pip install -qU langchain langchain_anthropic langchain_core\n",
"\n",
"import os\n",
"from getpass import getpass\n",
@@ -65,7 +72,7 @@
"\n",
"\n",
"@tool\n",
"def special_summarization_tool(long_text: str) -> str:\n",
"async def special_summarization_tool(long_text: str) -> str:\n",
"    \"\"\"A tool that summarizes input text using advanced techniques.\"\"\"\n",
"    prompt = ChatPromptTemplate.from_template(\n",
"        \"You are an expert writer. Summarize the following text in 10 words or less:\\n\\n{long_text}\"\n",
@@ -75,7 +82,7 @@
"        return x[::-1]\n",
"\n",
"    chain = prompt | model | StrOutputParser() | reverse\n",
"    summary = chain.invoke({\"long_text\": long_text})\n",
"    summary = await chain.ainvoke({\"long_text\": long_text})\n",
"    return summary"
]
},
@@ -83,7 +90,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"If you just invoke the tool directly, you can see that you only get the final response:"
"Invoking the tool directly works just fine:"
]
},
{
@@ -116,31 +123,90 @@
"Coming! Hang on a second.\n",
"\"\"\"\n",
"\n",
"special_summarization_tool.invoke({\"long_text\": LONG_TEXT})"
"await special_summarization_tool.ainvoke({\"long_text\": LONG_TEXT})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you wanted to access the raw output from the chat model, you could use the [`astream_events()`](/docs/how_to/streaming/#using-stream-events) method and look for `on_chat_model_end` events:"
"But if you wanted to access the raw output from the chat model rather than the full tool, you might try to use the [`astream_events()`](/docs/how_to/streaming/#using-stream-events) method and look for an `on_chat_model_end` event. Here's what happens:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"stream = special_summarization_tool.astream_events(\n",
"    {\"long_text\": LONG_TEXT}, version=\"v2\"\n",
")\n",
"\n",
"async for event in stream:\n",
"    if event[\"event\"] == \"on_chat_model_end\":\n",
"        # Never triggers in python<=3.10!\n",
"        print(event)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You'll notice (unless you're running through this guide in `python>=3.11`) that there are no chat model events emitted from the child run!\n",
"\n",
"This is because the example above does not pass the tool's config object into the internal chain. To fix this, redefine your tool to take a special parameter typed as `RunnableConfig` (see [this guide](/docs/how_to/tool_configure) for more details). You'll also need to pass that parameter through into the internal chain when executing it:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.runnables import RunnableConfig\n",
"\n",
"\n",
"@tool\n",
"async def special_summarization_tool_with_config(\n",
"    long_text: str, config: RunnableConfig\n",
") -> str:\n",
"    \"\"\"A tool that summarizes input text using advanced techniques.\"\"\"\n",
"    prompt = ChatPromptTemplate.from_template(\n",
"        \"You are an expert writer. Summarize the following text in 10 words or less:\\n\\n{long_text}\"\n",
"    )\n",
"\n",
"    def reverse(x: str):\n",
"        return x[::-1]\n",
"\n",
"    chain = prompt | model | StrOutputParser() | reverse\n",
"    # Pass the \"config\" object as an argument to any executed runnables\n",
"    summary = await chain.ainvoke({\"long_text\": long_text}, config=config)\n",
"    return summary"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And now try the same `astream_events()` call as before with your new tool:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_chat_model_end', 'data': {'output': AIMessage(content='Bee defies physics; Barry chooses outfit for graduation day.', response_metadata={'stop_reason': 'end_turn', 'stop_sequence': None}, id='run-195c0986-2ffa-43a3-9366-f2f96c42fe57', usage_metadata={'input_tokens': 182, 'output_tokens': 16, 'total_tokens': 198}), 'input': {'messages': [[HumanMessage(content=\"You are an expert writer. Summarize the following text in 10 words or less:\\n\\n\\nNARRATOR:\\n(Black screen with text; The sound of buzzing bees can be heard)\\nAccording to all known laws of aviation, there is no way a bee should be able to fly. Its wings are too small to get its fat little body off the ground. The bee, of course, flies anyway because bees don't care what humans think is impossible.\\nBARRY BENSON:\\n(Barry is picking out a shirt)\\nYellow, black. Yellow, black. Yellow, black. Yellow, black. Ooh, black and yellow! Let's shake it up a little.\\nJANET BENSON:\\nBarry! Breakfast is ready!\\nBARRY:\\nComing! Hang on a second.\\n\")]]}}, 'run_id': '195c0986-2ffa-43a3-9366-f2f96c42fe57', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['370919df-1bc3-43ae-aab2-8e112a4ddf47', 'de535624-278b-4927-9393-6d0cac3248df']}\n"
"{'event': 'on_chat_model_end', 'data': {'output': AIMessage(content='Bee defies physics; Barry chooses outfit for graduation day.', response_metadata={'stop_reason': 'end_turn', 'stop_sequence': None}, id='run-d23abc80-0dce-4f74-9d7b-fb98ca4f2a9e', usage_metadata={'input_tokens': 182, 'output_tokens': 16, 'total_tokens': 198}), 'input': {'messages': [[HumanMessage(content=\"You are an expert writer. Summarize the following text in 10 words or less:\\n\\n\\nNARRATOR:\\n(Black screen with text; The sound of buzzing bees can be heard)\\nAccording to all known laws of aviation, there is no way a bee should be able to fly. Its wings are too small to get its fat little body off the ground. The bee, of course, flies anyway because bees don't care what humans think is impossible.\\nBARRY BENSON:\\n(Barry is picking out a shirt)\\nYellow, black. Yellow, black. Yellow, black. Yellow, black. Ooh, black and yellow! Let's shake it up a little.\\nJANET BENSON:\\nBarry! Breakfast is ready!\\nBARRY:\\nComing! Hang on a second.\\n\")]]}}, 'run_id': 'd23abc80-0dce-4f74-9d7b-fb98ca4f2a9e', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['f25c41fe-8972-4893-bc40-cecf3922c1fa']}\n"
]
}
],
"source": [
"stream = special_summarization_tool.astream_events(\n",
"stream = special_summarization_tool_with_config.astream_events(\n",
"    {\"long_text\": LONG_TEXT}, version=\"v2\"\n",
")\n",
"\n",
@@ -153,38 +219,38 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"And you can see that you get the raw response from the chat model.\n",
"Awesome! This time there's an event emitted.\n",
"\n",
"`astream_events()` will automatically call internal runnables in a chain with streaming enabled if possible, so if you wanted to a stream of tokens as they are generated from the chat model, you could simply filter our calls to look for `on_chat_model_stream` events with no other changes:"
"For streaming, `astream_events()` automatically calls internal runnables in a chain with streaming enabled if possible, so if you wanted a stream of tokens as they are generated from the chat model, you could simply filter to look for `on_chat_model_stream` events with no other changes:"
]
},
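The source of the cell below is elided by the diff; a plausible sketch of the filtering loop it runs, based on the guide's description, is:

```python
# Assumed sketch (the cell's actual source is not shown in this hunk):
# stream token-level events from the chat model inside the config-aware tool.
stream = special_summarization_tool_with_config.astream_events(
    {"long_text": LONG_TEXT}, version="v2"
)

async for event in stream:
    if event["event"] == "on_chat_model_stream":
        print(event)
```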
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', usage_metadata={'input_tokens': 182, 'output_tokens': 0, 'total_tokens': 182})}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='Bee', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' def', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='ies physics', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=';', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' Barry', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' cho', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='oses outfit', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' for', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' graduation', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' day', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='.', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='', response_metadata={'stop_reason': 'end_turn', 'stop_sequence': None}, id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', usage_metadata={'input_tokens': 0, 'output_tokens': 16, 'total_tokens': 16})}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n"
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42', usage_metadata={'input_tokens': 182, 'output_tokens': 0, 'total_tokens': 182})}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='Bee', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' def', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='ies physics', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=';', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' Barry', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' cho', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='oses outfit', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' for', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' graduation', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' day', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='.', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='', response_metadata={'stop_reason': 'end_turn', 'stop_sequence': None}, id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42', usage_metadata={'input_tokens': 0, 'output_tokens': 16, 'total_tokens': 16})}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"stream = special_summarization_tool.astream_events(\n",
|
||||
"stream = special_summarization_tool_with_config.astream_events(\n",
|
||||
" {\"long_text\": LONG_TEXT}, version=\"v2\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
@@ -193,67 +259,17 @@
|
||||
" print(event)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that you still have access to the final tool response as well. You can access it by looking for an `on_tool_end` event.\n",
|
||||
"\n",
|
||||
"To make events your tool emits easier to identify, you can also add identifiers to runnables using the `with_config()` method. `run_name` will apply to only to the runnable you attach it to, while `tags` will be inherited by runnables called within your initial runnable.\n",
|
||||
"\n",
|
||||
"Let's redeclare the tool with a tag, then run it with `astream_events()` with some filters. You should only see streamed events from the chat model and the final tool output:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630', usage_metadata={'input_tokens': 182, 'output_tokens': 0, 'total_tokens': 182})}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='Bee', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' def', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='ies physics', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=';', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' Barry', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' cho', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='oses outfit', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' for', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' graduation', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' day', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='.', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='', response_metadata={'stop_reason': 'end_turn', 'stop_sequence': None}, id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630', usage_metadata={'input_tokens': 0, 'output_tokens': 16, 'total_tokens': 16})}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_tool_end', 'data': {'output': '.yad noitaudarg rof tiftuo sesoohc yrraB ;scisyhp seifed eeB'}, 'run_id': '49d9d7d3-2b02-4964-a6c5-12f57a063146', 'name': 'special_summarization_tool', 'tags': ['bee_movie'], 'metadata': {}, 'parent_ids': []}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tagged_tool = special_summarization_tool.with_config({\"tags\": [\"bee_movie\"]})\n",
|
||||
"\n",
|
||||
"stream = tagged_tool.astream_events(\n",
|
||||
" {\"long_text\": LONG_TEXT}, version=\"v2\", include_tags=[\"bee_movie\"]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"async for event in stream:\n",
|
||||
" event_type = event[\"event\"]\n",
|
||||
" if event_type == \"on_chat_model_stream\" or event_type == \"on_tool_end\":\n",
|
||||
" print(event)"
|
||||
]
|
||||
},
|
||||
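As an editor's aside: `run_name` can serve the same filtering purpose as tags. Below is a minimal sketch (not from the original notebook; it reuses `special_summarization_tool` and `LONG_TEXT` from above and pairs `run_name` with the `include_names` filter of `astream_events()`):

```python
# Sketch: name the run, then filter events by that name.
# Because run_name applies only to the runnable it is attached to,
# this surfaces just the tool's own events (e.g. on_tool_end).
named_tool = special_summarization_tool.with_config({"run_name": "summarizer"})

stream = named_tool.astream_events(
    {"long_text": LONG_TEXT}, version="v2", include_names=["summarizer"]
)

async for event in stream:
    print(event)
```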
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"Now you've learned how to stream events from within a tool. Next, you can learn more about how to use tools:\n",
|
||||
"You've now seen how to stream events from within a tool. Next, check out the following guides for more on using tools:\n",
|
||||
"\n",
|
||||
"- Bind [model-specific tools](/docs/how_to/tools_model_specific/)\n",
|
||||
"- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n",
|
||||
"- Pass [tool results back to a model](/docs/how_to/tool_results_pass_to_model)\n",
|
||||
"- [Dispatch custom callback events](/docs/how_to/callbacks_custom_events)\n",
|
||||
"\n",
|
||||
"You can also check out some more specific uses of tool calling:\n",
|
||||
"\n",
|
||||
@@ -264,7 +280,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -278,9 +294,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.5"
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
@@ -228,7 +228,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -419,13 +419,13 @@
|
||||
"Invoking: `exponentiate` with `{'base': 405, 'exponent': 2}`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[38;5;200m\u001b[1;3m164025\u001b[0m\u001b[32;1m\u001b[1;3mThe result of taking 3 to the fifth power is 243. \n",
|
||||
"\u001b[0m\u001b[38;5;200m\u001b[1;3m13286025\u001b[0m\u001b[32;1m\u001b[1;3mThe result of taking 3 to the fifth power is 243. \n",
|
||||
"\n",
|
||||
"The sum of twelve and three is 15. \n",
|
||||
"\n",
|
||||
"Multiplying 243 by 15 gives 3645. \n",
|
||||
"\n",
|
||||
"Finally, squaring 3645 gives 164025.\u001b[0m\n",
|
||||
"Finally, squaring 3645 gives 13286025.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
@@ -434,7 +434,7 @@
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': 'Take 3 to the fifth power and multiply that by the sum of twelve and three, then square the whole result',\n",
|
||||
" 'output': 'The result of taking 3 to the fifth power is 243. \\n\\nThe sum of twelve and three is 15. \\n\\nMultiplying 243 by 15 gives 3645. \\n\\nFinally, squaring 3645 gives 164025.'}"
|
||||
" 'output': 'The result of taking 3 to the fifth power is 243. \\n\\nThe sum of twelve and three is 15. \\n\\nMultiplying 243 by 15 gives 3645. \\n\\nFinally, squaring 3645 gives 13286025.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
|
||||
@@ -540,7 +540,7 @@
|
||||
"id": "137662a6"
|
||||
},
|
||||
"source": [
|
||||
"## Example usage within a Conversation Chains"
|
||||
"## Example usage within RunnableWithMessageHistory "
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -550,7 +550,7 @@
|
||||
"id": "79efa62d"
|
||||
},
|
||||
"source": [
|
||||
"Like any other integration, ChatNVIDIA is fine to support chat utilities like conversation buffers by default. Below, we show the [LangChain ConversationBufferMemory](https://python.langchain.com/docs/modules/memory/types/buffer) example applied to the `mistralai/mixtral-8x22b-instruct-v0.1` model."
|
||||
"Like any other integration, ChatNVIDIA is fine to support chat utilities like RunnableWithMessageHistory which is analogous to using `ConversationChain`. Below, we show the [LangChain RunnableWithMessageHistory](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) example applied to the `mistralai/mixtral-8x22b-instruct-v0.1` model."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -572,8 +572,19 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import ConversationChain\n",
|
||||
"from langchain.memory import ConversationBufferMemory\n",
|
||||
"from langchain_core.chat_history import InMemoryChatMessageHistory\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"\n",
|
||||
"# store is a dictionary that maps session IDs to their corresponding chat histories.\n",
|
||||
"store = {} # memory is maintained outside the chain\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# A function that returns the chat history for a given session ID.\n",
|
||||
"def get_session_history(session_id: str) -> InMemoryChatMessageHistory:\n",
|
||||
" if session_id not in store:\n",
|
||||
" store[session_id] = InMemoryChatMessageHistory()\n",
|
||||
" return store[session_id]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chat = ChatNVIDIA(\n",
|
||||
" model=\"mistralai/mixtral-8x22b-instruct-v0.1\",\n",
|
||||
@@ -582,24 +593,18 @@
|
||||
" top_p=1.0,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"conversation = ConversationChain(llm=chat, memory=ConversationBufferMemory())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f644ff28",
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 268
|
||||
},
|
||||
"id": "f644ff28",
|
||||
"outputId": "bae354cc-2118-4e01-ce20-a717ac94d27d"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conversation.invoke(\"Hi there!\")[\"response\"]"
|
||||
"# Define a RunnableConfig object, with a `configurable` key. session_id determines thread\n",
|
||||
"config = {\"configurable\": {\"session_id\": \"1\"}}\n",
|
||||
"\n",
|
||||
"conversation = RunnableWithMessageHistory(\n",
|
||||
" chat,\n",
|
||||
" get_session_history,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"conversation.invoke(\n",
|
||||
" \"Hi I'm Srijan Dubey.\", # input or query\n",
|
||||
" config=config,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -616,26 +621,30 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conversation.invoke(\"I'm doing well! Just having a conversation with an AI.\")[\n",
|
||||
" \"response\"\n",
|
||||
"]"
|
||||
"conversation.invoke(\n",
|
||||
" \"I'm doing well! Just having a conversation with an AI.\",\n",
|
||||
" config=config,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "LyD1xVKmVSs4",
|
||||
"id": "uHIMZxVSVNBC",
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 350
|
||||
"height": 284
|
||||
},
|
||||
"id": "LyD1xVKmVSs4",
|
||||
"outputId": "a1714513-a8fd-4d14-f974-233e39d5c4f5"
|
||||
"id": "uHIMZxVSVNBC",
|
||||
"outputId": "79acc89d-a820-4f2c-bac2-afe99da95580"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conversation.invoke(\"Tell me about yourself.\")[\"response\"]"
|
||||
"conversation.invoke(\n",
|
||||
" \"Tell me about yourself.\",\n",
|
||||
" config=config,\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
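One design note, sketched here rather than taken from the original notebook: because `get_session_history` keys histories by `session_id`, switching the ID starts a fresh conversation. This makes it easy to confirm that memory is per-thread:

```python
# Sketch: a different session_id maps to a new, empty history,
# so the model should not recall the name given in session "1".
conversation.invoke(
    "What is my name?",
    config={"configurable": {"session_id": "2"}},
)
```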
],
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
@@ -11,6 +12,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatOllama\n",
|
||||
@@ -23,6 +25,18 @@
|
||||
"\n",
|
||||
"For a complete list of supported models and model variants, see the [Ollama model library](https://github.com/jmorganca/ollama#model-library).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/v0.2/docs/integrations/chat/ollama) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatOllama](https://api.python.langchain.com/en/latest/chat_models/langchain_ollama.chat_models.ChatOllama.html) | [langchain-ollama](https://api.python.langchain.com/en/latest/ollama_api_reference.html) | ✅ | ❌ | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"First, follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance:\n",
|
||||
@@ -40,307 +54,285 @@
|
||||
"* Specify the exact version of the model of interest as such `ollama pull vicuna:13b-v1.5-16k-q4_0` (View the [various tags for the `Vicuna`](https://ollama.ai/library/vicuna/tags) model in this instance)\n",
|
||||
"* To view all pulled models, use `ollama list`\n",
|
||||
"* To chat directly with a model from the command line, use `ollama run <name-of-model>`\n",
|
||||
"* View the [Ollama documentation](https://github.com/jmorganca/ollama) for more commands. Run `ollama help` in the terminal to see available commands too.\n",
|
||||
"\n",
|
||||
"## Usage\n",
|
||||
"\n",
|
||||
"You can see a full list of supported parameters on the [API reference page](https://api.python.langchain.com/en/latest/llms/langchain.llms.ollama.Ollama.html).\n",
|
||||
"\n",
|
||||
"If you are using a LLaMA `chat` model (e.g., `ollama pull llama3`) then you can use the `ChatOllama` interface.\n",
|
||||
"\n",
|
||||
"This includes [special tokens](https://huggingface.co/blog/llama2#how-to-prompt-llama-2) for system message and user input.\n",
|
||||
"\n",
|
||||
"## Interacting with Models \n",
|
||||
"\n",
|
||||
"Here are a few ways to interact with pulled local models\n",
|
||||
"\n",
|
||||
"#### In the terminal:\n",
|
||||
"\n",
|
||||
"* All of your local models are automatically served on `localhost:11434`\n",
|
||||
"* Run `ollama run <name-of-model>` to start interacting via the command line directly\n",
|
||||
"\n",
|
||||
"#### Via an API\n",
|
||||
"\n",
|
||||
"Send an `application/json` request to the API endpoint of Ollama to interact.\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"curl http://localhost:11434/api/generate -d '{\n",
|
||||
" \"model\": \"llama3\",\n",
|
||||
" \"prompt\":\"Why is the sky blue?\"\n",
|
||||
"}'\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"See the Ollama [API documentation](https://github.com/jmorganca/ollama/blob/main/docs/api.md) for all endpoints.\n",
|
||||
"\n",
|
||||
"#### Via LangChain\n",
|
||||
"\n",
|
||||
"See a typical basic example of using Ollama via the `ChatOllama` chat model in your LangChain application. \n",
|
||||
"\n",
|
||||
"View the [API Reference for ChatOllama](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.ollama.ChatOllama.html#langchain_community.chat_models.ollama.ChatOllama) for more."
|
||||
]
|
||||
},
|
||||
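The curl call above can equally be issued from Python. A minimal sketch, assuming a local Ollama server on the default port and the `requests` package:

```python
# Sketch: call Ollama's generate endpoint directly over HTTP.
import requests

response = requests.post(
    "http://localhost:11434/api/generate",
    json={"model": "llama3", "prompt": "Why is the sky blue?", "stream": False},
)
print(response.json()["response"])
```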
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Why did the astronaut break up with his girlfriend?\n",
|
||||
"\n",
|
||||
"Because he needed space!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# LangChain supports many other chat models. Here, we're using Ollama\n",
|
||||
"from langchain_community.chat_models import ChatOllama\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"# supports many more optional parameters. Hover on your `ChatOllama(...)`\n",
|
||||
"# class to view the latest available supported parameters\n",
|
||||
"llm = ChatOllama(model=\"llama3\")\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\"Tell me a short joke about {topic}\")\n",
|
||||
"\n",
|
||||
"# using LangChain Expressive Language chain syntax\n",
|
||||
"# learn more about the LCEL on\n",
|
||||
"# /docs/concepts/#langchain-expression-language-lcel\n",
|
||||
"chain = prompt | llm | StrOutputParser()\n",
|
||||
"\n",
|
||||
"# for brevity, response is printed in terminal\n",
|
||||
"# You can use LangServe to deploy your application for\n",
|
||||
"# production\n",
|
||||
"print(chain.invoke({\"topic\": \"Space travel\"}))"
|
||||
"* View the [Ollama documentation](https://github.com/jmorganca/ollama) for more commands. Run `ollama help` in the terminal to see available commands too.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"LCEL chains, out of the box, provide extra functionalities, such as streaming of responses, and async support"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Why\n",
|
||||
" did\n",
|
||||
" the\n",
|
||||
" astronaut\n",
|
||||
" break\n",
|
||||
" up\n",
|
||||
" with\n",
|
||||
" his\n",
|
||||
" girlfriend\n",
|
||||
" before\n",
|
||||
" going\n",
|
||||
" to\n",
|
||||
" Mars\n",
|
||||
"?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Because\n",
|
||||
" he\n",
|
||||
" needed\n",
|
||||
" space\n",
|
||||
"!\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"topic = {\"topic\": \"Space travel\"}\n",
|
||||
"\n",
|
||||
"for chunks in chain.stream(topic):\n",
|
||||
" print(chunks)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For streaming async support, here's an example - all possible via the single chain created above."
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"topic = {\"topic\": \"Space travel\"}\n",
|
||||
"\n",
|
||||
"async for chunks in chain.astream(topic):\n",
|
||||
" print(chunks)"
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Take a look at the [LangChain Expressive Language (LCEL) Interface](/docs/concepts#interface) for the other available interfaces for use when a chain is created.\n",
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"## Building from source\n",
|
||||
"\n",
|
||||
"For up to date instructions on building from source, check the Ollama documentation on [Building from Source](https://github.com/ollama/ollama?tab=readme-ov-file#building)"
|
||||
"The LangChain Ollama integration lives in the `langchain-ollama` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-ollama"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Extraction\n",
|
||||
" \n",
|
||||
"Use the latest version of Ollama and supply the [`format`](https://github.com/jmorganca/ollama/blob/main/docs/api.md#json-mode) flag. The `format` flag will force the model to produce the response in JSON.\n",
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"> **Note:** You can also try out the experimental [OllamaFunctions](/docs/integrations/chat/ollama_functions) wrapper for convenience."
|
||||
"Now we can instantiate our model object and generate chat completions:\n",
|
||||
"\n",
|
||||
"- TODO: Update model instantiation with relevant params."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_ollama import ChatOllama\n",
|
||||
"\n",
|
||||
"llm = ChatOllama(\n",
|
||||
" model=\"llama3\",\n",
|
||||
" temperature=0,\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatOllama\n",
|
||||
"\n",
|
||||
"llm = ChatOllama(model=\"llama3\", format=\"json\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content='{ \"morning\": \"blue\", \"noon\": \"clear blue\", \"afternoon\": \"hazy yellow\", \"evening\": \"orange-red\" }\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n ' id='run-e893700f-e2d0-4df8-ad86-17525dcee318-0'\n"
|
||||
]
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Je adore le programmation.\\n\\n(Note: \"programmation\" is not commonly used in French, but I translated it as \"le programmation\" to maintain the same grammatical structure and meaning as the original English sentence.)', response_metadata={'model': 'llama3', 'created_at': '2024-07-22T17:43:54.731273Z', 'message': {'role': 'assistant', 'content': ''}, 'done_reason': 'stop', 'done': True, 'total_duration': 11094839375, 'load_duration': 10121854667, 'prompt_eval_count': 36, 'prompt_eval_duration': 146569000, 'eval_count': 46, 'eval_duration': 816593000}, id='run-befccbdc-e1f9-42a9-85cf-e69b926d6b8b-0', usage_metadata={'input_tokens': 36, 'output_tokens': 46, 'total_tokens': 82})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_core.messages import AIMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"What color is the sky at different times of the day? Respond using JSON\"\n",
|
||||
" )\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"chat_model_response = llm.invoke(messages)\n",
|
||||
"print(chat_model_response)"
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 5,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Je adore le programmation.\n",
|
||||
"\n",
|
||||
"Name: John\n",
|
||||
"Age: 35\n",
|
||||
"Likes: Pizza\n"
|
||||
"(Note: \"programmation\" is not commonly used in French, but I translated it as \"le programmation\" to maintain the same grammatical structure and meaning as the original English sentence.)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"from langchain_community.chat_models import ChatOllama\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"json_schema = {\n",
|
||||
" \"title\": \"Person\",\n",
|
||||
" \"description\": \"Identifying information about a person.\",\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"name\": {\"title\": \"Name\", \"description\": \"The person's name\", \"type\": \"string\"},\n",
|
||||
" \"age\": {\"title\": \"Age\", \"description\": \"The person's age\", \"type\": \"integer\"},\n",
|
||||
" \"fav_food\": {\n",
|
||||
" \"title\": \"Fav Food\",\n",
|
||||
" \"description\": \"The person's favorite food\",\n",
|
||||
" \"type\": \"string\",\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" \"required\": [\"name\", \"age\"],\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"llm = ChatOllama(model=\"llama2\")\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Please tell me about a person using the following JSON schema:\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(content=\"{dumps}\"),\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Now, considering the schema, tell me about a person named John who is 35 years old and loves pizza.\"\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(messages)\n",
|
||||
"dumps = json.dumps(json_schema, indent=2)\n",
|
||||
"\n",
|
||||
"chain = prompt | llm | StrOutputParser()\n",
|
||||
"\n",
|
||||
"print(chain.invoke({\"dumps\": dumps}))"
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe Programmieren!\\n\\n(Note: \"Ich liebe\" means \"I love\", \"Programmieren\" is the verb for \"programming\")', response_metadata={'model': 'llama3', 'created_at': '2024-07-04T04:22:33.864132Z', 'message': {'role': 'assistant', 'content': ''}, 'done_reason': 'stop', 'done': True, 'total_duration': 1310800083, 'load_duration': 1782000, 'prompt_eval_count': 16, 'prompt_eval_duration': 250199000, 'eval_count': 29, 'eval_duration': 1057192000}, id='run-cbadbe59-2de2-4ec0-a18a-b3220226c3d2-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0f51345d-0a9d-43f1-8fca-d0662cb8e21b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tool calling\n",
|
||||
"\n",
|
||||
"We can use [tool calling](https://blog.langchain.dev/improving-core-tool-interfaces-and-docs-in-langchain/) with an LLM [that has been fine-tuned for tool use](https://ollama.com/library/llama3-groq-tool-use): \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"ollama pull llama3-groq-tool-use\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"We can just pass normal Python functions directly as tools."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "5250bceb-1029-41ff-b447-983518704d88",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'validate_user',\n",
|
||||
" 'args': {'addresses': ['123 Fake St, Boston MA',\n",
|
||||
" '234 Pretend Boulevard, Houston TX'],\n",
|
||||
" 'user_id': 123},\n",
|
||||
" 'id': 'fe2148d3-95fb-48e9-845a-4bfecc1f1f96',\n",
|
||||
" 'type': 'tool_call'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_ollama import ChatOllama\n",
|
||||
"from typing_extensions import TypedDict\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def validate_user(user_id: int, addresses: List) -> bool:\n",
|
||||
" \"\"\"Validate user using historical addresses.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" user_id: (int) the user ID.\n",
|
||||
" addresses: Previous addresses.\n",
|
||||
" \"\"\"\n",
|
||||
" return True\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm = ChatOllama(\n",
|
||||
" model=\"llama3-groq-tool-use\",\n",
|
||||
" temperature=0,\n",
|
||||
").bind_tools([validate_user])\n",
|
||||
"\n",
|
||||
"result = llm.invoke(\n",
|
||||
" \"Could you validate user 123? They previously lived at \"\n",
|
||||
" \"123 Fake St in Boston MA and 234 Pretend Boulevard in \"\n",
|
||||
" \"Houston TX.\"\n",
|
||||
")\n",
|
||||
"result.tool_calls"
|
||||
]
|
||||
},
|
||||
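To actually execute the chosen tool, one minimal sketch (a continuation of the example above, not part of the original notebook) is to dispatch on the tool call's name and splat the extracted arguments into the plain Python function:

```python
# Sketch: run each returned tool call against the matching function.
for tool_call in result.tool_calls:
    if tool_call["name"] == "validate_user":
        print(validate_user(**tool_call["args"]))  # -> True
```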
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2bb034ff-218f-4865-afea-3f5e57d3bdee",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We look at the LangSmith trace to see that the tool call was performed: \n",
|
||||
"\n",
|
||||
"https://smith.langchain.com/public/4169348a-d6be-45df-a7cf-032f6baa4697/r\n",
|
||||
"\n",
|
||||
"In particular, the trace shows how the tool schema was populated."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4c5e0197",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multi-modal\n",
|
||||
"\n",
|
||||
"Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava).\n",
|
||||
"Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.com/library/bakllava) and [llava](https://ollama.com/library/llava).\n",
|
||||
"\n",
|
||||
"Browse the full set of versions for models with `tags`, such as [Llava](https://ollama.ai/library/llava/tags).\n",
|
||||
" ollama pull bakllava\n",
|
||||
"\n",
|
||||
"Download the desired LLM via `ollama pull bakllava`\n",
|
||||
"\n",
|
||||
"Be sure to update Ollama so that you have the most recent version to support multi-modal.\n",
|
||||
"\n",
|
||||
"Check out the typical example of how to use ChatOllama multi-modal support below:"
|
||||
"Be sure to update Ollama so that you have the most recent version to support multi-modal."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"!pip install --upgrade --quiet pillow"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 11,
|
||||
"id": "36c9b1c2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -399,7 +391,8 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 12,
|
||||
"id": "32b3ba7b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -411,8 +404,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatOllama\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_ollama import ChatOllama\n",
|
||||
"\n",
|
||||
"llm = ChatOllama(model=\"bakllava\", temperature=0)\n",
|
||||
"\n",
|
||||
@@ -449,20 +442,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Concurrency Features\n",
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"Ollama supports concurrency inference for a single model, and or loading multiple models simulatenously (at least [version 0.1.33](https://github.com/ollama/ollama/releases)).\n",
|
||||
"\n",
|
||||
"Start the Ollama server with:\n",
|
||||
"\n",
|
||||
"* `OLLAMA_NUM_PARALLEL`: Handle multiple requests simultaneously for a single model\n",
|
||||
"* `OLLAMA_MAX_LOADED_MODELS`: Load multiple models simultaneously\n",
|
||||
"\n",
|
||||
"Example: `OLLAMA_NUM_PARALLEL=4 OLLAMA_MAX_LOADED_MODELS=4 ollama serve`\n",
|
||||
"\n",
|
||||
"Learn more about configuring Ollama server in [the official guide](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-do-i-configure-ollama-server)."
|
||||
"For detailed documentation of all ChatOllama features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_ollama.chat_models.ChatOllama.html"
|
||||
]
|
||||
}
|
||||
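The concurrency settings described above can be exercised from LangChain with plain asyncio. A minimal sketch, assuming the server was started with `OLLAMA_NUM_PARALLEL=4` and that `langchain-ollama` is installed:

```python
# Sketch: dispatch several generations concurrently; with
# OLLAMA_NUM_PARALLEL=4 the server can serve them in parallel.
import asyncio

from langchain_ollama import ChatOllama

llm = ChatOllama(model="llama3")


async def main():
    topics = ["bees", "space travel", "cats", "tea"]
    results = await asyncio.gather(
        *(llm.ainvoke(f"Tell me a one-line joke about {topic}") for topic in topics)
    )
    for result in results:
        print(result.content)


asyncio.run(main())
```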
],
|
||||
@@ -486,5 +471,5 @@
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Ollama Functions\n",
|
||||
"sidebar_class_name: hidden\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
@@ -15,16 +16,16 @@
|
||||
"source": [
|
||||
"# OllamaFunctions\n",
|
||||
"\n",
|
||||
":::warning\n",
|
||||
"\n",
|
||||
"This was an experimental wrapper that attempts to bolt-on tool calling support to models that do not natively support it. The [primary Ollama integration](/docs/integrations/chat/ollama/) now supports tool calling, and should be used instead.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"This notebook shows how to use an experimental wrapper around Ollama that gives it [tool calling capabilities](https://python.langchain.com/v0.2/docs/concepts/#functiontool-calling).\n",
|
||||
"\n",
|
||||
"Note that more powerful and capable models will perform better with complex schema and/or multiple functions. The examples below use llama3 and phi3 models.\n",
|
||||
"For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library).\n",
|
||||
"\n",
|
||||
":::warning\n",
|
||||
"\n",
|
||||
"This is an experimental wrapper that attempts to bolt-on tool calling support to models that do not natively support it. Use with caution.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
|
||||
@@ -82,9 +82,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# By default it will use the model which was deployed through the platform\n",
|
||||
"# in my case it will is \"claude-3-haiku\"\n",
|
||||
"# in my case it will is \"gpt-4o\"\n",
|
||||
"\n",
|
||||
"chat = ChatPremAI(project_id=8)"
|
||||
"chat = ChatPremAI(project_id=1234, model_name=\"gpt-4o\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -107,7 +107,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"I am an artificial intelligence created by Anthropic. I'm here to help with a wide variety of tasks, from research and analysis to creative projects and open-ended conversation. I have general knowledge and capabilities, but I'm not a real person - I'm an AI assistant. Please let me know if you have any other questions!\n"
|
||||
"I am an AI language model created by OpenAI, designed to assist with answering questions and providing information based on the context provided. How can I help you today?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -133,7 +133,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"I am an artificial intelligence created by Anthropic. My purpose is to assist and converse with humans in a friendly and helpful way. I have a broad knowledge base that I can use to provide information, answer questions, and engage in discussions on a wide range of topics. Please let me know if you have any other questions - I'm here to help!\")"
|
||||
"AIMessage(content=\"I'm your friendly assistant! How can I help you today?\", response_metadata={'document_chunks': [{'repository_id': 1985, 'document_id': 1306, 'chunk_id': 173899, 'document_name': '[D] Difference between sparse and dense informati…', 'similarity_score': 0.3209080100059509, 'content': \"with the difference or anywhere\\nwhere I can read about it?\\n\\n\\n 17 9\\n\\n\\n u/ScotiabankCanada • Promoted\\n\\n\\n Accelerate your study permit process\\n with Scotiabank's Student GIC\\n Program. We're here to help you tur…\\n\\n\\n startright.scotiabank.com Learn More\\n\\n\\n Add a Comment\\n\\n\\nSort by: Best\\n\\n\\n DinosParkour • 1y ago\\n\\n\\n Dense Retrieval (DR) m\"}]}, id='run-510bbd0e-3f8f-4095-9b1f-c2d29fd89719-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
@@ -160,10 +160,18 @@
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/home/anindya/prem/langchain/libs/community/langchain_community/chat_models/premai.py:355: UserWarning: WARNING: Parameter top_p is not supported in kwargs.\n",
|
||||
" warnings.warn(f\"WARNING: Parameter {key} is not supported in kwargs.\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='I am an artificial intelligence created by Anthropic')"
|
||||
"AIMessage(content=\"Hello! I'm your friendly assistant. How can I\", response_metadata={'document_chunks': [{'repository_id': 1985, 'document_id': 1306, 'chunk_id': 173899, 'document_name': '[D] Difference between sparse and dense informati…', 'similarity_score': 0.3209080100059509, 'content': \"with the difference or anywhere\\nwhere I can read about it?\\n\\n\\n 17 9\\n\\n\\n u/ScotiabankCanada • Promoted\\n\\n\\n Accelerate your study permit process\\n with Scotiabank's Student GIC\\n Program. We're here to help you tur…\\n\\n\\n startright.scotiabank.com Learn More\\n\\n\\n Add a Comment\\n\\n\\nSort by: Best\\n\\n\\n DinosParkour • 1y ago\\n\\n\\n Dense Retrieval (DR) m\"}]}, id='run-c4b06b98-4161-4cca-8495-fd2fc98fa8f8-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
@@ -195,13 +203,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"what is the diameter of individual Galaxy\"\n",
|
||||
"query = \"Which models are used for dense retrieval\"\n",
|
||||
"repository_ids = [\n",
|
||||
" 1991,\n",
|
||||
" 1985,\n",
|
||||
"]\n",
|
||||
"repositories = dict(ids=repository_ids, similarity_threshold=0.3, limit=3)"
|
||||
]
|
||||
@@ -219,9 +227,34 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Dense retrieval models typically include:\n",
|
||||
"\n",
|
||||
"1. **BERT-based Models**: Such as DPR (Dense Passage Retrieval) which uses BERT for encoding queries and passages.\n",
|
||||
"2. **ColBERT**: A model that combines BERT with late interaction mechanisms.\n",
|
||||
"3. **ANCE (Approximate Nearest Neighbor Negative Contrastive Estimation)**: Uses BERT and focuses on efficient retrieval.\n",
|
||||
"4. **TCT-ColBERT**: A variant of ColBERT that uses a two-tower\n",
|
||||
"{\n",
|
||||
" \"document_chunks\": [\n",
|
||||
" {\n",
|
||||
" \"repository_id\": 1985,\n",
|
||||
" \"document_id\": 1306,\n",
|
||||
" \"chunk_id\": 173899,\n",
|
||||
" \"document_name\": \"[D] Difference between sparse and dense informati\\u2026\",\n",
|
||||
" \"similarity_score\": 0.3209080100059509,\n",
|
||||
" \"content\": \"with the difference or anywhere\\nwhere I can read about it?\\n\\n\\n 17 9\\n\\n\\n u/ScotiabankCanada \\u2022 Promoted\\n\\n\\n Accelerate your study permit process\\n with Scotiabank's Student GIC\\n Program. We're here to help you tur\\u2026\\n\\n\\n startright.scotiabank.com Learn More\\n\\n\\n Add a Comment\\n\\n\\nSort by: Best\\n\\n\\n DinosParkour \\u2022 1y ago\\n\\n\\n Dense Retrieval (DR) m\"\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
"}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
@@ -262,7 +295,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -288,7 +321,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"template_id = \"78069ce8-xxxxx-xxxxx-xxxx-xxx\"\n",
|
||||
"response = chat.invoke([human_message], template_id=template_id)\n",
|
||||
"response = chat.invoke([human_messages], template_id=template_id)\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
@@ -310,14 +343,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Hello! As an AI language model, I don't have feelings or a physical state, but I'm functioning properly and ready to assist you with any questions or tasks you might have. How can I help you today?"
|
||||
"It looks like your message got cut off. If you need information about Dense Retrieval (DR) or any other topic, please provide more details or clarify your question."
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -338,14 +371,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Hello! As an AI language model, I don't have feelings or a physical form, but I'm functioning properly and ready to assist you. How can I help you today?"
|
||||
"Woof! 🐾 How can I help you today? Want to play fetch or maybe go for a walk 🐶🦴"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -365,6 +398,275 @@
|
||||
" sys.stdout.write(chunk.content)\n",
|
||||
" sys.stdout.flush()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Tool/Function Calling\n",
|
||||
"\n",
|
||||
"LangChain PremAI supports tool/function calling. Tool/function calling allows a model to respond to a given prompt by generating output that matches a user-defined schema. \n",
|
||||
"\n",
|
||||
"- You can learn all about tool calling in details [in our documentation here](https://docs.premai.io/get-started/function-calling).\n",
|
||||
"- You can learn more about langchain tool calling in [this part of the docs](https://python.langchain.com/v0.1/docs/modules/model_io/chat/function_calling).\n",
|
||||
"\n",
|
||||
"**NOTE:**\n",
|
||||
"The current version of LangChain ChatPremAI do not support function/tool calling with streaming support. Streaming support along with function calling will come soon. \n",
|
||||
"\n",
|
||||
"#### Passing tools to model\n",
|
||||
"\n",
|
||||
"In order to pass tools and let the LLM choose the tool it needs to call, we need to pass a tool schema. A tool schema is the function definition along with proper docstring on what does the function do, what each argument of the function is etc. Below are some simple arithmetic functions with their schema. \n",
|
||||
"\n",
|
||||
"**NOTE:** When defining function/tool schema, do not forget to add information around the function arguments, otherwise it would throw error."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Define the schema for function arguments\n",
|
||||
"class OperationInput(BaseModel):\n",
|
||||
" a: int = Field(description=\"First number\")\n",
|
||||
" b: int = Field(description=\"Second number\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Now define the function where schema for argument will be OperationInput\n",
|
||||
"@tool(\"add\", args_schema=OperationInput, return_direct=True)\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" a: first int\n",
|
||||
" b: second int\n",
|
||||
" \"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(\"multiply\", args_schema=OperationInput, return_direct=True)\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiplies a and b.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" a: first int\n",
|
||||
" b: second int\n",
|
||||
" \"\"\"\n",
|
||||
" return a * b"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Binding tool schemas with our LLM\n",
|
||||
"\n",
|
||||
"We will now use the `bind_tools` method to convert our above functions to a \"tool\" and binding it with the model. This means we are going to pass these tool informations everytime we invoke the model. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = [add, multiply]\n",
|
||||
"llm_with_tools = chat.bind_tools(tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"After this, we get the response from the model which is now binded with the tools. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What is 3 * 12? Also, what is 11 + 49?\"\n",
|
||||
"\n",
|
||||
"messages = [HumanMessage(query)]\n",
|
||||
"ai_msg = llm_with_tools.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As we can see, when our chat model is binded with tools, then based on the given prompt, it calls the correct set of the tools and sequentially. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'multiply',\n",
|
||||
" 'args': {'a': 3, 'b': 12},\n",
|
||||
" 'id': 'call_A9FL20u12lz6TpOLaiS6rFa8'},\n",
|
||||
" {'name': 'add',\n",
|
||||
" 'args': {'a': 11, 'b': 49},\n",
|
||||
" 'id': 'call_MPKYGLHbf39csJIyb5BZ9xIk'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ai_msg.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We append this message shown above to the LLM which acts as a context and makes the LLM aware that what all functions it has called. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"messages.append(ai_msg)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Since tool calling happens into two phases, where:\n",
|
||||
"\n",
|
||||
"1. in our first call, we gathered all the tools that the LLM decided to tool, so that it can get the result as an added context to give more accurate and hallucination free result. \n",
|
||||
"\n",
|
||||
"2. in our second call, we will parse those set of tools decided by LLM and run them (in our case it will be the functions we defined, with the LLM's extracted arguments) and pass this result to the LLM"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.messages import ToolMessage\n",
|
||||
"\n",
|
||||
"for tool_call in ai_msg.tool_calls:\n",
|
||||
" selected_tool = {\"add\": add, \"multiply\": multiply}[tool_call[\"name\"].lower()]\n",
|
||||
" tool_output = selected_tool.invoke(tool_call[\"args\"])\n",
|
||||
" messages.append(ToolMessage(tool_output, tool_call_id=tool_call[\"id\"]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Finally, we call the LLM (binded with the tools) with the function response added in it's context. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The final answers are:\n",
|
||||
"\n",
|
||||
"- 3 * 12 = 36\n",
|
||||
"- 11 + 49 = 60\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response = llm_with_tools.invoke(messages)\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Defining tool schemas: Pydantic class\n",
|
||||
"\n",
|
||||
"Above we have shown how to define schema using `tool` decorator, however we can equivalently define the schema using Pydantic. Pydantic is useful when your tool inputs are more complex:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers.openai_tools import PydanticToolsParser\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class add(BaseModel):\n",
|
||||
" \"\"\"Add two integers together.\"\"\"\n",
|
||||
"\n",
|
||||
" a: int = Field(..., description=\"First integer\")\n",
|
||||
" b: int = Field(..., description=\"Second integer\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class multiply(BaseModel):\n",
|
||||
" \"\"\"Multiply two integers together.\"\"\"\n",
|
||||
"\n",
|
||||
" a: int = Field(..., description=\"First integer\")\n",
|
||||
" b: int = Field(..., description=\"Second integer\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [add, multiply]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now, we can bind them to chat models and directly get the result:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 30,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[multiply(a=3, b=12), add(a=11, b=49)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 30,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain = llm_with_tools | PydanticToolsParser(tools=[multiply, add])\n",
|
||||
"chain.invoke(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now, as done above, we parse this and run this functions and call the LLM once again to get the result."
|
||||
]
|
||||
}
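,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Below is a minimal sketch of that second round, following the same pattern as earlier in this notebook. It assumes `query` and `llm_with_tools` from above are still in scope; the small lambdas stand in for the actual tool implementations, since the Pydantic classes above shadow the original `@tool`-decorated `add` and `multiply` functions."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.messages import HumanMessage, ToolMessage\n",
"\n",
"# Stand-in implementations: the Pydantic classes above shadow the original\n",
"# @tool-decorated functions, so we map tool names to plain callables here.\n",
"tool_registry = {\"add\": lambda a, b: a + b, \"multiply\": lambda a, b: a * b}\n",
"\n",
"messages = [HumanMessage(query)]\n",
"ai_msg = llm_with_tools.invoke(messages)\n",
"messages.append(ai_msg)\n",
"\n",
"# Run each tool call requested by the model and feed the results back\n",
"for tool_call in ai_msg.tool_calls:\n",
"    result = tool_registry[tool_call[\"name\"].lower()](**tool_call[\"args\"])\n",
"    messages.append(ToolMessage(str(result), tool_call_id=tool_call[\"id\"]))\n",
"\n",
"print(llm_with_tools.invoke(messages).content)"
]
}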
|
||||
],
|
||||
"metadata": {
|
||||
@@ -383,7 +685,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.7"
|
||||
"version": "3.9.19"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
484
docs/docs/integrations/document_loaders/dedoc.ipynb
Normal file
@@ -0,0 +1,484 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6b74f73d-1763-42d0-9c24-8f65f445bb72",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Dedoc\n",
|
||||
"\n",
|
||||
"This sample demonstrates the use of `Dedoc` in combination with `LangChain` as a `DocumentLoader`.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"[Dedoc](https://dedoc.readthedocs.io) is an [open-source](https://github.com/ispras/dedoc)\n",
|
||||
"library/service that extracts texts, tables, attached files and document structure\n",
|
||||
"(e.g., titles, list items, etc.) from files of various formats.\n",
|
||||
"\n",
|
||||
"`Dedoc` supports `DOCX`, `XLSX`, `PPTX`, `EML`, `HTML`, `PDF`, images and more.\n",
|
||||
"Full list of supported formats can be found [here](https://dedoc.readthedocs.io/en/latest/#id1).\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support |\n",
|
||||
"|:-----------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------|:-----:|:------------:|:----------:|\n",
|
||||
"| [DedocFileLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.dedoc.DedocFileLoader.html) | [langchain_community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ❌ | beta | ❌ |\n",
|
||||
"| [DedocPDFLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.DedocPDFLoader.html) | [langchain_community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ❌ | beta | ❌ | \n",
|
||||
"| [DedocAPIFileLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.dedoc.DedocAPIFileLoader.html) | [langchain_community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ❌ | beta | ❌ | \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Loader features\n",
|
||||
"\n",
|
||||
"Methods for lazy loading and async loading are available, but in fact, document loading is executed synchronously.\n",
|
||||
"\n",
|
||||
"| Source | Document Lazy Loading | Async Support |\n",
|
||||
"|:------------------:|:---------------------:|:-------------:| \n",
|
||||
"| DedocFileLoader | ❌ | ❌ |\n",
|
||||
"| DedocPDFLoader | ❌ | ❌ | \n",
|
||||
"| DedocAPIFileLoader | ❌ | ❌ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"* To access `DedocFileLoader` and `DedocPDFLoader` document loaders, you'll need to install the `dedoc` integration package.\n",
|
||||
"* To access `DedocAPIFileLoader`, you'll need to run the `Dedoc` service, e.g. `Docker` container (please see [the documentation](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html#install-and-run-dedoc-using-docker) \n",
|
||||
"for more details):\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"docker pull dedocproject/dedoc\n",
|
||||
"docker run -p 1231:1231\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"`Dedoc` installation instruction is given [here](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "511c109d-a5c3-42ba-914e-5d1b385bc40f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Install package\n",
|
||||
"%pip install --quiet \"dedoc[torch]\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6820c0e9-d56d-4899-b8c8-374760360e2b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "c1f98cae-71ec-4d60-87fb-96c1a76851d8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import DedocFileLoader\n",
|
||||
"\n",
|
||||
"loader = DedocFileLoader(\"./example_data/state_of_the_union.txt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5d7bc2b3-73a0-4cd6-8014-cc7184aa9d4a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "b9097c14-6168-4726-819e-24abb9a63b13",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\nMadam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and t'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = loader.load()\n",
|
||||
"docs[0].page_content[:100]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9ed8bd46-0047-4ccc-b2d6-beb7761f7312",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Lazy Load"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "6ae12d7e-8105-4bbe-9031-0e968475f6bf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and t\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = loader.lazy_load()\n",
|
||||
"\n",
|
||||
"for doc in docs:\n",
|
||||
" print(doc.page_content[:100])\n",
|
||||
" break"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8772ae40-6239-4751-bb2d-b4a9415c1ad1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed information on configuring and calling `Dedoc` loaders, please see the API references: \n",
|
||||
"\n",
|
||||
"* https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.dedoc.DedocFileLoader.html\n",
|
||||
"* https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.DedocPDFLoader.html\n",
|
||||
"* https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.dedoc.DedocAPIFileLoader.html"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c4d5e702-0e21-4cad-a4c3-b9b3bff77203",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading any file\n",
|
||||
"\n",
|
||||
"For automatic handling of any file in a [supported format](https://dedoc.readthedocs.io/en/latest/#id1),\n",
|
||||
"`DedocFileLoader` can be useful.\n",
|
||||
"The file loader automatically detects the file type with a correct extension.\n",
|
||||
"\n",
|
||||
"File parsing process can be configured through `dedoc_kwargs` during the `DedocFileLoader` class initialization.\n",
|
||||
"Here the basic examples of some options usage are given, \n",
|
||||
"please see the documentation of `DedocFileLoader` and \n",
|
||||
"[dedoc documentation](https://dedoc.readthedocs.io/en/latest/parameters/parameters.html) \n",
|
||||
"to get more details about configuration parameters."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "de97d0ed-d6b1-44e0-b392-1f3d89c762f9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Basic example"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "50ffeeee-db12-4801-b208-7e32ea3d72ad",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\nMadam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \\n\\n\\n\\nLast year COVID-19 kept us apart. This year we are finally together again. \\n\\n\\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \\n\\n\\n\\nWith a duty to one another to the American people to '"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import DedocFileLoader\n",
|
||||
"\n",
|
||||
"loader = DedocFileLoader(\"./example_data/state_of_the_union.txt\")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[0].page_content[:400]"
|
||||
]
|
||||
},
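{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of tuning parsing through keyword arguments: here table extraction is disabled via `with_tables` (described in the \"Handling tables\" section below), while all other options keep their default values."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A sketch of configuring parsing via loader keyword arguments;\n",
"# `with_tables` is documented in the \"Handling tables\" section below.\n",
"loader = DedocFileLoader(\n",
"    \"./example_data/state_of_the_union.txt\",\n",
"    with_tables=False,\n",
")\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[0].page_content[:100]"
]
},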
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "457e5d4c-a4ee-4f31-ae74-3f75a1bbd0af",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Modes of split\n",
|
||||
"\n",
|
||||
"`DedocFileLoader` supports different types of document splitting into parts (each part is returned separately).\n",
|
||||
"For this purpose, `split` parameter is used with the following options:\n",
|
||||
"* `document` (default value): document text is returned as a single langchain `Document` object (don't split);\n",
|
||||
"* `page`: split document text into pages (works for `PDF`, `DJVU`, `PPTX`, `PPT`, `ODP`);\n",
|
||||
"* `node`: split document text into `Dedoc` tree nodes (title nodes, list item nodes, raw text nodes);\n",
|
||||
"* `line`: split document text into textual lines."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "eec54d31-ae7a-4a3c-aa10-4ae276b1e4c4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"2"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = DedocFileLoader(\n",
|
||||
" \"./example_data/layout-parser-paper.pdf\",\n",
|
||||
" split=\"page\",\n",
|
||||
" pages=\":2\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"len(docs)"
|
||||
]
|
||||
},
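{
"cell_type": "markdown",
"metadata": {},
"source": [
"A similar sketch for line-level splitting: with `split=\"line\"`, each textual line of the document becomes its own `Document` object."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A sketch of line-level splitting; each line becomes a separate Document\n",
"loader = DedocFileLoader(\n",
"    \"./example_data/state_of_the_union.txt\",\n",
"    split=\"line\",\n",
")\n",
"\n",
"docs = loader.load()\n",
"\n",
"len(docs), docs[0].page_content"
]
},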
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "61e11769-4780-4f77-b10e-27db6936f226",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Handling tables\n",
|
||||
"\n",
|
||||
"`DedocFileLoader` supports tables handling when `with_tables` parameter is \n",
|
||||
"set to `True` during loader initialization (`with_tables=True` by default). \n",
|
||||
"\n",
|
||||
"Tables are not split - each table corresponds to one langchain `Document` object.\n",
|
||||
"For tables, `Document` object has additional `metadata` fields `type=\"table\"` \n",
|
||||
"and `text_as_html` with table `HTML` representation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "bbeb2f8a-ac5e-4b59-8026-7ea3fc14c928",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"('table',\n",
|
||||
" '<table border=\"1\" style=\"border-collapse: collapse; width: 100%;\">\\n<tbody>\\n<tr>\\n<td colspan=\"1\" rowspan=\"1\">Team</td>\\n<td colspan=\"1\" rowspan=\"1\"> "Payroll (millions)"</td>\\n<td colspan=\"1\" r')"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = DedocFileLoader(\"./example_data/mlb_teams_2012.csv\")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[1].metadata[\"type\"], docs[1].metadata[\"text_as_html\"][:200]"
|
||||
]
|
||||
},
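{
"cell_type": "markdown",
"metadata": {},
"source": [
"Since the table is returned as `HTML`, it can be handed to any downstream `HTML` table parser. A minimal sketch using `pandas` (an assumed extra dependency, not otherwise used in this notebook):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from io import StringIO\n",
"\n",
"import pandas as pd  # assumed extra dependency for HTML table parsing\n",
"\n",
"# Pick the first table Document and parse its HTML representation\n",
"table_doc = next(doc for doc in docs if doc.metadata.get(\"type\") == \"table\")\n",
"df = pd.read_html(StringIO(table_doc.metadata[\"text_as_html\"]))[0]\n",
"\n",
"df.head()"
]
},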
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b4a2b872-2aba-4e4c-8b2f-83a5a81ee1da",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Handling attached files\n",
|
||||
"\n",
|
||||
"`DedocFileLoader` supports attached files handling when `with_attachments` is set \n",
|
||||
"to `True` during loader initialization (`with_attachments=False` by default). \n",
|
||||
"\n",
|
||||
"Attachments are split according to the `split` parameter.\n",
|
||||
"For attachments, langchain `Document` object has an additional metadata \n",
|
||||
"field `type=\"attachment\"`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "bb9d6c1c-e24c-4979-88a0-38d54abd6332",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"('attachment',\n",
|
||||
" '\\nContent-Type\\nmultipart/mixed; boundary=\"0000000000005d654405f082adb7\"\\nDate\\nFri, 23 Dec 2022 12:08:48 -0600\\nFrom\\nMallori Harrell <mallori@unstructured.io>\\nMIME-Version\\n1.0\\nMessage-ID\\n<CAPgNNXSzLVJ-d1OCX_TjFgJU7ugtQrjFybPtAMmmYZzphxNFYg@mail.gmail.com>\\nSubject\\nFake email with attachment\\nTo\\nMallori Harrell <mallori@unstructured.io>')"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = DedocFileLoader(\n",
|
||||
" \"./example_data/fake-email-attachment.eml\",\n",
|
||||
" with_attachments=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[1].metadata[\"type\"], docs[1].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d435c3f6-703a-4064-8307-ace140de967a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading PDF file\n",
|
||||
"\n",
|
||||
"If you want to handle only `PDF` documents, you can use `DedocPDFLoader` with only `PDF` support.\n",
|
||||
"The loader supports the same parameters for document split, tables and attachments extraction.\n",
|
||||
"\n",
|
||||
"`Dedoc` can extract `PDF` with or without a textual layer, \n",
|
||||
"as well as automatically detect its presence and correctness.\n",
|
||||
"Several `PDF` handlers are available, you can use `pdf_with_text_layer` \n",
|
||||
"parameter to choose one of them.\n",
|
||||
"Please see [parameters description](https://dedoc.readthedocs.io/en/latest/parameters/pdf_handling.html) \n",
|
||||
"to get more details.\n",
|
||||
"\n",
|
||||
"For `PDF` without a textual layer, `Tesseract OCR` and its language packages should be installed.\n",
|
||||
"In this case, [the instruction](https://dedoc.readthedocs.io/en/latest/tutorials/add_new_language.html) can be useful."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "0103a7f3-6b5e-4444-8f4d-83dd3724a9af",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n2\\n\\nZ. Shen et al.\\n\\n37], layout detection [38, 22], table detection [26], and scene text detection [4].\\n\\nA generalized learning-based framework dramatically reduces the need for the\\n\\nmanual specification of complicated rules, which is the status quo with traditional\\n\\nmethods. DL has the potential to transform DIA pipelines and benefit a broad\\n\\nspectrum of large-scale document digitization projects.\\n'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import DedocPDFLoader\n",
|
||||
"\n",
|
||||
"loader = DedocPDFLoader(\n",
|
||||
" \"./example_data/layout-parser-paper.pdf\", pdf_with_text_layer=\"true\", pages=\"2:2\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[0].page_content[:400]"
|
||||
]
|
||||
},
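{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you do not know in advance whether a `PDF` file has a textual layer, you can let `Dedoc` detect it. A sketch, assuming the `auto` handler value from the [parameters description](https://dedoc.readthedocs.io/en/latest/parameters/pdf_handling.html) linked above:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A sketch of automatic text-layer detection; pdf_with_text_layer=\"auto\"\n",
"# is taken from the Dedoc PDF-handling parameters documentation.\n",
"loader = DedocPDFLoader(\n",
"    \"./example_data/layout-parser-paper.pdf\", pdf_with_text_layer=\"auto\", pages=\"2:2\"\n",
")\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[0].page_content[:100]"
]
},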
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "13061995-1805-40c2-a77a-a6cd80999e20",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Dedoc API\n",
|
||||
"\n",
|
||||
"If you want to get up and running with less set up, you can use `Dedoc` as a service.\n",
|
||||
"**`DedocAPIFileLoader` can be used without installation of `dedoc` library.**\n",
|
||||
"The loader supports the same parameters as `DedocFileLoader` and\n",
|
||||
"also automatically detects input file types.\n",
|
||||
"\n",
|
||||
"To use `DedocAPIFileLoader`, you should run the `Dedoc` service, e.g. `Docker` container (please see [the documentation](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html#install-and-run-dedoc-using-docker) \n",
|
||||
"for more details):\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"docker pull dedocproject/dedoc\n",
|
||||
"docker run -p 1231:1231\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Please do not use our demo URL `https://dedoc-readme.hf.space` in your code."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "211fc0b5-6080-4974-a6c1-f982bafd87d6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\nMadam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \\n\\n\\n\\nLast year COVID-19 kept us apart. This year we are finally together again. \\n\\n\\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \\n\\n\\n\\nWith a duty to one another to the American people to '"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import DedocAPIFileLoader\n",
|
||||
"\n",
|
||||
"loader = DedocAPIFileLoader(\n",
|
||||
" \"./example_data/state_of_the_union.txt\",\n",
|
||||
" url=\"https://dedoc-readme.hf.space\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[0].page_content[:400]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "faaff475-5209-436f-bcde-97d58daed05c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.19"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -162,7 +162,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!poetry run pip install --upgrade langchain-openai tiktoken chromadb hnswlib"
|
||||
"!poetry run pip install --upgrade langchain-openai tiktoken langchain-chroma hnswlib"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -211,7 +211,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain_community.vectorstores.chroma import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"embedding = OpenAIEmbeddings()\n",
|
||||
@@ -365,7 +365,7 @@
|
||||
"source": [
|
||||
"from langchain.chains.query_constructor.schema import AttributeInfo\n",
|
||||
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
|
||||
"from langchain_community.vectorstores.chroma import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"\n",
|
||||
"EXCLUDE_KEYS = [\"id\", \"xpath\", \"structure\"]\n",
|
||||
"metadata_field_info = [\n",
|
||||
@@ -540,7 +540,7 @@
|
||||
"source": [
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType\n",
|
||||
"from langchain.storage import InMemoryStore\n",
|
||||
"from langchain_community.vectorstores.chroma import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"# The vectorstore to use to index the child chunks\n",
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -37,7 +37,7 @@
|
||||
"scrapfly_loader = ScrapflyLoader(\n",
|
||||
" [\"https://web-scraping.dev/products\"],\n",
|
||||
" api_key=\"Your ScrapFly API key\", # Get your API key from https://www.scrapfly.io/\n",
|
||||
" ignore_scrape_failures=True, # Ignore unprocessable web pages and log their exceptions\n",
|
||||
" continue_on_failure=True, # Ignore unprocessable web pages and log their exceptions\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Load documents from URLs as markdown\n",
|
||||
@@ -72,7 +72,7 @@
|
||||
"scrapfly_loader = ScrapflyLoader(\n",
|
||||
" [\"https://web-scraping.dev/products\"],\n",
|
||||
" api_key=\"Your ScrapFly API key\", # Get your API key from https://www.scrapfly.io/\n",
|
||||
" ignore_scrape_failures=True, # Ignore unprocessable web pages and log their exceptions\n",
|
||||
" continue_on_failure=True, # Ignore unprocessable web pages and log their exceptions\n",
|
||||
" scrape_config=scrapfly_scrape_config, # Pass the scrape_config object\n",
|
||||
" scrape_format=\"markdown\", # The scrape result format, either `markdown`(default) or `text`\n",
|
||||
")\n",
|
||||
|
||||
188
docs/docs/integrations/document_loaders/scrapingant.ipynb
Normal file
File diff suppressed because one or more lines are too long
@@ -5,7 +5,7 @@
|
||||
"id": "20deed05",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Unstructured File\n",
|
||||
"# Unstructured\n",
|
||||
"\n",
|
||||
"This notebook covers how to use `Unstructured` package to load files of many types. `Unstructured` currently supports loading of text files, powerpoints, html, pdfs, images, and more.\n",
|
||||
"\n",
|
||||
@@ -14,79 +14,69 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": null,
|
||||
"id": "2886982e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.1.1\u001b[0m\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# # Install package\n",
|
||||
"%pip install --upgrade --quiet \"unstructured[all-docs]\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "54d62efd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# # Install other dependencies\n",
|
||||
"# # https://github.com/Unstructured-IO/unstructured/blob/main/docs/source/installing.rst\n",
|
||||
"# !brew install libmagic\n",
|
||||
"# !brew install poppler\n",
|
||||
"# !brew install tesseract\n",
|
||||
"# # If parsing xml / html documents:\n",
|
||||
"# !brew install libxml2\n",
|
||||
"# !brew install libxslt"
|
||||
"# Install package, compatible with API partitioning\n",
|
||||
"%pip install --upgrade --quiet \"langchain-unstructured\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "af6a64f5",
|
||||
"cell_type": "markdown",
|
||||
"id": "e75e2a6d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# import nltk\n",
|
||||
"# nltk.download('punkt')"
|
||||
"### Local Partitioning (Optional)\n",
|
||||
"\n",
|
||||
"By default, `langchain-unstructured` installs a smaller footprint that requires\n",
|
||||
"offloading of the partitioning logic to the Unstructured API.\n",
|
||||
"\n",
|
||||
"If you would like to run the partitioning logic locally, you will need to install\n",
|
||||
"a combination of system dependencies, as outlined in the \n",
|
||||
"[Unstructured documentation here](https://docs.unstructured.io/open-source/installation/full-installation).\n",
|
||||
"\n",
|
||||
"For example, on Macs you can install the required dependencies with:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"# base dependencies\n",
|
||||
"brew install libmagic poppler tesseract\n",
|
||||
"\n",
|
||||
"# If parsing xml / html documents:\n",
|
||||
"brew install libxml2 libxslt\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"You can install the required `pip` dependencies with:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"pip install \"langchain-unstructured[local]\"\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a9c1c775",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Quickstart\n",
|
||||
"\n",
|
||||
"To simply load a file as a document, you can use the LangChain `DocumentLoader.load` \n",
|
||||
"interface:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": null,
|
||||
"id": "79d3e549",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.\\n\\nLast year COVID-19 kept us apart. This year we are finally together again.\\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans.\\n\\nWith a duty to one another to the American people to the Constit'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import UnstructuredFileLoader\n",
|
||||
"from langchain_unstructured import UnstructuredLoader\n",
|
||||
"\n",
|
||||
"loader = UnstructuredFileLoader(\"./example_data/state_of_the_union.txt\")\n",
|
||||
"loader = UnstructuredLoader(\"./example_data/state_of_the_union.txt\")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[0].page_content[:400]"
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -99,113 +89,31 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 5,
|
||||
"id": "092d9a0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'1/22/23, 6:30 PM - User 1: Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. Thanks!\\n\\n1/22/23, 8:24 PM - User 2: Goodmorning! $50 is too low.\\n\\n1/23/23, 2:59 AM - User 1: How much do you want?\\n\\n1/23/23, 3:00 AM - User 2: Online is at least $100\\n\\n1/23/23, 3:01 AM - User 2: Here is $129\\n\\n1/23/23, 3:01 AM - User 2: <Media omitted>\\n\\n1/23/23, 3:01 AM - User 1: Im not int'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"whatsapp_chat.txt : 1/22/23, 6:30 PM - User 1: Hi! Im interested in your bag. Im offering $50. Let me know if you are in\n",
|
||||
"state_of_the_union.txt : May God bless you all. May God protect our troops.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"files = [\"./example_data/whatsapp_chat.txt\", \"./example_data/layout-parser-paper.pdf\"]\n",
|
||||
"file_paths = [\n",
|
||||
" \"./example_data/whatsapp_chat.txt\",\n",
|
||||
" \"./example_data/state_of_the_union.txt\",\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"loader = UnstructuredFileLoader(files)\n",
|
||||
"loader = UnstructuredLoader(file_paths)\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[0].page_content[:400]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7874d01d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retain Elements\n",
|
||||
"\n",
|
||||
"Under the hood, Unstructured creates different \"elements\" for different chunks of text. By default we combine those together, but you can easily keep that separation by specifying `mode=\"elements\"`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "ff5b616d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'NarrativeText'}),\n",
|
||||
" Document(page_content='Last year COVID-19 kept us apart. This year we are finally together again.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'NarrativeText'}),\n",
|
||||
" Document(page_content='Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'NarrativeText'}),\n",
|
||||
" Document(page_content='With a duty to one another to the American people to the Constitution.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'UncategorizedText'}),\n",
|
||||
" Document(page_content='And with an unwavering resolve that freedom will always triumph over tyranny.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'NarrativeText'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = UnstructuredFileLoader(\n",
|
||||
" \"./example_data/state_of_the_union.txt\", mode=\"elements\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[:5]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "672733fd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Define a Partitioning Strategy\n",
|
||||
"\n",
|
||||
"Unstructured document loader allow users to pass in a `strategy` parameter that lets `unstructured` know how to partition the document. Currently supported strategies are `\"hi_res\"` (the default) and `\"fast\"`. Hi res partitioning strategies are more accurate, but take longer to process. Fast strategies partition the document more quickly, but trade-off accuracy. Not all document types have separate hi res and fast partitioning strategies. For those document types, the `strategy` kwarg is ignored. In some cases, the high res strategy will fallback to fast if there is a dependency missing (i.e. a model for document partitioning). You can see how to apply a strategy to an `UnstructuredFileLoader` below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "767238a4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='2 v 8 4 3 5 1 . 3 0 1 2 : v i X r a', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 393.9), (16.34, 560.0), (36.34, 560.0), (36.34, 393.9)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': '89565df026a24279aaea20dc08cedbec', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
|
||||
" Document(page_content='LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((157.62199999999999, 114.23496279999995), (157.62199999999999, 146.5141628), (457.7358962799999, 146.5141628), (457.7358962799999, 114.23496279999995)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'filetype': 'application/pdf', 'category': 'Title'}),\n",
|
||||
" Document(page_content='Zejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain Lee4, Jacob Carlson3, and Weining Li5', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((134.809, 168.64029940800003), (134.809, 192.2517444), (480.5464199080001, 192.2517444), (480.5464199080001, 168.64029940800003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
|
||||
" Document(page_content='1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((207.23000000000002, 202.57205439999996), (207.23000000000002, 311.8195408), (408.12676, 311.8195408), (408.12676, 202.57205439999996)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
|
||||
" Document(page_content='Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((162.779, 338.45008160000003), (162.779, 566.8455408), (454.0372021523199, 566.8455408), (454.0372021523199, 338.45008160000003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'links': [{'text': ':// layout - parser . github . io', 'url': 'https://layout-parser.github.io', 'start_index': 1477}], 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'NarrativeText'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import UnstructuredFileLoader\n",
|
||||
"\n",
|
||||
"loader = UnstructuredFileLoader(\n",
|
||||
" \"./example_data/layout-parser-paper.pdf\", strategy=\"fast\", mode=\"elements\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[5:10]"
|
||||
"print(docs[0].metadata.get(\"filename\"), \": \", docs[0].page_content[:100])\n",
|
||||
"print(docs[-1].metadata.get(\"filename\"), \": \", docs[-1].page_content[:100])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -215,37 +123,52 @@
|
||||
"source": [
|
||||
"## PDF Example\n",
|
||||
"\n",
|
||||
"Processing PDF documents works exactly the same way. Unstructured detects the file type and extracts the same types of elements. Modes of operation are \n",
|
||||
"- `single` all the text from all elements are combined into one (default)\n",
|
||||
"- `elements` maintain individual elements\n",
|
||||
"- `paged` texts from each page are only combined"
|
||||
"Processing PDF documents works exactly the same way. Unstructured detects the file type and extracts the same types of elements."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "672733fd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Define a Partitioning Strategy\n",
|
||||
"\n",
|
||||
"Unstructured document loader allow users to pass in a `strategy` parameter that lets Unstructured\n",
|
||||
"know how to partition pdf and other OCR'd documents. Currently supported strategies are `\"auto\"`,\n",
|
||||
"`\"hi_res\"`, `\"ocr_only\"`, and `\"fast\"`. Learn more about the different strategies\n",
|
||||
"[here](https://docs.unstructured.io/open-source/core-functionality/partitioning#partition-pdf). \n",
|
||||
"\n",
|
||||
"Not all document types have separate hi res and fast partitioning strategies. For those document types, the `strategy` kwarg is\n",
|
||||
"ignored. In some cases, the high res strategy will fallback to fast if there is a dependency missing\n",
|
||||
"(i.e. a model for document partitioning). You can see how to apply a strategy to an\n",
|
||||
"`UnstructuredLoader` below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "686e5eb4",
|
||||
"execution_count": 6,
|
||||
"id": "60685353",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='2 v 8 4 3 5 1 . 3 0 1 2 : v i X r a', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 393.9), (16.34, 560.0), (36.34, 560.0), (36.34, 393.9)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': '89565df026a24279aaea20dc08cedbec', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
|
||||
" Document(page_content='LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((157.62199999999999, 114.23496279999995), (157.62199999999999, 146.5141628), (457.7358962799999, 146.5141628), (457.7358962799999, 114.23496279999995)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'filetype': 'application/pdf', 'category': 'Title'}),\n",
|
||||
" Document(page_content='Zejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain Lee4, Jacob Carlson3, and Weining Li5', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((134.809, 168.64029940800003), (134.809, 192.2517444), (480.5464199080001, 192.2517444), (480.5464199080001, 168.64029940800003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
|
||||
" Document(page_content='1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((207.23000000000002, 202.57205439999996), (207.23000000000002, 311.8195408), (408.12676, 311.8195408), (408.12676, 202.57205439999996)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
|
||||
" Document(page_content='Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((162.779, 338.45008160000003), (162.779, 566.8455408), (454.0372021523199, 566.8455408), (454.0372021523199, 338.45008160000003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'links': [{'text': ':// layout - parser . github . io', 'url': 'https://layout-parser.github.io', 'start_index': 1477}], 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'NarrativeText'})]"
|
||||
"[Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 393.9), (16.34, 560.0), (36.34, 560.0), (36.34, 393.9)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'page_number': 1, 'parent_id': '89565df026a24279aaea20dc08cedbec', 'filetype': 'application/pdf', 'category': 'UncategorizedText', 'element_id': 'e9fa370aef7ee5c05744eb7bb7d9981b'}, page_content='2 v 8 4 3 5 1 . 3 0 1 2 : v i X r a'),\n",
|
||||
" Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((157.62199999999999, 114.23496279999995), (157.62199999999999, 146.5141628), (457.7358962799999, 146.5141628), (457.7358962799999, 114.23496279999995)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'page_number': 1, 'filetype': 'application/pdf', 'category': 'Title', 'element_id': 'bde0b230a1aa488e3ce837d33015181b'}, page_content='LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis'),\n",
|
||||
" Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((134.809, 168.64029940800003), (134.809, 192.2517444), (480.5464199080001, 192.2517444), (480.5464199080001, 168.64029940800003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText', 'element_id': '54700f902899f0c8c90488fa8d825bce'}, page_content='Zejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain Lee4, Jacob Carlson3, and Weining Li5'),\n",
|
||||
" Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((207.23000000000002, 202.57205439999996), (207.23000000000002, 311.8195408), (408.12676, 311.8195408), (408.12676, 202.57205439999996)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText', 'element_id': 'b650f5867bad9bb4e30384282c79bcfe'}, page_content='1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca'),\n",
|
||||
" Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((162.779, 338.45008160000003), (162.779, 566.8455408), (454.0372021523199, 566.8455408), (454.0372021523199, 338.45008160000003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'links': [{'text': ':// layout - parser . github . io', 'url': 'https://layout-parser.github.io', 'start_index': 1477}], 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'NarrativeText', 'element_id': 'cfc957c94fe63c8fd7c7f4bcb56e75a7'}, page_content='Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = UnstructuredFileLoader(\n",
|
||||
" \"./example_data/layout-parser-paper.pdf\", mode=\"elements\"\n",
|
||||
")\n",
|
||||
"from langchain_unstructured import UnstructuredLoader\n",
|
||||
"\n",
|
||||
"loader = UnstructuredLoader(\"./example_data/layout-parser-paper.pdf\", strategy=\"fast\")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
@@ -257,37 +180,39 @@
|
||||
"id": "1cf27fc8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you need to post process the `unstructured` elements after extraction, you can pass in a list of `str` -> `str` functions to the `post_processors` kwarg when you instantiate the `UnstructuredFileLoader`. This applies to other Unstructured loaders as well. Below is an example."
|
||||
"## Post Processing\n",
|
||||
"\n",
|
||||
"If you need to post process the `unstructured` elements after extraction, you can pass in a list of\n",
|
||||
"`str` -> `str` functions to the `post_processors` kwarg when you instantiate the `UnstructuredLoader`. This applies to other Unstructured loaders as well. Below is an example."
|
||||
]
|
||||
},
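{
"cell_type": "markdown",
"metadata": {},
"source": [
"First, a minimal sketch of such a post processor, assuming the `clean_extra_whitespace` helper from the `unstructured` library:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_unstructured import UnstructuredLoader\n",
"from unstructured.cleaners.core import clean_extra_whitespace\n",
"\n",
"# Each post processor is a str -> str function applied to every element's text\n",
"loader = UnstructuredLoader(\n",
"    \"./example_data/layout-parser-paper.pdf\",\n",
"    strategy=\"fast\",\n",
"    post_processors=[clean_extra_whitespace],\n",
")\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[5].page_content"
]
},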
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"execution_count": 7,
|
||||
"id": "112e5538",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='2 v 8 4 3 5 1 . 3 0 1 2 : v i X r a', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 393.9), (16.34, 560.0), (36.34, 560.0), (36.34, 393.9)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': '89565df026a24279aaea20dc08cedbec', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
|
||||
" Document(page_content='LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((157.62199999999999, 114.23496279999995), (157.62199999999999, 146.5141628), (457.7358962799999, 146.5141628), (457.7358962799999, 114.23496279999995)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'filetype': 'application/pdf', 'category': 'Title'}),\n",
|
||||
" Document(page_content='Zejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain Lee4, Jacob Carlson3, and Weining Li5', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((134.809, 168.64029940800003), (134.809, 192.2517444), (480.5464199080001, 192.2517444), (480.5464199080001, 168.64029940800003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
|
||||
" Document(page_content='1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((207.23000000000002, 202.57205439999996), (207.23000000000002, 311.8195408), (408.12676, 311.8195408), (408.12676, 202.57205439999996)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
|
||||
" Document(page_content='Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((162.779, 338.45008160000003), (162.779, 566.8455408), (454.0372021523199, 566.8455408), (454.0372021523199, 338.45008160000003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'links': [{'text': ':// layout - parser . github . io', 'url': 'https://layout-parser.github.io', 'start_index': 1477}], 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'NarrativeText'})]"
|
||||
"[Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 393.9), (16.34, 560.0), (36.34, 560.0), (36.34, 393.9)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'page_number': 1, 'parent_id': '89565df026a24279aaea20dc08cedbec', 'filetype': 'application/pdf', 'category': 'UncategorizedText', 'element_id': 'e9fa370aef7ee5c05744eb7bb7d9981b'}, page_content='2 v 8 4 3 5 1 . 3 0 1 2 : v i X r a'),\n",
|
||||
" Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((157.62199999999999, 114.23496279999995), (157.62199999999999, 146.5141628), (457.7358962799999, 146.5141628), (457.7358962799999, 114.23496279999995)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'page_number': 1, 'filetype': 'application/pdf', 'category': 'Title', 'element_id': 'bde0b230a1aa488e3ce837d33015181b'}, page_content='LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis'),\n",
|
||||
" Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((134.809, 168.64029940800003), (134.809, 192.2517444), (480.5464199080001, 192.2517444), (480.5464199080001, 168.64029940800003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText', 'element_id': '54700f902899f0c8c90488fa8d825bce'}, page_content='Zejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain Lee4, Jacob Carlson3, and Weining Li5'),\n",
|
||||
" Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((207.23000000000002, 202.57205439999996), (207.23000000000002, 311.8195408), (408.12676, 311.8195408), (408.12676, 202.57205439999996)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText', 'element_id': 'b650f5867bad9bb4e30384282c79bcfe'}, page_content='1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca'),\n",
|
||||
" Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((162.779, 338.45008160000003), (162.779, 566.8455408), (454.0372021523199, 566.8455408), (454.0372021523199, 338.45008160000003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'links': [{'text': ':// layout - parser . github . io', 'url': 'https://layout-parser.github.io', 'start_index': 1477}], 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'NarrativeText', 'element_id': 'cfc957c94fe63c8fd7c7f4bcb56e75a7'}, page_content='Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import UnstructuredFileLoader\n",
|
||||
"from langchain_unstructured import UnstructuredLoader\n",
|
||||
"from unstructured.cleaners.core import clean_extra_whitespace\n",
|
||||
"\n",
|
||||
"loader = UnstructuredFileLoader(\n",
|
||||
"loader = UnstructuredLoader(\n",
|
||||
" \"./example_data/layout-parser-paper.pdf\",\n",
|
||||
" mode=\"elements\",\n",
|
||||
" post_processors=[clean_extra_whitespace],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
@@ -303,34 +228,70 @@
|
||||
"source": [
|
||||
"## Unstructured API\n",
|
||||
"\n",
|
||||
"If you want to get up and running with less set up, you can simply run `pip install unstructured` and use `UnstructuredAPIFileLoader` or `UnstructuredAPIFileIOLoader`. That will process your document using the hosted Unstructured API. You can generate a free Unstructured API key [here](https://www.unstructured.io/api-key/). The [Unstructured documentation](https://unstructured-io.github.io/unstructured/) page will have instructions on how to generate an API key once they’re available. Check out the instructions [here](https://github.com/Unstructured-IO/unstructured-api#dizzy-instructions-for-using-the-docker-image) if you’d like to self-host the Unstructured API or run it locally."
|
||||
"If you want to get up and running with smaller packages and get the most up-to-date partitioning you can `pip install\n",
|
||||
"unstructured-client` and `pip install langchain-unstructured`. For\n",
|
||||
"more information about the `UnstructuredLoader`, refer to the\n",
|
||||
"[Unstructured provider page](https://python.langchain.com/v0.1/docs/integrations/document_loaders/unstructured_file/).\n",
|
||||
"\n",
|
||||
"The loader will process your document using the hosted Unstructured serverless API when you pass in\n",
|
||||
"your `api_key` and set `partition_via_api=True`. You can generate a free\n",
|
||||
"Unstructured API key [here](https://unstructured.io/api-key/).\n",
|
||||
"\n",
|
||||
"Check out the instructions [here](https://github.com/Unstructured-IO/unstructured-api#dizzy-instructions-for-using-the-docker-image)\n",
|
||||
"if you’d like to self-host the Unstructured API or run it locally."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": null,
|
||||
"id": "6e5fde16",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Install package\n",
|
||||
"%pip install \"langchain-unstructured\"\n",
|
||||
"%pip install \"unstructured-client\"\n",
|
||||
"\n",
|
||||
"# Set API key\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"UNSTRUCTURED_API_KEY\"] = \"FAKE_API_KEY\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "386eb63c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO: Preparing to split document for partition.\n",
|
||||
"INFO: Given file doesn't have '.pdf' extension, so splitting is not enabled.\n",
|
||||
"INFO: Partitioning without split.\n",
|
||||
"INFO: Successfully partitioned the document.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='Lorem ipsum dolor sit amet.', metadata={'source': 'example_data/fake.docx'})"
|
||||
"Document(metadata={'source': 'example_data/fake.docx', 'category_depth': 0, 'filename': 'fake.docx', 'languages': ['por', 'cat'], 'filetype': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'category': 'Title', 'element_id': '56d531394823d81787d77a04462ed096'}, page_content='Lorem ipsum dolor sit amet.')"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import UnstructuredAPIFileLoader\n",
|
||||
"from langchain_unstructured import UnstructuredLoader\n",
|
||||
"\n",
|
||||
"filenames = [\"example_data/fake.docx\", \"example_data/fake-email.eml\"]\n",
|
||||
"\n",
|
||||
"loader = UnstructuredAPIFileLoader(\n",
|
||||
" file_path=filenames[0],\n",
|
||||
" api_key=\"FAKE_API_KEY\",\n",
|
||||
"loader = UnstructuredLoader(\n",
|
||||
" file_path=\"example_data/fake.docx\",\n",
|
||||
" api_key=os.getenv(\"UNSTRUCTURED_API_KEY\"),\n",
|
||||
" partition_via_api=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
@@ -342,43 +303,197 @@
|
||||
"id": "94158999",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also batch multiple files through the Unstructured API in a single API using `UnstructuredAPIFileLoader`."
|
||||
"You can also batch multiple files through the Unstructured API in a single API using `UnstructuredLoader`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 10,
|
||||
"id": "a3d7c846",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='Lorem ipsum dolor sit amet.\\n\\nThis is a test email to use for unit tests.\\n\\nImportant points:\\n\\nRoses are red\\n\\nViolets are blue', metadata={'source': ['example_data/fake.docx', 'example_data/fake-email.eml']})"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO: Preparing to split document for partition.\n",
|
||||
"INFO: Given file doesn't have '.pdf' extension, so splitting is not enabled.\n",
|
||||
"INFO: Partitioning without split.\n",
|
||||
"INFO: Successfully partitioned the document.\n",
|
||||
"INFO: Preparing to split document for partition.\n",
|
||||
"INFO: Given file doesn't have '.pdf' extension, so splitting is not enabled.\n",
|
||||
"INFO: Partitioning without split.\n",
|
||||
"INFO: Successfully partitioned the document.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"fake.docx : Lorem ipsum dolor sit amet.\n",
|
||||
"fake-email.eml : Violets are blue\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = UnstructuredAPIFileLoader(\n",
|
||||
" file_path=filenames,\n",
|
||||
" api_key=\"FAKE_API_KEY\",\n",
|
||||
"loader = UnstructuredLoader(\n",
|
||||
" file_path=[\"example_data/fake.docx\", \"example_data/fake-email.eml\"],\n",
|
||||
" api_key=os.getenv(\"UNSTRUCTURED_API_KEY\"),\n",
|
||||
" partition_via_api=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"docs[0]"
|
||||
"\n",
|
||||
"print(docs[0].metadata[\"filename\"], \": \", docs[0].page_content[:100])\n",
|
||||
"print(docs[-1].metadata[\"filename\"], \": \", docs[-1].page_content[:100])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a324a0db",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Unstructured SDK Client\n",
|
||||
"\n",
|
||||
"Partitioning with the Unstructured API relies on the [Unstructured SDK\n",
|
||||
"Client](https://docs.unstructured.io/api-reference/api-services/sdk).\n",
|
||||
"\n",
|
||||
"Below is an example showing how you can customize some features of the client and use your own\n",
|
||||
"`requests.Session()`, pass in an alternative `server_url`, or customize the `RetryConfig` object for more control over how failed requests are handled."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0e510495",
|
||||
"execution_count": 11,
|
||||
"id": "58e55264",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO: Preparing to split document for partition.\n",
|
||||
"INFO: Concurrency level set to 5\n",
|
||||
"INFO: Splitting pages 1 to 16 (16 total)\n",
|
||||
"INFO: Determined optimal split size of 4 pages.\n",
|
||||
"INFO: Partitioning 4 files with 4 page(s) each.\n",
|
||||
"INFO: Partitioning set #1 (pages 1-4).\n",
|
||||
"INFO: Partitioning set #2 (pages 5-8).\n",
|
||||
"INFO: Partitioning set #3 (pages 9-12).\n",
|
||||
"INFO: Partitioning set #4 (pages 13-16).\n",
|
||||
"INFO: HTTP Request: POST https://api.unstructuredapp.io/general/v0/general \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO: HTTP Request: POST https://api.unstructuredapp.io/general/v0/general \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO: HTTP Request: POST https://api.unstructuredapp.io/general/v0/general \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO: Successfully partitioned set #1, elements added to the final result.\n",
|
||||
"INFO: Successfully partitioned set #2, elements added to the final result.\n",
|
||||
"INFO: Successfully partitioned set #3, elements added to the final result.\n",
|
||||
"INFO: Successfully partitioned set #4, elements added to the final result.\n",
|
||||
"INFO: Successfully partitioned the document.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"layout-parser-paper.pdf : LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import requests\n",
|
||||
"from langchain_unstructured import UnstructuredLoader\n",
|
||||
"from unstructured_client import UnstructuredClient\n",
|
||||
"from unstructured_client.utils import BackoffStrategy, RetryConfig\n",
|
||||
"\n",
|
||||
"client = UnstructuredClient(\n",
|
||||
" api_key_auth=os.getenv(\n",
|
||||
" \"UNSTRUCTURED_API_KEY\"\n",
|
||||
" ), # Note: the client API param is \"api_key_auth\" instead of \"api_key\"\n",
|
||||
" client=requests.Session(),\n",
|
||||
" server_url=\"https://api.unstructuredapp.io/general/v0/general\",\n",
|
||||
" retry_config=RetryConfig(\n",
|
||||
" strategy=\"backoff\",\n",
|
||||
" retry_connection_errors=True,\n",
|
||||
" backoff=BackoffStrategy(\n",
|
||||
" initial_interval=500,\n",
|
||||
" max_interval=60000,\n",
|
||||
" exponent=1.5,\n",
|
||||
" max_elapsed_time=900000,\n",
|
||||
" ),\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"loader = UnstructuredLoader(\n",
|
||||
" \"./example_data/layout-parser-paper.pdf\",\n",
|
||||
" partition_via_api=True,\n",
|
||||
" client=client,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"print(docs[0].metadata[\"filename\"], \": \", docs[0].page_content[:100])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c66fbeb3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chunking\n",
|
||||
"\n",
|
||||
"The `UnstructuredLoader` does not support `mode` as parameter for grouping text like the older\n",
|
||||
"loader `UnstructuredFileLoader` and others did. It instead supports \"chunking\". Chunking in\n",
|
||||
"unstructured differs from other chunking mechanisms you may be familiar with that form chunks based\n",
|
||||
"on plain-text features--character sequences like \"\\n\\n\" or \"\\n\" that might indicate a paragraph\n",
|
||||
"boundary or list-item boundary. Instead, all documents are split using specific knowledge about each\n",
|
||||
"document format to partition the document into semantic units (document elements) and we only need to\n",
|
||||
"resort to text-splitting when a single element exceeds the desired maximum chunk size. In general,\n",
|
||||
"chunking combines consecutive elements to form chunks as large as possible without exceeding the\n",
|
||||
"maximum chunk size. Chunking produces a sequence of CompositeElement, Table, or TableChunk elements.\n",
|
||||
"Each “chunk” is an instance of one of these three types.\n",
|
||||
"\n",
|
||||
"See this [page](https://docs.unstructured.io/open-source/core-functionality/chunking) for more\n",
|
||||
"details about chunking options, but to reproduce the same behavior as `mode=\"single\"`, you can set\n",
|
||||
"`chunking_strategy=\"basic\"`, `max_characters=<some-really-big-number>`, and `include_orig_elements=False`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "e9f1c20d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING: Partitioning locally even though api_key is defined since partition_via_api=False.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of LangChain documents: 1\n",
|
||||
"Length of text in the document: 42772\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_unstructured import UnstructuredLoader\n",
|
||||
"\n",
|
||||
"loader = UnstructuredLoader(\n",
|
||||
" \"./example_data/layout-parser-paper.pdf\",\n",
|
||||
" chunking_strategy=\"basic\",\n",
|
||||
" max_characters=1000000,\n",
|
||||
" include_orig_elements=False,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"print(\"Number of LangChain documents:\", len(docs))\n",
|
||||
"print(\"Length of text in the document:\", len(docs[0].page_content))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -397,7 +512,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.5"
|
||||
"version": "3.10.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -316,7 +316,7 @@
|
||||
"id": "eb00a625-a6c9-4766-b3f0-eaed024851c9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Return SQARQL query\n",
|
||||
"## Return SPARQL query\n",
|
||||
"You can return the SPARQL query step from the Sparql QA Chain using the `return_sparql_query` parameter"
|
||||
]
|
||||
},
|
||||
@@ -358,7 +358,7 @@
|
||||
"\u001b[32;1m\u001b[1;3m[]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"SQARQL query: PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n",
|
||||
"SPARQL query: PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n",
|
||||
"SELECT ?workHomepage\n",
|
||||
"WHERE {\n",
|
||||
" ?person foaf:name \"Tim Berners-Lee\" .\n",
|
||||
@@ -370,7 +370,7 @@
|
||||
],
|
||||
"source": [
|
||||
"result = chain(\"What is Tim Berners-Lee's work homepage?\")\n",
|
||||
"print(f\"SQARQL query: {result['sparql_query']}\")\n",
|
||||
"print(f\"SPARQL query: {result['sparql_query']}\")\n",
|
||||
"print(f\"Final answer: {result['result']}\")"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -33,7 +33,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet transformers --quiet"
|
||||
"%pip install --upgrade --quiet transformers"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -194,12 +194,37 @@
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e4a1e0f1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For certain requirements, there is an option to pass the IBM's [`APIClient`](https://ibm.github.io/watsonx-ai-python-sdk/base.html#apiclient) object into the `WatsonxLLM` class."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4b28afc1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from ibm_watsonx_ai import APIClient\n",
|
||||
"\n",
|
||||
"api_client = APIClient(...)\n",
|
||||
"\n",
|
||||
"watsonx_llm = WatsonxLLM(\n",
|
||||
" model_id=\"ibm/granite-13b-instruct-v2\",\n",
|
||||
" watsonx_client=api_client,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7c4a632b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also pass the IBM's [`ModelInference`](https://ibm.github.io/watsonx-ai-python-sdk/fm_model_inference.html) object into `WatsonxLLM` class."
|
||||
"You can also pass the IBM's [`ModelInference`](https://ibm.github.io/watsonx-ai-python-sdk/fm_model_inference.html) object into the `WatsonxLLM` class."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1,10 +1,21 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"cell_type": "raw",
|
||||
"id": "67db2992",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Ollama\n",
|
||||
"---\n",
|
||||
"sidebar_label: Ollama\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9597802c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# OllamaLLM\n",
|
||||
"\n",
|
||||
":::caution\n",
|
||||
"You are currently on a page documenting the use of Ollama models as [text completion models](/docs/concepts/#llms). Many popular Ollama models are [chat completion models](/docs/concepts/#chat-models).\n",
|
||||
@@ -12,21 +23,35 @@
|
||||
"You may be looking for [this page instead](/docs/integrations/chat/ollama/).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"[Ollama](https://ollama.ai/) allows you to run open-source large language models, such as Llama 2, locally.\n",
|
||||
"\n",
|
||||
"Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. \n",
|
||||
"\n",
|
||||
"It optimizes setup and configuration details, including GPU usage.\n",
|
||||
"\n",
|
||||
"For a complete list of supported models and model variants, see the [Ollama model library](https://github.com/ollama/ollama#model-library).\n",
|
||||
"This page goes over how to use LangChain to interact with `Ollama` models.\n",
|
||||
"\n",
|
||||
"## Installation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "59c710c4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# install package\n",
|
||||
"%pip install -U langchain-ollama"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0ee90032",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"First, follow [these instructions](https://github.com/ollama/ollama) to set up and run a local Ollama instance:\n",
|
||||
"First, follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance:\n",
|
||||
"\n",
|
||||
"* [Download](https://ollama.ai/download) and install Ollama onto the available supported platforms (including Windows Subsystem for Linux)\n",
|
||||
"* Fetch available LLM model via `ollama pull <name-of-model>`\n",
|
||||
" * View a list of available models via the [model library](https://ollama.ai/library) and pull to use locally with the command `ollama pull llama3`\n",
|
||||
" * View a list of available models via the [model library](https://ollama.ai/library)\n",
|
||||
" * e.g., `ollama pull llama3`\n",
|
||||
"* This will download the default tagged version of the model. Typically, the default points to the latest, smallest sized-parameter model.\n",
|
||||
"\n",
|
||||
"> On Mac, the models will be download to `~/.ollama/models`\n",
|
||||
@@ -34,194 +59,67 @@
|
||||
"> On Linux (or WSL), the models will be stored at `/usr/share/ollama/.ollama/models`\n",
|
||||
"\n",
|
||||
"* Specify the exact version of the model of interest as such `ollama pull vicuna:13b-v1.5-16k-q4_0` (View the [various tags for the `Vicuna`](https://ollama.ai/library/vicuna/tags) model in this instance)\n",
|
||||
"* To view all pulled models on your local instance, use `ollama list`\n",
|
||||
"* To view all pulled models, use `ollama list`\n",
|
||||
"* To chat directly with a model from the command line, use `ollama run <name-of-model>`\n",
|
||||
"* View the [Ollama documentation](https://github.com/ollama/ollama) for more commands. \n",
|
||||
"* Run `ollama help` in the terminal to see available commands too.\n",
|
||||
"* View the [Ollama documentation](https://github.com/jmorganca/ollama) for more commands. Run `ollama help` in the terminal to see available commands too.\n",
|
||||
"\n",
|
||||
"## Usage\n",
|
||||
"\n",
|
||||
"You can see a full list of supported parameters on the [API reference page](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.ollama.Ollama.html).\n",
|
||||
"\n",
|
||||
"If you are using a LLaMA `chat` model (e.g., `ollama pull llama3`) then you can use the `ChatOllama` [interface](https://python.langchain.com/v0.2/docs/integrations/chat/ollama/).\n",
|
||||
"\n",
|
||||
"This includes [special tokens](https://ollama.com/library/llama3) for system message and user input.\n",
|
||||
"\n",
|
||||
"## Interacting with Models \n",
|
||||
"\n",
|
||||
"Here are a few ways to interact with pulled local models\n",
|
||||
"\n",
|
||||
"#### In the terminal:\n",
|
||||
"\n",
|
||||
"* All of your local models are automatically served on `localhost:11434`\n",
|
||||
"* Run `ollama run <name-of-model>` to start interacting via the command line directly\n",
|
||||
"\n",
|
||||
"#### Via the API\n",
|
||||
"\n",
|
||||
"Send an `application/json` request to the API endpoint of Ollama to interact.\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"curl http://localhost:11434/api/generate -d '{\n",
|
||||
" \"model\": \"llama3\",\n",
|
||||
" \"prompt\":\"Why is the sky blue?\"\n",
|
||||
"}'\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"See the Ollama [API documentation](https://github.com/ollama/ollama/blob/main/docs/api.md) for all endpoints.\n",
|
||||
"\n",
|
||||
"#### via LangChain\n",
|
||||
"\n",
|
||||
"See a typical basic example of using [Ollama chat model](https://python.langchain.com/v0.2/docs/integrations/chat/ollama/) in your LangChain application."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Here's one:\\n\\nWhy don't scientists trust atoms?\\n\\nBecause they make up everything!\\n\\nHope that made you smile! Do you want to hear another one?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.llms import Ollama\n",
|
||||
"\n",
|
||||
"llm = Ollama(\n",
|
||||
" model=\"llama3\"\n",
|
||||
") # assuming you have Ollama installed and have llama3 model pulled with `ollama pull llama3 `\n",
|
||||
"\n",
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To stream tokens, use the `.stream(...)` method:"
|
||||
"## Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"id": "035dea0f",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"S\n",
|
||||
"ure\n",
|
||||
",\n",
|
||||
" here\n",
|
||||
"'\n",
|
||||
"s\n",
|
||||
" one\n",
|
||||
":\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Why\n",
|
||||
" don\n",
|
||||
"'\n",
|
||||
"t\n",
|
||||
" scient\n",
|
||||
"ists\n",
|
||||
" trust\n",
|
||||
" atoms\n",
|
||||
"?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"B\n",
|
||||
"ecause\n",
|
||||
" they\n",
|
||||
" make\n",
|
||||
" up\n",
|
||||
" everything\n",
|
||||
"!\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"I\n",
|
||||
" hope\n",
|
||||
" you\n",
|
||||
" found\n",
|
||||
" that\n",
|
||||
" am\n",
|
||||
"using\n",
|
||||
"!\n",
|
||||
" Do\n",
|
||||
" you\n",
|
||||
" want\n",
|
||||
" to\n",
|
||||
" hear\n",
|
||||
" another\n",
|
||||
" one\n",
|
||||
"?\n",
|
||||
"\n"
|
||||
]
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'A great start!\\n\\nLangChain is a type of AI model that uses language processing techniques to generate human-like text based on input prompts or chains of reasoning. In other words, it can have a conversation with humans, understanding the context and responding accordingly.\\n\\nHere\\'s a possible breakdown:\\n\\n* \"Lang\" likely refers to its focus on natural language processing (NLP) and linguistic analysis.\\n* \"Chain\" suggests that LangChain is designed to generate text in response to a series of connected ideas or prompts, rather than simply generating random text.\\n\\nSo, what do you think LangChain\\'s capabilities might be?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"Tell me a joke\"\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_ollama.llms import OllamaLLM\n",
|
||||
"\n",
|
||||
"for chunks in llm.stream(query):\n",
|
||||
" print(chunks)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To learn more about the LangChain Expressive Language and the available methods on an LLM, see the [LCEL Interface](/docs/concepts#interface)"
|
||||
"template = \"\"\"Question: {question}\n",
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||
"\n",
|
||||
"model = OllamaLLM(model=\"llama3\")\n",
|
||||
"\n",
|
||||
"chain = prompt | model\n",
|
||||
"\n",
|
||||
"chain.invoke({\"question\": \"What is LangChain?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e2d85456",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multi-modal\n",
|
||||
"\n",
|
||||
"Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava).\n",
|
||||
"Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.com/library/bakllava) and [llava](https://ollama.com/library/llava).\n",
|
||||
"\n",
|
||||
"`ollama pull bakllava`\n",
|
||||
" ollama pull bakllava\n",
|
||||
"\n",
|
||||
"Be sure to update Ollama so that you have the most recent version to support multi-modal."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.llms import Ollama\n",
|
||||
"\n",
|
||||
"bakllava = Ollama(model=\"bakllava\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "4043e202",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -279,7 +177,8 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 4,
|
||||
"id": "79aaf863",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -288,38 +187,24 @@
|
||||
"'90%'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_image_context = bakllava.bind(images=[image_b64])\n",
|
||||
"from langchain_ollama import OllamaLLM\n",
|
||||
"\n",
|
||||
"llm = OllamaLLM(model=\"bakllava\")\n",
|
||||
"\n",
|
||||
"llm_with_image_context = llm.bind(images=[image_b64])\n",
|
||||
"llm_with_image_context.invoke(\"What is the dollar based gross retention rate:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Concurrency Features\n",
|
||||
"\n",
|
||||
"Ollama supports concurrency inference for a single model, and or loading multiple models simulatenously (at least [version 0.1.33](https://github.com/ollama/ollama/releases)).\n",
|
||||
"\n",
|
||||
"Start the Ollama server with:\n",
|
||||
"\n",
|
||||
"* `OLLAMA_NUM_PARALLEL`: Handle multiple requests simultaneously for a single model\n",
|
||||
"* `OLLAMA_MAX_LOADED_MODELS`: Load multiple models simultaneously\n",
|
||||
"\n",
|
||||
"Example: `OLLAMA_NUM_PARALLEL=4 OLLAMA_MAX_LOADED_MODELS=4 ollama serve`\n",
|
||||
"\n",
|
||||
"Learn more about configuring Ollama server in [the official guide](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-do-i-configure-ollama-server)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"display_name": "Python 3.11.1 64-bit",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -333,9 +218,14 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.8"
|
||||
"version": "3.12.3"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -50,8 +50,8 @@
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain_community.llms import PipelineAI\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import PromptTemplate"
|
||||
]
|
||||
},
|
||||
@@ -123,7 +123,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
|
||||
"llm_chain = prompt | llm | StrOutputParser()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -142,7 +142,7 @@
|
||||
"source": [
|
||||
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
|
||||
"\n",
|
||||
"llm_chain.run(question)"
|
||||
"llm_chain.invoke(question)"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -88,6 +88,7 @@
|
||||
" \"max_tokens_to_generate\": 1000,\n",
|
||||
" \"temperature\": 0.01,\n",
|
||||
" \"select_expert\": \"llama-2-7b-chat-hf\",\n",
|
||||
" \"process_prompt\": False,\n",
|
||||
" # \"stop_sequences\": '\\\"sequence1\\\",\\\"sequence2\\\"',\n",
|
||||
" # \"repetition_penalty\": 1.0,\n",
|
||||
" # \"top_k\": 50,\n",
|
||||
@@ -116,6 +117,7 @@
|
||||
" \"max_tokens_to_generate\": 1000,\n",
|
||||
" \"temperature\": 0.01,\n",
|
||||
" \"select_expert\": \"llama-2-7b-chat-hf\",\n",
|
||||
" \"process_prompt\": False,\n",
|
||||
" # \"stop_sequences\": '\\\"sequence1\\\",\\\"sequence2\\\"',\n",
|
||||
" # \"repetition_penalty\": 1.0,\n",
|
||||
" # \"top_k\": 50,\n",
|
||||
@@ -175,9 +177,7 @@
|
||||
"import os\n",
|
||||
"\n",
|
||||
"sambastudio_base_url = \"<Your SambaStudio environment URL>\"\n",
|
||||
"sambastudio_base_uri = (\n",
|
||||
" \"<Your SambaStudio endpoint base URI>\" # optional, \"api/predict/nlp\" set as default\n",
|
||||
")\n",
|
||||
"sambastudio_base_uri = \"<Your SambaStudio endpoint base URI>\" # optional, \"api/predict/generic\" set as default\n",
|
||||
"sambastudio_project_id = \"<Your SambaStudio project id>\"\n",
|
||||
"sambastudio_endpoint_id = \"<Your SambaStudio endpoint id>\"\n",
|
||||
"sambastudio_api_key = \"<Your SambaStudio endpoint API key>\"\n",
|
||||
@@ -271,6 +271,7 @@
|
||||
" \"do_sample\": True,\n",
|
||||
" \"max_tokens_to_generate\": 1000,\n",
|
||||
" \"temperature\": 0.01,\n",
|
||||
" \"process_prompt\": False,\n",
|
||||
" \"select_expert\": \"Meta-Llama-3-8B-Instruct\",\n",
|
||||
" # \"repetition_penalty\": 1.0,\n",
|
||||
" # \"top_k\": 50,\n",
|
||||
|
||||
@@ -27,7 +27,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Install the package\n",
|
||||
"%pip install --upgrade --quiet dashscope"
|
||||
"%pip install --upgrade --quiet langchain-community dashscope"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -0,0 +1,325 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a283d2fd-e26e-4811-a486-d3cf0ecf6749",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Couchbase\n",
|
||||
"> Couchbase is an award-winning distributed NoSQL cloud database that delivers unmatched versatility, performance, scalability, and financial value for all of your cloud, mobile, AI, and edge computing applications. Couchbase embraces AI with coding assistance for developers and vector search for their applications.\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use the `CouchbaseChatMessageHistory` class to store the chat message history in a Couchbase cluster\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ff868a6c-3e17-4c3d-8d32-67b01f4d7bcc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set Up Couchbase Cluster\n",
|
||||
"To run this demo, you need a Couchbase Cluster. \n",
|
||||
"\n",
|
||||
"You can work with both [Couchbase Capella](https://www.couchbase.com/products/capella/) and your self-managed Couchbase Server."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "41fa85e7-6968-45e4-a445-de305d80f332",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Dependencies\n",
|
||||
"`CouchbaseChatMessageHistory` lives inside the `langchain-couchbase` package. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "b744ca05-b8c6-458c-91df-f50ca2c20b3c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain-couchbase"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "41f29205-6452-493b-ba18-8a3b006bcca4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create Couchbase Connection Object\n",
|
||||
"We create a connection to the Couchbase cluster initially and then pass the cluster object to the Vector Store. \n",
|
||||
"\n",
|
||||
"Here, we are connecting using the username and password. You can also connect using any other supported way to your cluster. \n",
|
||||
"\n",
|
||||
"For more information on connecting to the Couchbase cluster, please check the [Python SDK documentation](https://docs.couchbase.com/python-sdk/current/hello-world/start-using-sdk.html#connect)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "f394908e-f5fe-408a-84d7-b97fdebcfa26",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"COUCHBASE_CONNECTION_STRING = (\n",
|
||||
" \"couchbase://localhost\" # or \"couchbases://localhost\" if using TLS\n",
|
||||
")\n",
|
||||
"DB_USERNAME = \"Administrator\"\n",
|
||||
"DB_PASSWORD = \"Password\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "ad4dce21-d80c-465a-b709-fd366ba5ce35",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from datetime import timedelta\n",
|
||||
"\n",
|
||||
"from couchbase.auth import PasswordAuthenticator\n",
|
||||
"from couchbase.cluster import Cluster\n",
|
||||
"from couchbase.options import ClusterOptions\n",
|
||||
"\n",
|
||||
"auth = PasswordAuthenticator(DB_USERNAME, DB_PASSWORD)\n",
|
||||
"options = ClusterOptions(auth)\n",
|
||||
"cluster = Cluster(COUCHBASE_CONNECTION_STRING, options)\n",
|
||||
"\n",
|
||||
"# Wait until the cluster is ready for use.\n",
|
||||
"cluster.wait_until_ready(timedelta(seconds=5))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e3d0210c-e2e6-437a-86f3-7397a1899fef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We will now set the bucket, scope, and collection names in the Couchbase cluster that we want to use for storing the message history.\n",
|
||||
"\n",
|
||||
"Note that the bucket, scope, and collection need to exist before using them to store the message history."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "e8c7f846-a5c4-4465-a40e-4a9a23ac71bd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"BUCKET_NAME = \"langchain-testing\"\n",
|
||||
"SCOPE_NAME = \"_default\"\n",
|
||||
"COLLECTION_NAME = \"conversational_cache\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "283959e1-6af7-4768-9211-5b0facc6ef65",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage\n",
|
||||
"In order to store the messages, you need the following:\n",
|
||||
"- Couchbase Cluster object: Valid connection to the Couchbase cluster\n",
|
||||
"- bucket_name: Bucket in cluster to store the chat message history\n",
|
||||
"- scope_name: Scope in bucket to store the message history\n",
|
||||
"- collection_name: Collection in scope to store the message history\n",
|
||||
"- session_id: Unique identifier for the session\n",
|
||||
"\n",
|
||||
"Optionally you can configure the following:\n",
|
||||
"- session_id_key: Field in the chat message documents to store the `session_id`\n",
|
||||
"- message_key: Field in the chat message documents to store the message content\n",
|
||||
"- create_index: Used to specify if the index needs to be created on the collection. By default, an index is created on the `message_key` and the `session_id_key` of the documents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "43c3b2d5-aae2-44a9-9e9f-f10adf054cfa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_couchbase.chat_message_histories import CouchbaseChatMessageHistory\n",
|
||||
"\n",
|
||||
"message_history = CouchbaseChatMessageHistory(\n",
|
||||
" cluster=cluster,\n",
|
||||
" bucket_name=BUCKET_NAME,\n",
|
||||
" scope_name=SCOPE_NAME,\n",
|
||||
" collection_name=COLLECTION_NAME,\n",
|
||||
" session_id=\"test-session\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"message_history.add_user_message(\"hi!\")\n",
|
||||
"\n",
|
||||
"message_history.add_ai_message(\"how are you doing?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "e7e348ef-79e9-481c-aeef-969ae03dea6a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='hi!'), AIMessage(content='how are you doing?')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"message_history.messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c8b942a7-93fa-4cd9-8414-d047135c2733",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"The chat message history class can be used with [LCEL Runnables](https://python.langchain.com/v0.2/docs/how_to/message_history/)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8a9f0d91-d1d6-481d-8137-ea11229f485a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "946d45aa-5a61-49ae-816b-1c3949c56d9a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"You are a helpful assistant.\"),\n",
|
||||
" MessagesPlaceholder(variable_name=\"history\"),\n",
|
||||
" (\"human\", \"{question}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Create the LCEL runnable\n",
|
||||
"chain = prompt | ChatOpenAI()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "20dfd838-b549-42ed-b3ba-ac005f7e024c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain_with_history = RunnableWithMessageHistory(\n",
|
||||
" chain,\n",
|
||||
" lambda session_id: CouchbaseChatMessageHistory(\n",
|
||||
" cluster=cluster,\n",
|
||||
" bucket_name=BUCKET_NAME,\n",
|
||||
" scope_name=SCOPE_NAME,\n",
|
||||
" collection_name=COLLECTION_NAME,\n",
|
||||
" session_id=session_id,\n",
|
||||
" ),\n",
|
||||
" input_messages_key=\"question\",\n",
|
||||
" history_messages_key=\"history\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "17bd09f4-896d-433d-bb9a-369a06e7aa8a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# This is where we configure the session id\n",
|
||||
"config = {\"configurable\": {\"session_id\": \"testing\"}}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "4bda1096-2fc2-40d7-a046-0d5d8e3a8f75",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 22, 'total_tokens': 32}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-a0f8a29e-ddf4-4e06-a1fe-cf8c325a2b72-0', usage_metadata={'input_tokens': 22, 'output_tokens': 10, 'total_tokens': 32})"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_history.invoke({\"question\": \"Hi! I'm bob\"}, config=config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "1cfb31da-51bb-4c5f-909a-b7118b0ae08d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Your name is Bob.', response_metadata={'token_usage': {'completion_tokens': 5, 'prompt_tokens': 43, 'total_tokens': 48}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f764a9eb-999e-4042-96b6-fe47b7ae4779-0', usage_metadata={'input_tokens': 43, 'output_tokens': 5, 'total_tokens': 48})"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_history.invoke({\"question\": \"Whats my name\"}, config=config)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -40,6 +40,7 @@ These providers have standalone `langchain-{provider}` packages for improved ver
|
||||
- [Qdrant](/docs/integrations/providers/qdrant)
|
||||
- [Robocorp](/docs/integrations/providers/robocorp)
|
||||
- [Together AI](/docs/integrations/providers/together)
|
||||
- [Unstructured](/docs/integrations/providers/unstructured)
|
||||
- [Upstage](/docs/integrations/providers/upstage)
|
||||
- [Voyage AI](/docs/integrations/providers/voyageai)
|
||||
|
||||
|
||||
@@ -27,3 +27,85 @@ See a [usage example](/docs/integrations/document_loaders/couchbase).
|
||||
```python
|
||||
from langchain_community.document_loaders.couchbase import CouchbaseLoader
|
||||
```
|
||||
|
||||
## LLM Caches
|
||||
|
||||
### CouchbaseCache
|
||||
Use Couchbase as a cache for prompts and responses.
|
||||
|
||||
See a [usage example](/docs/integrations/llm_caching/#couchbase-cache).
|
||||
|
||||
To import this cache:
|
||||
```python
|
||||
from langchain_couchbase.cache import CouchbaseCache
|
||||
```
|
||||
|
||||
To use this cache with your LLMs:
|
||||
```python
|
||||
from langchain_core.globals import set_llm_cache
|
||||
|
||||
cluster = couchbase_cluster_connection_object
|
||||
|
||||
set_llm_cache(
|
||||
CouchbaseCache(
|
||||
cluster=cluster,
|
||||
bucket_name=BUCKET_NAME,
|
||||
scope_name=SCOPE_NAME,
|
||||
collection_name=COLLECTION_NAME,
|
||||
)
|
||||
)
|
||||
```
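
As a quick sanity check (the model name here is illustrative, not part of the integration), invoking the same prompt twice should compute once and then serve the second call from the Couchbase cache:

```python
from langchain_openai import ChatOpenAI

# Assumes set_llm_cache(CouchbaseCache(...)) from the snippet above has run
llm = ChatOpenAI(model="gpt-4o-mini")  # illustrative model choice

llm.invoke("Tell me a joke")  # computed by the model and written to the cache
llm.invoke("Tell me a joke")  # identical prompt, served from the Couchbase cache
```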
|
||||
|
||||
|
||||
### CouchbaseSemanticCache
|
||||
Semantic caching allows users to retrieve cached prompts based on the semantic similarity between the user input and previously cached inputs. Under the hood, it uses Couchbase as both a cache and a vectorstore.
|
||||
The `CouchbaseSemanticCache` needs a Search Index defined to work. Please look at the [usage example](/docs/integrations/vectorstores/couchbase) for how to set up the index.
|
||||
|
||||
See a [usage example](/docs/integrations/llm_caching/#couchbase-semantic-cache).
|
||||
|
||||
To import this cache:
|
||||
```python
|
||||
from langchain_couchbase.cache import CouchbaseSemanticCache
|
||||
```
|
||||
|
||||
To use this cache with your LLMs:
|
||||
```python
|
||||
from langchain_core.globals import set_llm_cache
|
||||
|
||||
# use any embedding provider...
|
||||
from langchain_openai import OpenAIEmbeddings
|
||||
|
||||
embeddings = OpenAIEmbeddings()
|
||||
cluster = couchbase_cluster_connection_object
|
||||
|
||||
set_llm_cache(
|
||||
CouchbaseSemanticCache(
|
||||
cluster=cluster,
|
||||
        embedding=embeddings,
|
||||
bucket_name=BUCKET_NAME,
|
||||
scope_name=SCOPE_NAME,
|
||||
collection_name=COLLECTION_NAME,
|
||||
index_name=INDEX_NAME,
|
||||
)
|
||||
)
|
||||
```
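
Unlike the exact-match `CouchbaseCache` above, a semantically similar rephrasing can also hit this cache. A minimal sketch (whether the second call is a cache hit depends on your embedding model and the cache's similarity settings):

```python
llm.invoke("What is the capital of France?")  # computed, embedded, and cached
llm.invoke("France's capital city is?")  # may be served from the cache if similar enough
```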
|
||||
|
||||
## Chat Message History
|
||||
Use Couchbase as the storage for your chat messages.
|
||||
|
||||
See a [usage example](/docs/integrations/memory/couchbase_chat_message_history).
|
||||
|
||||
To use the chat message history in your applications:
|
||||
```python
|
||||
from langchain_couchbase.chat_message_histories import CouchbaseChatMessageHistory
|
||||
|
||||
message_history = CouchbaseChatMessageHistory(
|
||||
cluster=cluster,
|
||||
bucket_name=BUCKET_NAME,
|
||||
scope_name=SCOPE_NAME,
|
||||
collection_name=COLLECTION_NAME,
|
||||
session_id="test-session",
|
||||
)
|
||||
|
||||
message_history.add_user_message("hi!")
|
||||
```
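
To read the history back, the `messages` property returns the stored messages for the session as LangChain message objects:

```python
# e.g. [HumanMessage(content='hi!')]
print(message_history.messages)
```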
|
||||
docs/docs/integrations/providers/dedoc.mdx
@@ -0,0 +1,56 @@
|
||||
# Dedoc
|
||||
|
||||
>[Dedoc](https://dedoc.readthedocs.io) is an [open-source](https://github.com/ispras/dedoc)
|
||||
library/service that extracts texts, tables, attached files and document structure
|
||||
(e.g., titles, list items, etc.) from files of various formats.
|
||||
|
||||
`Dedoc` supports `DOCX`, `XLSX`, `PPTX`, `EML`, `HTML`, `PDF`, images and more.
|
||||
The full list of supported formats can be found [here](https://dedoc.readthedocs.io/en/latest/#id1).
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
### Dedoc library
|
||||
|
||||
You can install `Dedoc` using `pip`.
|
||||
In this case, you will need to install dependencies,
|
||||
please go [here](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html)
|
||||
to get more information.
|
||||
|
||||
```bash
|
||||
pip install dedoc
|
||||
```
|
||||
|
||||
### Dedoc API
|
||||
|
||||
If you are going to use `Dedoc` API, you don't need to install `dedoc` library.
|
||||
In this case, you should run the `Dedoc` service, e.g. `Docker` container (please see
|
||||
[the documentation](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html#install-and-run-dedoc-using-docker)
|
||||
for more details):
|
||||
|
||||
```bash
|
||||
docker pull dedocproject/dedoc
|
||||
docker run -p 1231:1231
|
||||
```
|
||||
|
||||
## Document Loader
|
||||
|
||||
* For handling files of any formats (supported by `Dedoc`), you can use `DedocFileLoader`:
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import DedocFileLoader
|
||||
```
|
||||
|
||||
* For handling PDF files (with or without a textual layer), you can use `DedocPDFLoader`:
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import DedocPDFLoader
|
||||
```
|
||||
|
||||
* For handling files of any formats without library installation,
|
||||
you can use `Dedoc API` with `DedocAPIFileLoader`:
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import DedocAPIFileLoader
|
||||
```
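
A minimal usage sketch against the Docker service above (the file name is a placeholder; `split="document"` returns one `Document` per file):

```python
loader = DedocAPIFileLoader(
    "example.pdf",
    url="http://localhost:1231",
    split="document",
)
docs = loader.load()
```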

Please see a [usage example](/docs/integrations/document_loaders/dedoc) for more details.

@@ -61,7 +61,7 @@ When ready to deploy, you can self-host models with NVIDIA NIM—which is includ

```python
from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank

# connect to a chat NIM running at localhost:8000, specifying a model
llm = ChatNVIDIA(base_url="http://localhost:8000/v1", model="meta/llama3-8b-instruct")

# connect to an embedding NIM running at localhost:8080
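# hypothetical completion (the diff truncates here); NVIDIAEmbeddings accepts the same base_url pattern
embedder = NVIDIAEmbeddings(base_url="http://localhost:8080/v1")
```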
@@ -38,7 +38,7 @@ import getpass

if "PREMAI_API_KEY" not in os.environ:
    os.environ["PREMAI_API_KEY"] = getpass.getpass("PremAI API Key:")

chat = ChatPremAI(project_id=1234, model_name="gpt-4o")
```

### Chat Completions

@@ -50,7 +50,8 @@ The first one will give us a static result. Whereas the second one will stream t

```python
human_message = HumanMessage(content="Who are you?")

response = chat.invoke([human_message])
print(response.content)
```
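
The hunk above shows only the static call; a minimal streaming sketch (assuming the same `chat` object; `stream` is the standard LangChain chat model method):

```python
for chunk in chat.stream([human_message]):
    # print tokens as they arrive
    print(chunk.content, end="")
```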

You can provide a system prompt like this:
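
The diff omits the original snippet here; a minimal sketch, assuming the standard `SystemMessage` from `langchain_core.messages`:

```python
from langchain_core.messages import SystemMessage

response = chat.invoke(
    [
        SystemMessage(content="You are a friendly assistant."),
        HumanMessage(content="Who are you?"),
    ]
)
print(response.content)
```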

@@ -84,8 +85,8 @@ Repositories are also supported in LangChain PremAI. Here is how you can do it.

```python
query = "Which models are used for dense retrieval"
repository_ids = [1985,]
repositories = dict(
    ids=repository_ids,
    similarity_threshold=0.3,

@@ -100,6 +101,8 @@ First we start by defining our repository with some repository ids. Make sure th

Now, we connect the repository with our chat object to invoke RAG-based generations.

```python
import json

response = chat.invoke(query, max_tokens=100, repositories=repositories)

print(response.content)

@@ -109,25 +112,22 @@ print(json.dumps(response.response_metadata, indent=4))

This is what an output looks like:

```bash
Dense retrieval models typically include:

1. **BERT-based Models**: Such as DPR (Dense Passage Retrieval) which uses BERT for encoding queries and passages.
2. **ColBERT**: A model that combines BERT with late interaction mechanisms.
3. **ANCE (Approximate Nearest Neighbor Negative Contrastive Estimation)**: Uses BERT and focuses on efficient retrieval.
4. **TCT-ColBERT**: A variant of ColBERT that uses a two-tower
{
    "document_chunks": [
        {
            "repository_id": 1985,
            "document_id": 1306,
            "chunk_id": 173899,
            "document_name": "[D] Difference between sparse and dense informati\u2026",
            "similarity_score": 0.3209080100059509,
            "content": "with the difference or anywhere\nwhere I can read about it?\n\n\n 17 9\n\n\n u/ScotiabankCanada \u2022 Promoted\n\n\n Accelerate your study permit process\n with Scotiabank's Student GIC\n Program. We're here to help you tur\u2026\n\n\n startright.scotiabank.com Learn More\n\n\n Add a Comment\n\n\nSort by: Best\n\n\n DinosParkour \u2022 1y ago\n\n\n Dense Retrieval (DR) m"
        }
    ]
}
```

@@ -202,7 +202,7 @@ Prem Templates are also available for Streaming too.

## Prem Embeddings

In this section we cover how we can get access to different embedding models using `PremEmbeddings` with LangChain. Let's start by importing our modules and setting our API Key.

```python
import os

@@ -264,4 +264,164 @@ doc_result[:5]

0.0008162345038726926,
-0.004556538071483374,
0.02918623760342598,
-0.02547479420900345]

## Tool/Function Calling

LangChain PremAI supports tool/function calling. Tool/function calling allows a model to respond to a given prompt by generating output that matches a user-defined schema.

- You can learn about tool calling in detail [in our documentation here](https://docs.premai.io/get-started/function-calling).
- You can learn more about LangChain tool calling in [this part of the docs](https://python.langchain.com/v0.1/docs/modules/model_io/chat/function_calling).

**NOTE:**

> The current version of LangChain ChatPremAI does not support function/tool calling with streaming. Streaming support along with function calling will come soon.

### Passing tools to the model

In order to pass tools and let the LLM choose the tool it needs to call, we need to pass a tool schema. A tool schema is the function definition along with a proper docstring describing what the function does, what each argument of the function means, etc. Below are some simple arithmetic functions with their schemas.

**NOTE:**
> When defining function/tool schemas, do not forget to add information about the function arguments, otherwise it will throw an error.

```python
from langchain_core.tools import tool
from langchain_core.pydantic_v1 import BaseModel, Field


# Define the schema for function arguments
class OperationInput(BaseModel):
    a: int = Field(description="First number")
    b: int = Field(description="Second number")


# Now define the functions, where the schema for the arguments is OperationInput
@tool("add", args_schema=OperationInput, return_direct=True)
def add(a: int, b: int) -> int:
    """Adds a and b.

    Args:
        a: first int
        b: second int
    """
    return a + b


@tool("multiply", args_schema=OperationInput, return_direct=True)
def multiply(a: int, b: int) -> int:
    """Multiplies a and b.

    Args:
        a: first int
        b: second int
    """
    return a * b
```

### Binding tool schemas with our LLM

We will now use the `bind_tools` method to convert our functions above into "tools" and bind them with the model. This means we are going to pass this tool information every time we invoke the model.

```python
tools = [add, multiply]
llm_with_tools = chat.bind_tools(tools)
```

After this, we get the response from the model, which is now bound to the tools.

```python
query = "What is 3 * 12? Also, what is 11 + 49?"

messages = [HumanMessage(query)]
ai_msg = llm_with_tools.invoke(messages)
```

As we can see, when our chat model is bound to tools, then based on the given prompt it calls the correct set of tools, in sequence.

```python
ai_msg.tool_calls
```
**Output**

```python
[{'name': 'multiply',
  'args': {'a': 3, 'b': 12},
  'id': 'call_A9FL20u12lz6TpOLaiS6rFa8'},
 {'name': 'add',
  'args': {'a': 11, 'b': 49},
  'id': 'call_MPKYGLHbf39csJIyb5BZ9xIk'}]
```

We append the message shown above to the message history; it acts as context and makes the LLM aware of all the functions it has called.

```python
messages.append(ai_msg)
```

Tool calling happens in two phases:

1. In our first call, we gathered all the tools that the LLM decided to call, so that it can get their results as added context and give a more accurate, hallucination-free answer.

2. In our second call, we parse the set of tools decided by the LLM and run them (in our case, the functions we defined, with the LLM's extracted arguments), then pass the results back to the LLM.

```python
from langchain_core.messages import ToolMessage

for tool_call in ai_msg.tool_calls:
    selected_tool = {"add": add, "multiply": multiply}[tool_call["name"].lower()]
    tool_output = selected_tool.invoke(tool_call["args"])
    messages.append(ToolMessage(tool_output, tool_call_id=tool_call["id"]))
```

Finally, we call the LLM (bound to the tools) with the function responses added to its context.

```python
response = llm_with_tools.invoke(messages)
print(response.content)
```
**Output**

```txt
The final answers are:

- 3 * 12 = 36
- 11 + 49 = 60
```

### Defining tool schemas: Pydantic class (optional)

Above we have shown how to define a schema using the `tool` decorator; however, we can equivalently define the schema using Pydantic. Pydantic is useful when your tool inputs are more complex:

```python
from langchain_core.output_parsers.openai_tools import PydanticToolsParser


class add(BaseModel):
    """Add two integers together."""

    a: int = Field(..., description="First integer")
    b: int = Field(..., description="Second integer")


class multiply(BaseModel):
    """Multiply two integers together."""

    a: int = Field(..., description="First integer")
    b: int = Field(..., description="Second integer")


tools = [add, multiply]
```

Now, we can bind them to chat models and directly get the result:

```python
chain = llm_with_tools | PydanticToolsParser(tools=[multiply, add])
chain.invoke(query)
```

**Output**

```txt
[multiply(a=3, b=12), add(a=11, b=49)]
```

Now, as done above, we parse these calls, run the functions, and call the LLM once again to get the result.
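
The final step for the Pydantic variant is not shown in the diff; a minimal sketch (assuming the parsed `add`/`multiply` objects from above, with plain `operator` functions standing in for the implementations):

```python
import operator

# map the parsed Pydantic tool calls to concrete implementations
ops = {"add": operator.add, "multiply": operator.mul}

for call in chain.invoke(query):
    name = type(call).__name__  # "add" or "multiply"
    print(f"{name}({call.a}, {call.b}) = {ops[name](call.a, call.b)}")
```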

@@ -21,7 +21,7 @@ whether for semantic search or example selection.

To import this vectorstore:
```python
from langchain_qdrant import QdrantVectorStore
```
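
A minimal sketch of standing up a store (assuming `embeddings` is any LangChain `Embeddings` implementation; the collection name is illustrative):

```python
vectorstore = QdrantVectorStore.from_texts(
    ["Qdrant is a vector database"],
    embeddings,
    location=":memory:",  # spin up an ephemeral in-process instance
    collection_name="demo",
)
docs = vectorstore.similarity_search("What is Qdrant?", k=1)
```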

For a more detailed walkthrough of the Qdrant wrapper, see [this notebook](/docs/integrations/vectorstores/qdrant).

@@ -21,7 +21,7 @@ For example

```python
from langchain_openai import OpenAI
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser

import os
os.environ['OPENAI_API_BASE'] = "https://shale.live/v1"
```

@@ -35,10 +35,11 @@ template = """Question: {question}

```python
prompt = PromptTemplate.from_template(template)

llm_chain = prompt | llm | StrOutputParser()

question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"

llm_chain.invoke(question)
```

@@ -8,11 +8,21 @@ ecosystem within LangChain.

## Installation and Setup

If you are using a loader that runs locally, use the following steps to get `unstructured` and its
dependencies running.

- For the smallest installation footprint and to take advantage of features not available in the
  open-source `unstructured` package, install the Python SDK with `pip install unstructured-client`
  along with `pip install langchain-unstructured` to use the `UnstructuredLoader` and partition
  remotely against the Unstructured API. This loader lives in a LangChain partner repo instead of the
  `langchain-community` repo, and you will need an `api_key`; you can generate a free key [here](https://unstructured.io/api-key/).
- Unstructured's documentation for the SDK can be found here:
  https://docs.unstructured.io/api-reference/api-services/sdk
- To run everything locally, install the open-source Python package with `pip install unstructured`
  along with `pip install langchain-community` and use the same `UnstructuredLoader` as mentioned above.
- You can install document-specific dependencies with extras, e.g. `pip install "unstructured[docx]"`.
- To install the dependencies for all document types, use `pip install "unstructured[all-docs]"`.
- Install the following system dependencies if they are not already available on your system,
  e.g. with `brew install` on Mac. Depending on what document types you're parsing, you may not need all of these.
@@ -22,16 +32,11 @@ its dependencies running locally.

- `qpdf` (PDFs)
- `libreoffice` (MS Office docs)
- `pandoc` (EPUBs)
- When running locally, Unstructured also recommends using Docker [by following this
  guide](https://docs.unstructured.io/open-source/installation/docker-installation) to ensure all
  system dependencies are installed correctly.

The Unstructured API requires API keys to make requests.
You can request an API key [here](https://unstructured.io/api-key-hosted) and start using it today!
Check out the README [here](https://github.com/Unstructured-IO/unstructured-api) to get started making API calls.
We'd love to hear your feedback, let us know how it goes in our [community Slack](https://join.slack.com/t/unstructuredw-kbe4326/shared_invite/zt-1x7cgo0pg-PTptXWylzPQF9xZolzCnwQ).

@@ -42,30 +47,21 @@ Check out the instructions

## Data Loaders

The primary usage of `Unstructured` is in data loaders.

### UnstructuredLoader

See a [usage example](/docs/integrations/document_loaders/unstructured_file) to see how you can use
this loader for both partitioning locally and remotely with the serverless Unstructured API.

```python
from langchain_unstructured import UnstructuredLoader
```
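
A minimal sketch of both modes (the file name is a placeholder, and `partition_via_api`/`api_key` follow the `langchain-unstructured` loader; see the usage example above for the authoritative walkthrough):

```python
from langchain_unstructured import UnstructuredLoader

# local partitioning (requires the open-source `unstructured` package)
local_docs = UnstructuredLoader(file_path="example.pdf").load()

# remote partitioning against the serverless Unstructured API
remote_docs = UnstructuredLoader(
    file_path="example.pdf",
    api_key="...",  # generate a free key at unstructured.io
    partition_via_api=True,
).load()
```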

### UnstructuredCHMLoader

`CHM` means `Microsoft Compiled HTML Help`.

See a usage example in the API documentation.

```python
from langchain_community.document_loaders import UnstructuredCHMLoader
```

@@ -119,15 +115,6 @@ See a [usage example](/docs/integrations/document_loaders/google_drive#passing-i

```python
from langchain_community.document_loaders import UnstructuredFileIOLoader
```

### UnstructuredHTMLLoader

See a [usage example](/docs/how_to/document_loader_html).

@@ -309,9 +309,9 @@

"documents = TextLoader(\"../../how_to/state_of_the_union.txt\").load()\n",
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)\n",
"texts = text_splitter.split_documents(documents)\n",
"retriever = FAISS.from_documents(\n",
"    texts, CohereEmbeddings(model=\"embed-english-v3.0\")\n",
").as_retriever(search_kwargs={\"k\": 20})\n",
"\n",
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"docs = retriever.invoke(query)\n",

@@ -324,7 +324,8 @@

"metadata": {},
"source": [
"## Doing reranking with CohereRerank\n",
"Now let's wrap our base retriever with a `ContextualCompressionRetriever`. We'll add a `CohereRerank`, which uses the Cohere rerank endpoint to rerank the returned results.\n",
"Do note that it is mandatory to specify the model name in CohereRerank!"
]
},
{

@@ -339,7 +340,7 @@

"from langchain_community.llms import Cohere\n",
"\n",
"llm = Cohere(temperature=0)\n",
"compressor = CohereRerank(model=\"rerank-english-v3.0\")\n",
"compression_retriever = ContextualCompressionRetriever(\n",
"    base_compressor=compressor, base_retriever=retriever\n",
")\n",

docs/docs/integrations/retrievers/nanopq.ipynb (new file, 135 lines)
@@ -0,0 +1,135 @@

{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "661d5123-8ed2-4504-a846-7df0984e79f9",
   "metadata": {},
   "source": [
    "# NanoPQ (Product Quantization)\n",
    "\n",
    ">[Product Quantization (k-NN)](https://towardsdatascience.com/similarity-search-product-quantization-b2a1a6397701) is, in brief, a quantization algorithm that compresses database vectors, which helps with semantic search over large datasets. In a nutshell, each embedding is split into M subspaces, and the vectors in each subspace are clustered; every sub-vector is then mapped to the centroid of its cluster.\n",
    "\n",
    "This notebook goes over how to use a retriever that under the hood uses Product Quantization as implemented by the [nanopq](https://github.com/matsui528/nanopq) package."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "68794637-c13b-4145-944f-3b0c2f1258f9",
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install -qU langchain-community langchain-openai nanopq"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "39ecbf50-4623-4ee6-9c8e-fea5da21767e",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_community.embeddings.spacy_embeddings import SpacyEmbeddings\n",
    "from langchain_community.retrievers import NanoPQRetriever"
   ]
  },
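  {
   "cell_type": "markdown",
   "id": "nanopq-api-sketch-md",
   "metadata": {},
   "source": [
    "The cell below is an illustrative sketch of the underlying `nanopq` API (it is not part of the retriever's interface), using random vectors as stand-ins for embeddings:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "nanopq-api-sketch-code",
   "metadata": {},
   "outputs": [],
   "source": [
    "import nanopq\n",
    "import numpy as np\n",
    "\n",
    "vecs = np.random.rand(100, 12).astype(np.float32)  # 100 database vectors\n",
    "query = np.random.rand(12).astype(np.float32)\n",
    "\n",
    "pq = nanopq.PQ(M=2, Ks=2)  # 2 subspaces, 2 centroids per subspace\n",
    "pq.fit(vecs)\n",
    "codes = pq.encode(vecs)  # one compact code per vector\n",
    "dists = pq.dtable(query).adist(codes)  # asymmetric distances to the query\n",
    "print(dists.argsort()[:4])  # indices of the nearest vectors\n"
   ]
  },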
  {
   "cell_type": "markdown",
   "id": "c1ce742a-5085-408a-a2c2-4bae0f605880",
   "metadata": {},
   "source": [
    "## Create New Retriever with Texts"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "6c80020e-bc9e-49e8-8f93-5f75fd823738",
   "metadata": {},
   "outputs": [],
   "source": [
    "retriever = NanoPQRetriever.from_texts(\n",
    "    [\"Great world\", \"great words\", \"world\", \"planets of the world\"],\n",
    "    SpacyEmbeddings(model_name=\"en_core_web_sm\"),\n",
    "    clusters=2,\n",
    "    subspace=2,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "743c26c1-0072-4e46-b41b-c28b3f1737c8",
   "metadata": {},
   "source": [
    "## Use Retriever\n",
    "\n",
    "We can now use the retriever!"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "f496de2d-9b8f-4f8b-a30f-279ef199259a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "M: 2, Ks: 2, metric : l2, code_dtype: <class 'numpy.uint8'>\n",
      "iter: 20, seed: 123\n",
      "Training the subspace: 0 / 2\n",
      "Training the subspace: 1 / 2\n",
      "Encoding the subspace: 0 / 2\n",
      "Encoding the subspace: 1 / 2\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[Document(page_content='world'),\n",
       " Document(page_content='Great world'),\n",
       " Document(page_content='great words'),\n",
       " Document(page_content='planets of the world')]"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "retriever.invoke(\"earth\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "617202a7-e3a6-49a8-b807-4b4d771159d5",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}

@@ -40,7 +40,9 @@

"metadata": {},
"outputs": [],
"source": [
"embeddings = CohereEmbeddings(\n",
"    model=\"embed-english-light-v3.0\"\n",
")  # It is mandatory to pass a model parameter to initialize the CohereEmbeddings object"
]
},
{