Mirror of https://github.com/hwchase17/langchain.git, synced 2026-01-23 21:31:02 +00:00.

Compare commits: `langchain-…` → `eugene/foo` (411 commits).
**`.github/scripts/check_diff.py`** (120 changed lines; vendored)
```diff
@@ -2,10 +2,12 @@ import glob
 import json
 import os
 import sys
-import tomllib
 from collections import defaultdict
 from typing import Dict, List, Set
+from pathlib import Path
+
+import tomllib
+
+from get_min_versions import get_min_version_from_toml


 LANGCHAIN_DIRS = [
@@ -16,6 +18,21 @@ LANGCHAIN_DIRS = [
     "libs/experimental",
 ]

+# when set to True, we are ignoring core dependents
+# in order to be able to get CI to pass for each individual
+# package that depends on core
+# e.g. if you touch core, we don't then add textsplitters/etc to CI
+IGNORE_CORE_DEPENDENTS = False
+
+# ignored partners are removed from dependents
+# but still run if directly edited
+IGNORED_PARTNERS = [
+    # remove huggingface from dependents because of CI instability
+    # specifically in huggingface jobs
+    # https://github.com/langchain-ai/langchain/issues/25558
+    "huggingface",
+]
+

 def all_package_dirs() -> Set[str]:
     return {
@@ -68,6 +85,11 @@ def dependents_graph() -> dict:

             if "langchain" in dep:
                 dependents[dep].add(pkg_dir)
+
+    for k in dependents:
+        for partner in IGNORED_PARTNERS:
+            if f"libs/partners/{partner}" in dependents[k]:
+                dependents[k].remove(f"libs/partners/{partner}")
     return dependents


@@ -85,44 +107,96 @@ def add_dependents(dirs_to_eval: Set[str], dependents: dict) -> List[str]:


 def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
-    if dir_ == "libs/core":
-        return [
-            {"working-directory": dir_, "python-version": f"3.{v}"}
-            for v in range(8, 13)
-        ]
-    min_python = "3.8"
-    max_python = "3.12"
+    if job == "test-pydantic":
+        return _get_pydantic_test_configs(dir_)

+    if dir_ == "libs/core":
+        py_versions = ["3.9", "3.10", "3.11", "3.12"]
     # custom logic for specific directories
-    if dir_ == "libs/partners/milvus":
+    elif dir_ == "libs/partners/milvus":
         # milvus poetry doesn't allow 3.12 because they
         # declare deps in funny way
-        max_python = "3.11"
+        py_versions = ["3.9", "3.11"]

-    if dir_ in ["libs/community", "libs/langchain"] and job == "extended-tests":
+    elif dir_ in ["libs/community", "libs/langchain"] and job == "extended-tests":
         # community extended test resolution in 3.12 is slow
         # even in uv
-        max_python = "3.11"
+        py_versions = ["3.9", "3.11"]

-    if dir_ == "libs/community" and job == "compile-integration-tests":
+    elif dir_ == "libs/community" and job == "compile-integration-tests":
         # community integration deps are slow in 3.12
-        max_python = "3.11"
+        py_versions = ["3.9", "3.11"]
+    else:
+        py_versions = ["3.9", "3.12"]

-    return [
-        {"working-directory": dir_, "python-version": min_python},
-        {"working-directory": dir_, "python-version": max_python},
-    ]
+    return [{"working-directory": dir_, "python-version": py_v} for py_v in py_versions]
+
+
+def _get_pydantic_test_configs(
+    dir_: str, *, python_version: str = "3.11"
+) -> List[Dict[str, str]]:
+    with open("./libs/core/poetry.lock", "rb") as f:
+        core_poetry_lock_data = tomllib.load(f)
+    for package in core_poetry_lock_data["package"]:
+        if package["name"] == "pydantic":
+            core_max_pydantic_minor = package["version"].split(".")[1]
+            break
+
+    with open(f"./{dir_}/poetry.lock", "rb") as f:
+        dir_poetry_lock_data = tomllib.load(f)
+
+    for package in dir_poetry_lock_data["package"]:
+        if package["name"] == "pydantic":
+            dir_max_pydantic_minor = package["version"].split(".")[1]
+            break
+
+    core_min_pydantic_version = get_min_version_from_toml(
+        "./libs/core/pyproject.toml", "release", python_version, include=["pydantic"]
+    )["pydantic"]
+    core_min_pydantic_minor = core_min_pydantic_version.split(".")[1] if "." in core_min_pydantic_version else "0"
+    dir_min_pydantic_version = (
+        get_min_version_from_toml(
+            f"./{dir_}/pyproject.toml", "release", python_version, include=["pydantic"]
+        )
+        .get("pydantic", "0.0.0")
+    )
+    dir_min_pydantic_minor = dir_min_pydantic_version.split(".")[1] if "." in dir_min_pydantic_version else "0"
+
+    custom_mins = {
+        # depends on pydantic-settings 2.4 which requires pydantic 2.7
+        "libs/community": 7,
+    }
+
+    max_pydantic_minor = min(
+        int(dir_max_pydantic_minor),
+        int(core_max_pydantic_minor),
+    )
+    min_pydantic_minor = max(
+        int(dir_min_pydantic_minor),
+        int(core_min_pydantic_minor),
+        custom_mins.get(dir_, 0),
+    )
+
+    configs = [
+        {
+            "working-directory": dir_,
+            "pydantic-version": f"2.{v}.0",
+            "python-version": python_version,
+        }
+        for v in range(min_pydantic_minor, max_pydantic_minor + 1)
+    ]
+    return configs


 def _get_configs_for_multi_dirs(
-    job: str, dirs_to_run: List[str], dependents: dict
+    job: str, dirs_to_run: Dict[str, Set[str]], dependents: dict
 ) -> List[Dict[str, str]]:
     if job == "lint":
         dirs = add_dependents(
             dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"],
             dependents,
         )
-    elif job in ["test", "compile-integration-tests", "dependencies"]:
+    elif job in ["test", "compile-integration-tests", "dependencies", "test-pydantic"]:
         dirs = add_dependents(
             dirs_to_run["test"] | dirs_to_run["extended-test"], dependents
         )
@@ -151,6 +225,7 @@ if __name__ == "__main__":
         dirs_to_run["lint"] = all_package_dirs()
         dirs_to_run["test"] = all_package_dirs()
         dirs_to_run["extended-test"] = set(LANGCHAIN_DIRS)
+
     for file in files:
         if any(
             file.startswith(dir_)
@@ -168,8 +243,12 @@ if __name__ == "__main__":
         if any(file.startswith(dir_) for dir_ in LANGCHAIN_DIRS):
             # add that dir and all dirs after in LANGCHAIN_DIRS
             # for extended testing
+
             found = False
             for dir_ in LANGCHAIN_DIRS:
+                if dir_ == "libs/core" and IGNORE_CORE_DEPENDENTS:
+                    dirs_to_run["extended-test"].add(dir_)
+                    continue
                 if file.startswith(dir_):
                     found = True
             if found:
@@ -181,7 +260,6 @@ if __name__ == "__main__":
             dirs_to_run["test"].add("libs/partners/mistralai")
             dirs_to_run["test"].add("libs/partners/openai")
             dirs_to_run["test"].add("libs/partners/anthropic")
-            dirs_to_run["test"].add("libs/partners/ai21")
             dirs_to_run["test"].add("libs/partners/fireworks")
             dirs_to_run["test"].add("libs/partners/groq")

@@ -211,7 +289,6 @@ if __name__ == "__main__":

     # we now have dirs_by_job
-    # todo: clean this up

     map_job_to_configs = {
         job: _get_configs_for_multi_dirs(job, dirs_to_run, dependents)
         for job in [
@@ -220,6 +297,7 @@ if __name__ == "__main__":
             "extended-tests",
             "compile-integration-tests",
             "dependencies",
+            "test-pydantic",
         ]
     }
     map_job_to_configs["test-doc-imports"] = (
```
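The heart of the new `test-pydantic` matrix is a minor-version window: the newest pydantic minor is capped by whichever lockfile (core or the package) resolves lower, and the oldest is floored by the strictest declared minimum. A standalone sketch of that arithmetic, for illustration only (the helper name and sample numbers are ours, not the script's):

```python
# Standalone sketch of the version-window arithmetic used by
# _get_pydantic_test_configs (illustrative re-implementation).
def pydantic_minor_window(
    core_lock_minor: int,   # pydantic minor pinned in libs/core/poetry.lock
    dir_lock_minor: int,    # pydantic minor pinned in the package's lockfile
    core_min_minor: int,    # minimum minor declared by libs/core
    dir_min_minor: int,     # minimum minor declared by the package
    custom_min: int = 0,    # per-package override, e.g. 7 for libs/community
) -> list:
    upper = min(dir_lock_minor, core_lock_minor)
    lower = max(dir_min_minor, core_min_minor, custom_min)
    return [f"2.{v}.0" for v in range(lower, upper + 1)]


# core and the package both lock pydantic 2.8, both declare >=2.5,
# and the package carries the custom 2.7 floor:
print(pydantic_minor_window(8, 8, 5, 5, custom_min=7))  # ['2.7.0', '2.8.0']
```

Each version in the window becomes one `pydantic-version` entry in the CI matrix.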
A related script hunk broadens the prerelease check so dev builds are treated like release candidates:

```diff
@@ -11,7 +11,7 @@ if __name__ == "__main__":

     # see if we're releasing an rc
     version = toml_data["tool"]["poetry"]["version"]
-    releasing_rc = "rc" in version
+    releasing_rc = "rc" in version or "dev" in version

     # if not, iterate through dependencies and make sure none allow prereleases
     if not releasing_rc:
```
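The widened substring test in action (the version strings below are made-up examples):

```python
# Dev builds now get the same prerelease leniency as release candidates.
for version in ["0.2.10", "0.2.11rc1", "0.3.0.dev1"]:
    releasing_rc = "rc" in version or "dev" in version
    print(version, releasing_rc)
# 0.2.10 False
# 0.2.11rc1 True
# 0.3.0.dev1 True
```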
**`.github/scripts/get_min_versions.py`** (42 changed lines; vendored)
```diff
@@ -1,4 +1,5 @@
 import sys
+from typing import Optional

 if sys.version_info >= (3, 11):
     import tomllib
@@ -7,6 +8,9 @@ else:
     import tomli as tomllib

 from packaging.version import parse as parse_version
+from packaging.specifiers import SpecifierSet
+from packaging.version import Version
+
 import re

 MIN_VERSION_LIBS = [
@@ -15,6 +19,7 @@ MIN_VERSION_LIBS = [
     "langchain",
     "langchain-text-splitters",
     "SQLAlchemy",
+    "pydantic",
 ]

 SKIP_IF_PULL_REQUEST = ["langchain-core"]
@@ -45,7 +50,13 @@ def get_min_version(version: str) -> str:
     raise ValueError(f"Unrecognized version format: {version}")


-def get_min_version_from_toml(toml_path: str, versions_for: str):
+def get_min_version_from_toml(
+    toml_path: str,
+    versions_for: str,
+    python_version: str,
+    *,
+    include: Optional[list] = None,
+):
     # Parse the TOML file
     with open(toml_path, "rb") as file:
         toml_data = tomllib.load(file)
@@ -64,11 +75,20 @@ def get_min_version_from_toml(toml_path: str, versions_for: str):
             continue
         # Check if the lib is present in the dependencies
         if lib in dependencies:
+            if include and lib not in include:
+                continue
             # Get the version string
             version_string = dependencies[lib]

             if isinstance(version_string, dict):
                 version_string = version_string["version"]
+            if isinstance(version_string, list):
+                version_string = [
+                    vs
+                    for vs in version_string
+                    if check_python_version(python_version, vs["python"])
+                ][0]["version"]
+

             # Use parse_version to get the minimum supported version from version_string
             min_version = get_min_version(version_string)
@@ -79,13 +99,31 @@ def get_min_version_from_toml(toml_path: str, versions_for: str):
     return min_versions


+def check_python_version(version_string, constraint_string):
+    """
+    Check if the given Python version matches the given constraints.
+
+    :param version_string: A string representing the Python version (e.g. "3.8.5").
+    :param constraint_string: A string representing the package's Python version constraints (e.g. ">=3.6, <4.0").
+    :return: True if the version matches the constraints, False otherwise.
+    """
+    try:
+        version = Version(version_string)
+        constraints = SpecifierSet(constraint_string)
+        return version in constraints
+    except Exception as e:
+        print(f"Error: {e}")
+        return False
+
+
 if __name__ == "__main__":
     # Get the TOML file path from the command line argument
     toml_file = sys.argv[1]
     versions_for = sys.argv[2]
+    python_version = sys.argv[3]
     assert versions_for in ["release", "pull_request"]

     # Call the function to get the minimum versions
-    min_versions = get_min_version_from_toml(toml_file, versions_for)
+    min_versions = get_min_version_from_toml(toml_file, versions_for, python_version)

     print(" ".join([f"{lib}=={version}" for lib, version in min_versions.items()]))
```
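The new `check_python_version` helper reduces to a containment test from the `packaging` library, which the script now uses to pick the right branch of a python-conditional dependency list:

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

# The containment test check_python_version is built on:
print(Version("3.11.1") in SpecifierSet(">=3.8.1,<4.0"))  # True
print(Version("3.12.0") in SpecifierSet(">=3.9,<3.12"))   # False
```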
**`.github/workflows/_dependencies.yml`** (deleted; 114 lines; vendored)
```diff
@@ -1,114 +0,0 @@
-name: dependencies
-
-on:
-  workflow_call:
-    inputs:
-      working-directory:
-        required: true
-        type: string
-        description: "From which folder this pipeline executes"
-      langchain-location:
-        required: false
-        type: string
-        description: "Relative path to the langchain library folder"
-      python-version:
-        required: true
-        type: string
-        description: "Python version to use"
-
-env:
-  POETRY_VERSION: "1.7.1"
-
-jobs:
-  build:
-    defaults:
-      run:
-        working-directory: ${{ inputs.working-directory }}
-    runs-on: ubuntu-latest
-    name: dependency checks ${{ inputs.python-version }}
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
-        uses: "./.github/actions/poetry_setup"
-        with:
-          python-version: ${{ inputs.python-version }}
-          poetry-version: ${{ env.POETRY_VERSION }}
-          working-directory: ${{ inputs.working-directory }}
-          cache-key: pydantic-cross-compat
-
-      - name: Install dependencies
-        shell: bash
-        run: poetry install
-
-      - name: Check imports with base dependencies
-        shell: bash
-        run: poetry run make check_imports
-
-      - name: Install test dependencies
-        shell: bash
-        run: poetry install --with test
-
-      - name: Install langchain editable
-        working-directory: ${{ inputs.working-directory }}
-        if: ${{ inputs.langchain-location }}
-        env:
-          LANGCHAIN_LOCATION: ${{ inputs.langchain-location }}
-        run: |
-          poetry run pip install -e "$LANGCHAIN_LOCATION"
-
-      - name: Install the opposite major version of pydantic
-        # If normal tests use pydantic v1, here we'll use v2, and vice versa.
-        shell: bash
-        # airbyte currently doesn't support pydantic v2
-        if: ${{ !startsWith(inputs.working-directory, 'libs/partners/airbyte') }}
-        run: |
-          # Determine the major part of pydantic version
-          REGULAR_VERSION=$(poetry run python -c "import pydantic; print(pydantic.__version__)" | cut -d. -f1)
-
-          if [[ "$REGULAR_VERSION" == "1" ]]; then
-            PYDANTIC_DEP=">=2.1,<3"
-            TEST_WITH_VERSION="2"
-          elif [[ "$REGULAR_VERSION" == "2" ]]; then
-            PYDANTIC_DEP="<2"
-            TEST_WITH_VERSION="1"
-          else
-            echo "Unexpected pydantic major version '$REGULAR_VERSION', cannot determine which version to use for cross-compatibility test."
-            exit 1
-          fi
-
-          # Install via `pip` instead of `poetry add` to avoid changing lockfile,
-          # which would prevent caching from working: the cache would get saved
-          # to a different key than where it gets loaded from.
-          poetry run pip install "pydantic${PYDANTIC_DEP}"
-
-          # Ensure that the correct pydantic is installed now.
-          echo "Checking pydantic version... Expecting ${TEST_WITH_VERSION}"
-
-          # Determine the major part of pydantic version
-          CURRENT_VERSION=$(poetry run python -c "import pydantic; print(pydantic.__version__)" | cut -d. -f1)
-
-          # Check that the major part of pydantic version is as expected, if not
-          # raise an error
-          if [[ "$CURRENT_VERSION" != "$TEST_WITH_VERSION" ]]; then
-            echo "Error: expected pydantic version ${CURRENT_VERSION} to have been installed, but found: ${TEST_WITH_VERSION}"
-            exit 1
-          fi
-          echo "Found pydantic version ${CURRENT_VERSION}, as expected"
-
-      - name: Run pydantic compatibility tests
-        # airbyte currently doesn't support pydantic v2
-        if: ${{ !startsWith(inputs.working-directory, 'libs/partners/airbyte') }}
-        shell: bash
-        run: make test
-
-      - name: Ensure the tests did not create any additional files
-        shell: bash
-        run: |
-          set -eu
-
-          STATUS="$(git status)"
-          echo "$STATUS"
-
-          # grep will exit non-zero if the target message isn't found,
-          # and `set -e` above will cause the step to fail.
-          echo "$STATUS" | grep 'nothing to commit, working tree clean'
```
**`.github/workflows/_integration_test.yml`** (1 changed line; vendored)
```diff
@@ -67,6 +67,7 @@ jobs:
           NVIDIA_API_KEY: ${{ secrets.NVIDIA_API_KEY }}
           GOOGLE_SEARCH_API_KEY: ${{ secrets.GOOGLE_SEARCH_API_KEY }}
           GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
+          HUGGINGFACEHUB_API_TOKEN: ${{ secrets.HUGGINGFACEHUB_API_TOKEN }}
           EXA_API_KEY: ${{ secrets.EXA_API_KEY }}
           NOMIC_API_KEY: ${{ secrets.NOMIC_API_KEY }}
           WATSONX_APIKEY: ${{ secrets.WATSONX_APIKEY }}
```
**`.github/workflows/_lint.yml`** (12 changed lines; vendored)
```diff
@@ -7,10 +7,6 @@ on:
         required: true
         type: string
         description: "From which folder this pipeline executes"
-      langchain-location:
-        required: false
-        type: string
-        description: "Relative path to the langchain library folder"
       python-version:
         required: true
         type: string
@@ -63,14 +59,6 @@ jobs:
         run: |
           poetry install --with lint,typing

-      - name: Install langchain editable
-        working-directory: ${{ inputs.working-directory }}
-        if: ${{ inputs.langchain-location }}
-        env:
-          LANGCHAIN_LOCATION: ${{ inputs.langchain-location }}
-        run: |
-          poetry run pip install -e "$LANGCHAIN_LOCATION"
-
       - name: Get .mypy_cache to speed up mypy
         uses: actions/cache@v4
         env:
```
**`.github/workflows/_release.yml`** (4 changed lines; vendored)
```diff
@@ -164,6 +164,7 @@ jobs:

       - name: Set up Python + Poetry ${{ env.POETRY_VERSION }}
         uses: "./.github/actions/poetry_setup"
+        id: setup-python
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           poetry-version: ${{ env.POETRY_VERSION }}
@@ -231,7 +232,7 @@ jobs:
         id: min-version
         run: |
           poetry run pip install packaging
-          min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml release)"
+          min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml release ${{ steps.setup-python.outputs.installed-python-version }})"
           echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
           echo "min-versions=$min_versions"
@@ -273,6 +274,7 @@ jobs:
           GOOGLE_SEARCH_API_KEY: ${{ secrets.GOOGLE_SEARCH_API_KEY }}
           GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
           GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
+          HUGGINGFACEHUB_API_TOKEN: ${{ secrets.HUGGINGFACEHUB_API_TOKEN }}
           EXA_API_KEY: ${{ secrets.EXA_API_KEY }}
           NOMIC_API_KEY: ${{ secrets.NOMIC_API_KEY }}
           WATSONX_APIKEY: ${{ secrets.WATSONX_APIKEY }}
```
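With `id: setup-python` in place, the release workflow can feed the exact interpreter version into minimum-version resolution. After GitHub expression substitution the step boils down to roughly the following (a sketch; the literal version string stands in for `installed-python-version`, and the real call runs under poetry):

```python
import subprocess

# Approximate equivalent of the release step's min-version call;
# "3.11.9" is an example value for installed-python-version.
result = subprocess.run(
    ["python", ".github/scripts/get_min_versions.py",
     "pyproject.toml", "release", "3.11.9"],
    capture_output=True, text=True, check=True,
)
print(result.stdout)  # space-separated "lib==version" pairs
```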
**`.github/workflows/_test.yml`** (53 changed lines; vendored)
```diff
@@ -7,10 +7,6 @@ on:
         required: true
         type: string
         description: "From which folder this pipeline executes"
-      langchain-location:
-        required: false
-        type: string
-        description: "Relative path to the langchain library folder"
       python-version:
         required: true
         type: string
@@ -31,29 +27,42 @@ jobs:

       - name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
         uses: "./.github/actions/poetry_setup"
+        id: setup-python
         with:
           python-version: ${{ inputs.python-version }}
           poetry-version: ${{ env.POETRY_VERSION }}
           working-directory: ${{ inputs.working-directory }}
           cache-key: core

       - name: Install dependencies
         shell: bash
         run: poetry install --with test

-      - name: Install langchain editable
-        working-directory: ${{ inputs.working-directory }}
-        if: ${{ inputs.langchain-location }}
-        env:
-          LANGCHAIN_LOCATION: ${{ inputs.langchain-location }}
-        run: |
-          poetry run pip install -e "$LANGCHAIN_LOCATION"
-
       - name: Run core tests
         shell: bash
         run: |
           make test

+      - name: Get minimum versions
+        working-directory: ${{ inputs.working-directory }}
+        id: min-version
+        shell: bash
+        run: |
+          poetry run pip install packaging tomli
+          echo "Python version ${{ steps.setup-python.outputs.installed-python-version }}"
+          python_version="$(poetry run python --version | awk '{print $2}')"
+          min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml pull_request $python_version)"
+          echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
+          echo "min-versions=$min_versions"
+
+      - name: Run unit tests with minimum dependency versions
+        if: ${{ steps.min-version.outputs.min-versions != '' }}
+        env:
+          MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
+        run: |
+          poetry run pip install --force-reinstall $MIN_VERSIONS --editable .
+          make tests
+        working-directory: ${{ inputs.working-directory }}
+
       - name: Ensure the tests did not create any additional files
         shell: bash
         run: |
@@ -66,21 +75,3 @@ jobs:
           # and `set -e` above will cause the step to fail.
           echo "$STATUS" | grep 'nothing to commit, working tree clean'
-
-      - name: Get minimum versions
-        working-directory: ${{ inputs.working-directory }}
-        id: min-version
-        run: |
-          poetry run pip install packaging tomli
-          min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml pull_request)"
-          echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
-          echo "min-versions=$min_versions"
-
-      # Temporarily disabled until we can get the minimum versions working
-      # - name: Run unit tests with minimum dependency versions
-      #   if: ${{ steps.min-version.outputs.min-versions != '' }}
-      #   env:
-      #     MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
-      #   run: |
-      #     poetry run pip install --force-reinstall $MIN_VERSIONS --editable .
-      #     make tests
-      #     working-directory: ${{ inputs.working-directory }}
```
**`.github/workflows/_test_pydantic.yml`** (new file, 64 lines; vendored)
```diff
@@ -0,0 +1,64 @@
+name: test pydantic intermediate versions
+
+on:
+  workflow_call:
+    inputs:
+      working-directory:
+        required: true
+        type: string
+        description: "From which folder this pipeline executes"
+      python-version:
+        required: false
+        type: string
+        description: "Python version to use"
+        default: "3.11"
+      pydantic-version:
+        required: true
+        type: string
+        description: "Pydantic version to test."
+
+env:
+  POETRY_VERSION: "1.7.1"
+
+jobs:
+  build:
+    defaults:
+      run:
+        working-directory: ${{ inputs.working-directory }}
+    runs-on: ubuntu-latest
+    name: "make test # pydantic: ~=${{ inputs.pydantic-version }}, python: ${{ inputs.python-version }}, "
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
+        uses: "./.github/actions/poetry_setup"
+        with:
+          python-version: ${{ inputs.python-version }}
+          poetry-version: ${{ env.POETRY_VERSION }}
+          working-directory: ${{ inputs.working-directory }}
+          cache-key: core
+
+      - name: Install dependencies
+        shell: bash
+        run: poetry install --with test
+
+      - name: Overwrite pydantic version
+        shell: bash
+        run: poetry run pip install pydantic~=${{ inputs.pydantic-version }}
+
+      - name: Run core tests
+        shell: bash
+        run: |
+          make test
+
+      - name: Ensure the tests did not create any additional files
+        shell: bash
+        run: |
+          set -eu
+
+          STATUS="$(git status)"
+          echo "$STATUS"
+
+          # grep will exit non-zero if the target message isn't found,
+          # and `set -e` above will cause the step to fail.
+          echo "$STATUS" | grep 'nothing to commit, working tree clean'
```
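The `Overwrite pydantic version` step relies on compatible-release (`~=`) semantics: `pydantic~=2.7.0` allows any 2.7.x patch but never 2.8. The `packaging` library makes the behavior easy to verify:

```python
from packaging.specifiers import SpecifierSet

# "pydantic~=2.7.0" means ">=2.7.0,<2.8" (compatible release).
spec = SpecifierSet("~=2.7.0")
print("2.7.4" in spec)  # True: patch upgrades stay in range
print("2.8.0" in spec)  # False: the next minor is excluded
```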
**`.github/workflows/check_diffs.yml`** (36 changed lines; vendored)
```diff
@@ -31,6 +31,7 @@ jobs:
         uses: Ana06/get-changed-files@v2.2.0
       - id: set-matrix
         run: |
+          python -m pip install packaging
           python .github/scripts/check_diff.py ${{ steps.files.outputs.all }} >> $GITHUB_OUTPUT
     outputs:
       lint: ${{ steps.set-matrix.outputs.lint }}
@@ -39,6 +40,7 @@ jobs:
       compile-integration-tests: ${{ steps.set-matrix.outputs.compile-integration-tests }}
       dependencies: ${{ steps.set-matrix.outputs.dependencies }}
       test-doc-imports: ${{ steps.set-matrix.outputs.test-doc-imports }}
+      test-pydantic: ${{ steps.set-matrix.outputs.test-pydantic }}
   lint:
     name: cd ${{ matrix.job-configs.working-directory }}
     needs: [ build ]
@@ -46,6 +48,7 @@ jobs:
     strategy:
       matrix:
         job-configs: ${{ fromJson(needs.build.outputs.lint) }}
+      fail-fast: false
     uses: ./.github/workflows/_lint.yml
     with:
       working-directory: ${{ matrix.job-configs.working-directory }}
@@ -59,18 +62,34 @@ jobs:
     strategy:
       matrix:
         job-configs: ${{ fromJson(needs.build.outputs.test) }}
+      fail-fast: false
     uses: ./.github/workflows/_test.yml
     with:
       working-directory: ${{ matrix.job-configs.working-directory }}
       python-version: ${{ matrix.job-configs.python-version }}
     secrets: inherit

+  test-pydantic:
+    name: cd ${{ matrix.job-configs.working-directory }}
+    needs: [ build ]
+    if: ${{ needs.build.outputs.test-pydantic != '[]' }}
+    strategy:
+      matrix:
+        job-configs: ${{ fromJson(needs.build.outputs.test-pydantic) }}
+      fail-fast: false
+    uses: ./.github/workflows/_test_pydantic.yml
+    with:
+      working-directory: ${{ matrix.job-configs.working-directory }}
+      pydantic-version: ${{ matrix.job-configs.pydantic-version }}
+    secrets: inherit
+
   test-doc-imports:
     needs: [ build ]
     if: ${{ needs.build.outputs.test-doc-imports != '[]' }}
     strategy:
       matrix:
         job-configs: ${{ fromJson(needs.build.outputs.test-doc-imports) }}
+      fail-fast: false
     uses: ./.github/workflows/_test_doc_imports.yml
     secrets: inherit
     with:
@@ -83,25 +102,13 @@ jobs:
     strategy:
       matrix:
         job-configs: ${{ fromJson(needs.build.outputs.compile-integration-tests) }}
+      fail-fast: false
     uses: ./.github/workflows/_compile_integration_test.yml
     with:
       working-directory: ${{ matrix.job-configs.working-directory }}
       python-version: ${{ matrix.job-configs.python-version }}
     secrets: inherit

-  dependencies:
-    name: cd ${{ matrix.job-configs.working-directory }}
-    needs: [ build ]
-    if: ${{ needs.build.outputs.dependencies != '[]' }}
-    strategy:
-      matrix:
-        job-configs: ${{ fromJson(needs.build.outputs.dependencies) }}
-    uses: ./.github/workflows/_dependencies.yml
-    with:
-      working-directory: ${{ matrix.job-configs.working-directory }}
-      python-version: ${{ matrix.job-configs.python-version }}
-    secrets: inherit
-
   extended-tests:
     name: "cd ${{ matrix.job-configs.working-directory }} / make extended_tests #${{ matrix.job-configs.python-version }}"
     needs: [ build ]
@@ -110,6 +117,7 @@ jobs:
       matrix:
         # note different variable for extended test dirs
         job-configs: ${{ fromJson(needs.build.outputs.extended-tests) }}
+      fail-fast: false
     runs-on: ubuntu-latest
     defaults:
       run:
@@ -149,7 +157,7 @@ jobs:
           echo "$STATUS" | grep 'nothing to commit, working tree clean'
   ci_success:
     name: "CI Success"
-    needs: [build, lint, test, compile-integration-tests, dependencies, extended-tests, test-doc-imports]
+    needs: [build, lint, test, compile-integration-tests, extended-tests, test-doc-imports, test-pydantic]
     if: |
       always()
     runs-on: ubuntu-latest
```
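All of these matrices come from the `set-matrix` step: `check_diff.py` prints one `key=json` line per job into `$GITHUB_OUTPUT`, and `fromJson()` turns each value back into a list of job configs. A minimal sketch of that contract (the sample paths and versions are illustrative):

```python
import json

# Sketch of what check_diff.py emits for the workflow to consume.
outputs = {
    "test-pydantic": [
        {"working-directory": "libs/core",
         "pydantic-version": "2.7.0",
         "python-version": "3.11"},
        {"working-directory": "libs/core",
         "pydantic-version": "2.8.0",
         "python-version": "3.11"},
    ],
}
for key, configs in outputs.items():
    # One line per key, appended to the file named by $GITHUB_OUTPUT.
    print(f"{key}={json.dumps(configs)}")
```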
**`.github/workflows/codespell.yml`** (3 changed lines; vendored)
```diff
@@ -3,9 +3,8 @@ name: CI / cd . / make spell_check

 on:
   push:
-    branches: [master, v0.1]
+    branches: [master, v0.1, v0.2]
   pull_request:
-    branches: [master, v0.1]

 permissions:
   contents: read
```
**`.github/workflows/scheduled_test.yml`** (7 changed lines; vendored)
```diff
@@ -17,16 +17,14 @@ jobs:
       fail-fast: false
       matrix:
         python-version:
-          - "3.8"
           - "3.9"
           - "3.11"
         working-directory:
           - "libs/partners/openai"
           - "libs/partners/anthropic"
-          - "libs/partners/ai21"
           - "libs/partners/fireworks"
           - "libs/partners/groq"
           - "libs/partners/mistralai"
           - "libs/partners/together"
           - "libs/partners/google-vertexai"
           - "libs/partners/google-genai"
           - "libs/partners/aws"
@@ -90,11 +88,10 @@ jobs:
           AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_CHAT_DEPLOYMENT_NAME }}
           AZURE_OPENAI_LLM_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LLM_DEPLOYMENT_NAME }}
           AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
-          AI21_API_KEY: ${{ secrets.AI21_API_KEY }}
           FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
           GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
           HUGGINGFACEHUB_API_TOKEN: ${{ secrets.HUGGINGFACEHUB_API_TOKEN }}
           MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
           TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
           COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
           NVIDIA_API_KEY: ${{ secrets.NVIDIA_API_KEY }}
           GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
```
**`.gitignore`** (3 changed lines; vendored)
```diff
@@ -167,11 +167,14 @@ docs/.docusaurus/
 docs/.cache-loader/
 docs/_dist
 docs/api_reference/*api_reference.rst
+docs/api_reference/*.md
 docs/api_reference/_build
 docs/api_reference/*/
 !docs/api_reference/_static/
 !docs/api_reference/templates/
 !docs/api_reference/themes/
+!docs/api_reference/_extensions/
+!docs/api_reference/scripts/
 docs/docs/build
 docs/docs/node_modules
 docs/docs/yarn.lock
```
**`Makefile`** (6 changed lines)
```diff
@@ -31,19 +31,21 @@ docs_linkcheck:
 api_docs_build:
 	poetry run python docs/api_reference/create_api_rst.py
 	cd docs/api_reference && poetry run make html
+	poetry run python docs/api_reference/scripts/custom_formatter.py docs/api_reference/_build/html/

 API_PKG ?= text-splitters

 api_docs_quick_preview:
-	poetry run pip install "pydantic<2"
 	poetry run python docs/api_reference/create_api_rst.py $(API_PKG)
 	cd docs/api_reference && poetry run make html
-	open docs/api_reference/_build/html/$(shell echo $(API_PKG) | sed 's/-/_/g')_api_reference.html
+	poetry run python docs/api_reference/scripts/custom_formatter.py docs/api_reference/_build/html/
+	open docs/api_reference/_build/html/reference.html

 ## api_docs_clean: Clean the API Reference documentation build artifacts.
 api_docs_clean:
 	find ./docs/api_reference -name '*_api_reference.rst' -delete
 	git clean -fdX ./docs/api_reference
+	rm docs/api_reference/index.md


 ## api_docs_linkcheck: Run linkchecker on the API Reference documentation.
```
**`README.md`** (22 changed lines)
````diff
@@ -14,18 +14,20 @@

 Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).

 To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
 [LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
+Fill out [this form](https://www.langchain.com/contact-sales) to speak with our sales team.

 ## Quick Install

 With pip:

 ```bash
 pip install langchain
 ```

 With conda:

 ```bash
 conda install langchain -c conda-forge
 ```
@@ -36,22 +38,25 @@ conda install langchain -c conda-forge

 For these applications, LangChain simplifies the entire application lifecycle:

 - **Open-source libraries**: Build your applications using LangChain's open-source [building blocks](https://python.langchain.com/v0.2/docs/concepts#langchain-expression-language-lcel), [components](https://python.langchain.com/v0.2/docs/concepts), and [third-party integrations](https://python.langchain.com/v0.2/docs/integrations/platforms/).
 Use [LangGraph](/docs/concepts/#langgraph) to build stateful agents with first-class streaming and human-in-the-loop support.
 - **Productionization**: Inspect, monitor, and evaluate your apps with [LangSmith](https://docs.smith.langchain.com/) so that you can constantly optimize and deploy with confidence.
 - **Deployment**: Turn your LangGraph applications into production-ready APIs and Assistants with [LangGraph Cloud](https://langchain-ai.github.io/langgraph/cloud/).

 ### Open-source libraries

 - **`langchain-core`**: Base abstractions and LangChain Expression Language.
 - **`langchain-community`**: Third party integrations.
   - Some integrations have been further split into **partner packages** that only rely on **`langchain-core`**. Examples include **`langchain_openai`** and **`langchain_anthropic`**.
 - **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
-- **[`LangGraph`](https://langchain-ai.github.io/langgraph/)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. Integrates smoothly with LangChain, but can be used without it.
+- **[`LangGraph`](https://langchain-ai.github.io/langgraph/)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. Integrates smoothly with LangChain, but can be used without it. To learn more about LangGraph, check out our first LangChain Academy course, *Introduction to LangGraph*, available [here](https://academy.langchain.com/courses/intro-to-langgraph).

 ### Productionization:

 - **[LangSmith](https://docs.smith.langchain.com/)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.

 ### Deployment:

 - **[LangGraph Cloud](https://langchain-ai.github.io/langgraph/cloud/)**: Turn your LangGraph applications into production-ready APIs and Assistants.

 ![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](docs/static/svg/langchain_stack_062024.svg "LangChain Architecture Overview")
@@ -76,15 +81,17 @@ Use [LangGraph](/docs/concepts/#langgraph) to build stateful agents with first-c
 And much more! Head to the [Tutorials](https://python.langchain.com/v0.2/docs/tutorials/) section of the docs for more.

 ## 🚀 How does LangChain help?

 The main value props of the LangChain libraries are:

 1. **Components**: composable building blocks, tools and integrations for working with language models. Components are modular and easy-to-use, whether you are using the rest of the LangChain framework or not
 2. **Off-the-shelf chains**: built-in assemblages of components for accomplishing higher-level tasks

 Off-the-shelf chains make it easy to get started. Components make it easy to customize existing chains and build new ones.

 ## LangChain Expression Language (LCEL)

-LCEL is the foundation of many of LangChain's components, and is a declarative way to compose chains. LCEL was designed from day 1 to support putting prototypes in production, with no code changes, from the simplest “prompt + LLM” chain to the most complex chains.
+LCEL is a key part of LangChain, allowing you to build and organize chains of processes in a straightforward, declarative manner. It was designed to support taking prototypes directly into production without needing to alter any code. This means you can use LCEL to set up everything from basic "prompt + LLM" setups to intricate, multi-step workflows.

 - **[Overview](https://python.langchain.com/v0.2/docs/concepts/#langchain-expression-language-lcel)**: LCEL and its benefits
 - **[Interface](https://python.langchain.com/v0.2/docs/concepts/#runnable-interface)**: The standard Runnable interface for LCEL objects
@@ -123,7 +130,6 @@ Please see [here](https://python.langchain.com) for full documentation, which in
 - [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph/): Create stateful, multi-actor applications with LLMs. Integrates smoothly with LangChain, but can be used without it.
 - [🦜🏓 LangServe](https://python.langchain.com/docs/langserve): Deploy LangChain runnables and chains as REST APIs.

-
 ## 💁 Contributing

 As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
````
A cookbook notebook fixes a docstring typo:

```diff
@@ -445,7 +445,7 @@
     "\n",
     "\n",
     "def plt_img_base64(img_base64):\n",
-    "    \"\"\"Disply base64 encoded string as image\"\"\"\n",
+    "    \"\"\"Display base64 encoded string as image\"\"\"\n",
     "    # Create an HTML img tag with the base64 string as the source\n",
     "    image_html = f'<img src=\"data:image/jpeg;base64,{img_base64}\" />'\n",
     "    # Display the image by rendering the HTML\n",
```
The cookbook index gains two new entries:

```diff
@@ -4,6 +4,8 @@ Example code for building applications with LangChain, with an emphasis on more

 Notebook | Description
 :- | :-
+[agent_fireworks_ai_langchain_mongodb.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/agent_fireworks_ai_langchain_mongodb.ipynb) | Build an AI Agent With Memory Using MongoDB, LangChain and FireWorksAI.
+[mongodb-langchain-cache-memory.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/mongodb-langchain-cache-memory.ipynb) | Build a RAG Application with Semantic Cache Using MongoDB and LangChain.
 [LLaMA2_sql_chat.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/LLaMA2_sql_chat.ipynb) | Build a chat application that interacts with a SQL database using an open source llm (llama2), specifically demonstrated on an SQLite database containing rosters.
 [Semi_Structured_RAG.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/Semi_Structured_RAG.ipynb) | Perform retrieval-augmented generation (rag) on documents with semi-structured data, including text and tables, using unstructured for parsing, multi-vector retriever for storing, and lcel for implementing chains.
 [Semi_structured_and_multi_moda...](https://github.com/langchain-ai/langchain/tree/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb) | Perform retrieval-augmented generation (rag) on documents with semi-structured data and images, using unstructured for parsing, multi-vector retriever for storage and retrieval, and lcel for implementing chains.
```
**`cookbook/agent_fireworks_ai_langchain_mongodb.ipynb`** (new file, 1593 lines; file diff suppressed because one or more lines are too long)
Another notebook updates an outdated API-reference link:

```diff
@@ -135,7 +135,7 @@
    "source": [
     "## Instantiate model, DB, code interpreter\n",
     "\n",
-    "We'll use the LangChain [SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase) interface to connect to our DB and query it. This works with any SQL database supported by [SQLAlchemy](https://www.sqlalchemy.org/)."
+    "We'll use the LangChain [SQLDatabase](https://python.langchain.com/v0.2/api_reference/community/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase) interface to connect to our DB and query it. This works with any SQL database supported by [SQLAlchemy](https://www.sqlalchemy.org/)."
    ]
   },
   {
```
Another notebook now prompts for the OpenAI key only when it is not already set:

```diff
@@ -90,7 +90,8 @@
     "import os\n",
     "from getpass import getpass\n",
     "\n",
-    "os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
+    "if \"OPENAI_API_KEY\" not in os.environ:\n",
+    "    os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
     "# Please manually enter OpenAI Key"
    ]
   },
```
A typo fix in the Cassandra notebook:

```diff
@@ -38,7 +38,7 @@
    "source": [
     "Connection is via `cassio` using `auto=True` parameter, and the notebook uses OpenAI. You should create a `.env` file accordingly.\n",
     "\n",
-    "For Casssandra, set:\n",
+    "For Cassandra, set:\n",
     "```bash\n",
     "CASSANDRA_CONTACT_POINTS\n",
     "CASSANDRA_USERNAME\n",
```
Another notebook gets a typo fix and refreshed kernel metadata:

```diff
@@ -336,7 +336,7 @@
     "    # Create a prompt template with format instructions and the query\n",
     "    prompt = PromptTemplate(\n",
     "        template=\"\"\"You are generating questions that is well optimized for retrieval. \\n \n",
-    "        Look at the input and try to reason about the underlying sematic intent / meaning. \\n \n",
+    "        Look at the input and try to reason about the underlying semantic intent / meaning. \\n \n",
     "        Here is the initial question:\n",
     "        \\n ------- \\n\n",
     "        {question} \n",
@@ -643,7 +643,7 @@
 ],
 "metadata": {
  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
+   "display_name": "Python 3.11.1 64-bit",
   "language": "python",
   "name": "python3"
  },
@@ -657,7 +657,12 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.11.1"
  },
+ "vscode": {
+  "interpreter": {
+   "hash": "1a1af0ee75eeea9e2e1ee996c87e7a2b11a0bebd85af04bb136d915cefc0abce"
+  }
+ }
 },
 "nbformat": 4,
```
The same treatment in one more notebook:

```diff
@@ -647,7 +647,7 @@
    "metadata": {},
    "source": [
     "**Now we see the results are correct as it is mentioned in earnings release.** <br>\n",
-    "**To further automate, we will create a chain that will take input as question and retriever so that we don't need to retrieve documents seperately**"
+    "**To further automate, we will create a chain that will take input as question and retriever so that we don't need to retrieve documents separately**"
    ]
   },
   {
@@ -734,9 +734,9 @@
 ],
 "metadata": {
  "kernelspec": {
-   "display_name": "rag-on-intel",
+   "display_name": "Python 3.11.1 64-bit",
   "language": "python",
-   "name": "rag-on-intel"
+   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
@@ -748,7 +748,12 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.11.9"
+   "version": "3.11.1"
  },
+ "vscode": {
+  "interpreter": {
+   "hash": "1a1af0ee75eeea9e2e1ee996c87e7a2b11a0bebd85af04bb136d915cefc0abce"
+  }
+ }
 },
 "nbformat": 4,
```
The docs build Makefile is updated next:

```diff
@@ -18,7 +18,7 @@ for dir; do \
 	if find "$$dir" -maxdepth 1 -type f \( -name "pyproject.toml" -o -name "setup.py" \) | grep -q .; then \
 		echo "$$dir"; \
 	fi \
-	done' sh {} + | grep -vE "airbyte|ibm|couchbase" | tr '\n' ' ')
+	done' sh {} + | grep -vE "airbyte|ibm|couchbase|databricks" | tr '\n' ' ')

 PORT ?= 3001

@@ -33,26 +33,19 @@ install-py-deps:
 	python3 -m venv .venv
 	$(PYTHON) -m pip install --upgrade pip
 	$(PYTHON) -m pip install --upgrade uv
-	$(PYTHON) -m uv pip install -r vercel_requirements.txt
-	$(PYTHON) -m uv pip install --editable $(PARTNER_DEPS_LIST)
+	$(PYTHON) -m uv pip install --pre -r vercel_requirements.txt
+	$(PYTHON) -m uv pip install --pre --editable $(PARTNER_DEPS_LIST)

 generate-files:
 	mkdir -p $(INTERMEDIATE_DIR)
 	cp -r $(SOURCE_DIR)/* $(INTERMEDIATE_DIR)
-	mkdir -p $(INTERMEDIATE_DIR)/templates

 	$(PYTHON) scripts/model_feat_table.py $(INTERMEDIATE_DIR)
-
 	$(PYTHON) scripts/tool_feat_table.py $(INTERMEDIATE_DIR)
-
 	$(PYTHON) scripts/document_loader_feat_table.py $(INTERMEDIATE_DIR)
-
 	$(PYTHON) scripts/kv_store_feat_table.py $(INTERMEDIATE_DIR)
-
 	$(PYTHON) scripts/partner_pkg_table.py $(INTERMEDIATE_DIR)
-
-	$(PYTHON) scripts/copy_templates.py $(INTERMEDIATE_DIR)
-
 	wget -q https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O $(INTERMEDIATE_DIR)/langserve.md
 	$(PYTHON) scripts/resolve_local_links.py $(INTERMEDIATE_DIR)/langserve.md https://github.com/langchain-ai/langserve/tree/main/
@@ -80,17 +73,19 @@ append-related:
 generate-references:
 	$(PYTHON) scripts/generate_api_reference_links.py --docs_dir $(OUTPUT_NEW_DOCS_DIR)

+update-md: generate-files md-sync
+
 build: install-py-deps generate-files copy-infra render md-sync append-related

 vercel-build: install-vercel-deps build generate-references
 	rm -rf docs
 	mv $(OUTPUT_NEW_DOCS_DIR) docs
 	rm -rf build
-	yarn run docusaurus build
-	mv build v0.2
-	mkdir build
-	mv v0.2 build
-	mv build/v0.2/404.html build
+	mkdir static/api_reference
+	git clone --depth=1 https://github.com/baskaryan/langchain-api-docs-build.git
+	mv langchain-api-docs-build/api_reference_build/html/* static/api_reference/
+	rm -rf langchain-api-docs-build
+	NODE_OPTIONS="--max-old-space-size=5000" yarn run docusaurus build

 start:
 	cd $(OUTPUT_NEW_DIR) && yarn && yarn start --port=$(PORT)
```
**`docs/api_reference/_extensions/gallery_directive.py`** (new file, 144 lines; the capture ends partway through the file)
@@ -0,0 +1,144 @@
"""A directive to generate a gallery of images from structured data.

Generating a gallery of images that are all the same size is a common
pattern in documentation, and this can be cumbersome if the gallery is
generated programmatically. This directive wraps this particular use-case
in a helper-directive to generate it with a single YAML configuration file.

It currently exists for maintainers of the pydata-sphinx-theme,
but might be abstracted into a standalone package if it proves useful.
"""

from pathlib import Path
from typing import Any, ClassVar, Dict, List

from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.application import Sphinx
from sphinx.util import logging
from sphinx.util.docutils import SphinxDirective
from yaml import safe_load

logger = logging.getLogger(__name__)


TEMPLATE_GRID = """
`````{{grid}} {columns}
{options}

{content}

`````
"""

GRID_CARD = """
````{{grid-item-card}} {title}
{options}

{content}
````
"""


class GalleryGridDirective(SphinxDirective):
    """A directive to show a gallery of images and links in a Bootstrap grid.

    The grid can be generated from a YAML file that contains a list of items, or
    from the content of the directive (also formatted in YAML). Use the parameter
    "class-card" to add an additional CSS class to all cards. When specifying the grid
    items, you can use all parameters from the "grid-item-card" directive to customize
    individual cards + ["image", "header", "content", "title"].

    Danger:
        This directive can only be used in the context of a MyST documentation page, as
        the templates use Markdown-flavored formatting.
    """

    name = "gallery-grid"
    has_content = True
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = True
    option_spec: ClassVar[dict[str, Any]] = {
        # A class to be added to the resulting container
        "grid-columns": directives.unchanged,
        "class-container": directives.unchanged,
        "class-card": directives.unchanged,
    }

    def run(self) -> List[nodes.Node]:
        """Create the gallery grid."""
        if self.arguments:
            # If an argument is given, assume it's a path to a YAML file
            # Parse it and load it into the directive content
            path_data_rel = Path(self.arguments[0])
            path_doc, _ = self.get_source_info()
            path_doc = Path(path_doc).parent
            path_data = (path_doc / path_data_rel).resolve()
            if not path_data.exists():
                logger.info(f"Could not find grid data at {path_data}.")
                nodes.text(f"No grid data found at {path_data}.")
                return []
            yaml_string = path_data.read_text()
        else:
            yaml_string = "\n".join(self.content)

        # Use all the elements with an img-bottom key as sites to show
        # and generate a card item for each of them
        grid_items = []
        for item in safe_load(yaml_string):
            # remove parameters that are not needed for the card options
            title = item.pop("title", "")

            # build the content of the card using some extra parameters
            header = f"{item.pop('header')} \n^^^ \n" if "header" in item else ""
            image = f"![image]({item.pop('image')}) \n" if "image" in item else ""
            content = f"{item.pop('content')} \n" if "content" in item else ""

            # optional parameter that influences all cards
            if "class-card" in self.options:
                item["class-card"] = self.options["class-card"]

            loc_options_str = "\n".join(f":{k}: {v}" for k, v in item.items()) + " \n"

            card = GRID_CARD.format(
                options=loc_options_str, content=header + image + content, title=title
            )
            grid_items.append(card)

        # Parse the template with Sphinx Design to create an output container
        # Prep the options for the template grid
        class_ = "gallery-directive" + f' {self.options.get("class-container", "")}'
        options = {"gutter": 2, "class-container": class_}
        options_str = "\n".join(f":{k}: {v}" for k, v in options.items())

        # Create the directive string for the grid
        grid_directive = TEMPLATE_GRID.format(
            columns=self.options.get("grid-columns", "1 2 3 4"),
            options=options_str,
            content="\n".join(grid_items),
        )

        # Parse content as a directive so Sphinx Design processes it
        container = nodes.container()
        self.state.nested_parse([grid_directive], 0, container)

        # Sphinx Design outputs a container too, so just use that
        return [container.children[0]]


def setup(app: Sphinx) -> Dict[str, Any]:
    """Add custom configuration to sphinx app.

    Args:
        app: the Sphinx application

    Returns:
        the 2 parallel parameters set to ``True``.
    """
    app.add_directive("gallery-grid", GalleryGridDirective)

    return {
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
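For orientation, here is a minimal sketch of how the directive above might be invoked from a MyST page. The file name `gallery.yaml` and the card values are hypothetical; the option names and per-item keys come straight from `option_spec` and `run()`:

```{gallery-grid} gallery.yaml
:grid-columns: "1 2 2 3"
:class-card: my-card
```

Equivalently, the YAML can be given inline as the directive body; each list item becomes one `grid-item-card`, with `title`, `header`, `content`, and `image` handled specially and any remaining keys passed through as card options:

```{gallery-grid}
:grid-columns: "1 2 2 3"

- header: "**Example package**"
  content: "example-package: 0.1.0"
  link: example_package/index.html
```
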
@@ -1,26 +1,411 @@
pre {
  white-space: break-spaces;
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;700&display=swap');

/*******************************************************************************
 * master color map. Only the colors that actually differ between light and dark
 * themes are specified separately.
 *
 * To see the full list of colors see https://www.figma.com/file/rUrrHGhUBBIAAjQ82x6pz9/PyData-Design-system---proposal-for-implementation-(2)?node-id=1234%3A765&t=ifcFT1JtnrSshGfi-1
 */
/**
 * Function to get items from nested maps
 */
/* Assign base colors for the PyData theme */
:root {
  --pst-teal-50: #f4fbfc;
  --pst-teal-100: #e9f6f8;
  --pst-teal-200: #d0ecf1;
  --pst-teal-300: #abdde6;
  --pst-teal-400: #3fb1c5;
  --pst-teal-500: #0a7d91;
  --pst-teal-600: #085d6c;
  --pst-teal-700: #064752;
  --pst-teal-800: #042c33;
  --pst-teal-900: #021b1f;
  --pst-violet-50: #f4eefb;
  --pst-violet-100: #e0c7ff;
  --pst-violet-200: #d5b4fd;
  --pst-violet-300: #b780ff;
  --pst-violet-400: #9c5ffd;
  --pst-violet-500: #8045e5;
  --pst-violet-600: #6432bd;
  --pst-violet-700: #4b258f;
  --pst-violet-800: #341a61;
  --pst-violet-900: #1e0e39;
  --pst-gray-50: #f9f9fa;
  --pst-gray-100: #f3f4f5;
  --pst-gray-200: #e5e7ea;
  --pst-gray-300: #d1d5da;
  --pst-gray-400: #9ca4af;
  --pst-gray-500: #677384;
  --pst-gray-600: #48566b;
  --pst-gray-700: #29313d;
  --pst-gray-800: #222832;
  --pst-gray-900: #14181e;
  --pst-pink-50: #fcf8fd;
  --pst-pink-100: #fcf0fa;
  --pst-pink-200: #f8dff5;
  --pst-pink-300: #f3c7ee;
  --pst-pink-400: #e47fd7;
  --pst-pink-500: #c132af;
  --pst-pink-600: #912583;
  --pst-pink-700: #6e1c64;
  --pst-pink-800: #46123f;
  --pst-pink-900: #2b0b27;
  --pst-foundation-white: #ffffff;
  --pst-foundation-black: #14181e;
  --pst-green-10: #f1fdfd;
  --pst-green-50: #E0F7F6;
  --pst-green-100: #B3E8E6;
  --pst-green-200: #80D6D3;
  --pst-green-300: #4DC4C0;
  --pst-green-400: #4FB2AD;
  --pst-green-500: #287977;
  --pst-green-600: #246161;
  --pst-green-700: #204F4F;
  --pst-green-800: #1C3C3C;
  --pst-green-900: #0D2427;
  --pst-lilac-50: #f4eefb;
  --pst-lilac-100: #DAD6FE;
  --pst-lilac-200: #BCB2FD;
  --pst-lilac-300: #9F8BFA;
  --pst-lilac-400: #7F5CF6;
  --pst-lilac-500: #6F3AED;
  --pst-lilac-600: #6028D9;
  --pst-lilac-700: #5021B6;
  --pst-lilac-800: #431D95;
  --pst-lilac-900: #1e0e39;
  --pst-header-height: 2.5rem;
}

@media (min-width: 1200px) {
  .container,
  .container-lg,
  .container-md,
  .container-sm,
  .container-xl {
    max-width: 2560px !important;
  }
html {
  --pst-font-family-base: 'Inter';
  --pst-font-family-heading: 'Inter Tight', sans-serif;
}

#my-component-root *,
#headlessui-portal-root * {
  z-index: 10000;
/*******************************************************************************
 * write the color rules for each theme (light/dark)
 */
/* NOTE:
 * Mixins enable us to reuse the same definitions for the different modes
 * https://sass-lang.com/documentation/at-rules/mixin
 * #{something} inserts a variable into a CSS selector or property name
 * https://sass-lang.com/documentation/interpolation
 */
/* Defaults to light mode if data-theme is not set */
html:not([data-theme]) {
  --pst-color-primary: #287977;
  --pst-color-primary-bg: #80D6D3;
  --pst-color-secondary: #6F3AED;
  --pst-color-secondary-bg: #DAD6FE;
  --pst-color-accent: #c132af;
  --pst-color-accent-bg: #f8dff5;
  --pst-color-info: #276be9;
  --pst-color-info-bg: #dce7fc;
  --pst-color-warning: #f66a0a;
  --pst-color-warning-bg: #f8e3d0;
  --pst-color-success: #00843f;
  --pst-color-success-bg: #d6ece1;
  --pst-color-attention: var(--pst-color-warning);
  --pst-color-attention-bg: var(--pst-color-warning-bg);
  --pst-color-danger: #d72d47;
  --pst-color-danger-bg: #f9e1e4;
  --pst-color-text-base: #222832;
  --pst-color-text-muted: #48566b;
  --pst-color-heading-color: #ffffff;
  --pst-color-shadow: rgba(0, 0, 0, 0.1);
  --pst-color-border: #d1d5da;
  --pst-color-border-muted: rgba(23, 23, 26, 0.2);
  --pst-color-inline-code: #912583;
  --pst-color-inline-code-links: #246161;
  --pst-color-target: #f3cf95;
  --pst-color-background: #ffffff;
  --pst-color-on-background: #F4F9F8;
  --pst-color-surface: #F4F9F8;
  --pst-color-on-surface: #222832;
}
html:not([data-theme]) {
  --pst-color-link: var(--pst-color-primary);
  --pst-color-link-hover: var(--pst-color-secondary);
}
html:not([data-theme]) .only-dark,
html:not([data-theme]) .only-dark ~ figcaption {
  display: none !important;
}

table.longtable code {
  white-space: normal;
/* NOTE: @each {...} is like a for-loop
 * https://sass-lang.com/documentation/at-rules/control/each
 */
html[data-theme=light] {
  --pst-color-primary: #287977;
  --pst-color-primary-bg: #80D6D3;
  --pst-color-secondary: #6F3AED;
  --pst-color-secondary-bg: #DAD6FE;
  --pst-color-accent: #c132af;
  --pst-color-accent-bg: #f8dff5;
  --pst-color-info: #276be9;
  --pst-color-info-bg: #dce7fc;
  --pst-color-warning: #f66a0a;
  --pst-color-warning-bg: #f8e3d0;
  --pst-color-success: #00843f;
  --pst-color-success-bg: #d6ece1;
  --pst-color-attention: var(--pst-color-warning);
  --pst-color-attention-bg: var(--pst-color-warning-bg);
  --pst-color-danger: #d72d47;
  --pst-color-danger-bg: #f9e1e4;
  --pst-color-text-base: #222832;
  --pst-color-text-muted: #48566b;
  --pst-color-heading-color: #ffffff;
  --pst-color-shadow: rgba(0, 0, 0, 0.1);
  --pst-color-border: #d1d5da;
  --pst-color-border-muted: rgba(23, 23, 26, 0.2);
  --pst-color-inline-code: #912583;
  --pst-color-inline-code-links: #246161;
  --pst-color-target: #f3cf95;
  --pst-color-background: #ffffff;
  --pst-color-on-background: #F4F9F8;
  --pst-color-surface: #F4F9F8;
  --pst-color-on-surface: #222832;
  color-scheme: light;
}
html[data-theme=light] {
  --pst-color-link: var(--pst-color-primary);
  --pst-color-link-hover: var(--pst-color-secondary);
}
html[data-theme=light] .only-dark,
html[data-theme=light] .only-dark ~ figcaption {
  display: none !important;
}

table.longtable td {
  max-width: 600px;
html[data-theme=dark] {
  --pst-color-primary: #4FB2AD;
  --pst-color-primary-bg: #1C3C3C;
  --pst-color-secondary: #7F5CF6;
  --pst-color-secondary-bg: #431D95;
  --pst-color-accent: #e47fd7;
  --pst-color-accent-bg: #46123f;
  --pst-color-info: #79a3f2;
  --pst-color-info-bg: #06245d;
  --pst-color-warning: #ff9245;
  --pst-color-warning-bg: #652a02;
  --pst-color-success: #5fb488;
  --pst-color-success-bg: #002f17;
  --pst-color-attention: var(--pst-color-warning);
  --pst-color-attention-bg: var(--pst-color-warning-bg);
  --pst-color-danger: #e78894;
  --pst-color-danger-bg: #4e111b;
  --pst-color-text-base: #ced6dd;
  --pst-color-text-muted: #9ca4af;
  --pst-color-heading-color: #14181e;
  --pst-color-shadow: rgba(0, 0, 0, 0.2);
  --pst-color-border: #48566b;
  --pst-color-border-muted: #29313d;
  --pst-color-inline-code: #f3c7ee;
  --pst-color-inline-code-links: #4FB2AD;
  --pst-color-target: #675c04;
  --pst-color-background: #14181e;
  --pst-color-on-background: #222832;
  --pst-color-surface: #29313d;
  --pst-color-on-surface: #f3f4f5;
  /* Adjust images in dark mode (unless they have class .only-dark or
   * .dark-light, in which case assume they're already optimized for dark
   * mode).
   */
  /* Give images a light background in dark mode in case they have
   * transparency and black text (unless they have class .only-dark or .dark-light, in
   * which case assume they're already optimized for dark mode).
   */
  color-scheme: dark;
}
html[data-theme=dark] {
  --pst-color-link: var(--pst-color-primary);
  --pst-color-link-hover: var(--pst-color-secondary);
}
html[data-theme=dark] .only-light,
html[data-theme=dark] .only-light ~ figcaption {
  display: none !important;
}
html[data-theme=dark] img:not(.only-dark):not(.dark-light) {
  filter: brightness(0.8) contrast(1.2);
}
html[data-theme=dark] .bd-content img:not(.only-dark):not(.dark-light) {
  background: rgb(255, 255, 255);
  border-radius: 0.25rem;
}
html[data-theme=dark] .MathJax_SVG * {
  fill: var(--pst-color-text-base);
}

.pst-color-primary {
  color: var(--pst-color-primary);
}

.pst-color-secondary {
  color: var(--pst-color-secondary);
}

.pst-color-accent {
  color: var(--pst-color-accent);
}

.pst-color-info {
  color: var(--pst-color-info);
}

.pst-color-warning {
  color: var(--pst-color-warning);
}

.pst-color-success {
  color: var(--pst-color-success);
}

.pst-color-attention {
  color: var(--pst-color-attention);
}

.pst-color-danger {
  color: var(--pst-color-danger);
}

.pst-color-text-base {
  color: var(--pst-color-text-base);
}

.pst-color-text-muted {
  color: var(--pst-color-text-muted);
}

.pst-color-heading-color {
  color: var(--pst-color-heading-color);
}

.pst-color-shadow {
  color: var(--pst-color-shadow);
}

.pst-color-border {
  color: var(--pst-color-border);
}

.pst-color-border-muted {
  color: var(--pst-color-border-muted);
}

.pst-color-inline-code {
  color: var(--pst-color-inline-code);
}

.pst-color-inline-code-links {
  color: var(--pst-color-inline-code-links);
}

.pst-color-target {
  color: var(--pst-color-target);
}

.pst-color-background {
  color: var(--pst-color-background);
}

.pst-color-on-background {
  color: var(--pst-color-on-background);
}

.pst-color-surface {
  color: var(--pst-color-surface);
}

.pst-color-on-surface {
  color: var(--pst-color-on-surface);
}


/* Adjust the height of the navbar */
.bd-header .bd-header__inner {
  height: 52px; /* Adjust this value as needed */
}

.navbar-nav > li > a {
  line-height: 52px; /* Vertically center the navbar links */
}

/* Make sure the navbar items align properly */
.navbar-nav {
  display: flex;
}

.bd-header .navbar-header-items__start {
  margin-left: 0rem
}

.bd-header button.primary-toggle {
  margin-right: 0rem;
}

.bd-header ul.navbar-nav .dropdown .dropdown-menu {
  overflow-y: auto; /* Enable vertical scrolling */
  max-height: 80vh
}

.bd-sidebar-primary {
  width: 22%; /* Adjust this value to your preference */
  line-height: 1.4;
}

.bd-sidebar-secondary {
  line-height: 1.4;
}

.toc-entry a.nav-link, .toc-entry a>code {
  background-color: transparent;
  border-color: transparent;
}

.bd-sidebar-primary code {
  background-color: transparent;
  border-color: transparent;
}

.toctree-wrapper li[class^=toctree-l1]>a {
  font-size: 1.3em
}

.toctree-wrapper li[class^=toctree-l1] {
  margin-bottom: 2em;
}

.toctree-wrapper li[class^=toctree-l]>ul {
  margin-top: 0.5em;
  font-size: 0.9em;
}

*, :after, :before {
  font-style: normal;
}

div.deprecated {
  margin-top: 0.5em;
  margin-bottom: 2em;
}

.admonition-beta.admonition, div.admonition-beta.admonition {
  border-color: var(--pst-color-warning);
  margin-top: 0.5em;
  margin-bottom: 2em;
}

.admonition-beta>.admonition-title, div.admonition-beta>.admonition-title {
  background-color: var(--pst-color-warning-bg);
}

dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd {
  margin-left: 1rem;
}

p {
  font-size: 0.9rem;
  margin-bottom: 0.5rem;
}
docs/api_reference/_static/img/brand/favicon.png (new binary file, 777 B; not shown)
docs/api_reference/_static/wordmark-api-dark.svg (new file, 11 lines, 5.7 KiB)
@@ -0,0 +1,11 @@
<svg width="72" height="19" viewBox="0 0 72 19" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_4019_2020)">
<path d="M29.4038 5.84477C30.1256 6.56657 30.1256 7.74117 29.4038 8.46296L27.7869 10.0538L27.7704 9.96259C27.6524 9.30879 27.3415 8.71552 26.8723 8.24627C26.5189 7.8936 26.1012 7.63282 25.6305 7.47143C25.3383 7.76508 25.1777 8.14989 25.1777 8.55487C25.1777 8.63706 25.1851 8.72224 25.2001 8.80742C25.4593 8.90082 25.6887 9.04503 25.8815 9.23781C26.6033 9.9596 26.6033 11.1342 25.8815 11.856L24.4738 13.2637C24.1129 13.6246 23.6392 13.8047 23.1647 13.8047C22.6902 13.8047 22.2165 13.6246 21.8556 13.2637C21.1338 12.5419 21.1338 11.3673 21.8556 10.6455L23.4725 9.05549L23.489 9.14665C23.6063 9.79896 23.9171 10.3922 24.3879 10.8622C24.742 11.2164 25.1343 11.4518 25.6043 11.6124L25.691 11.5257C25.954 11.2627 26.0982 10.913 26.0982 10.5402C26.0982 10.4572 26.0907 10.3743 26.0765 10.2929C25.8053 10.2032 25.5819 10.0754 25.3786 9.87218C25.0857 9.57928 24.9034 9.20493 24.8526 8.79024C24.8489 8.76035 24.8466 8.73121 24.8437 8.70132C24.8033 8.16109 24.9983 7.63357 25.3786 7.25399L26.7864 5.84627C27.1353 5.49733 27.6001 5.30455 28.0955 5.30455C28.5909 5.30455 29.0556 5.49658 29.4046 5.84627L29.4038 5.84477ZM36.7548 9.56583C36.7548 14.7163 32.5645 18.9058 27.4148 18.9058H9.34C4.1903 18.9058 0 14.7163 0 9.56583C0 4.41538 4.1903 0.22583 9.34 0.22583H27.4148C32.5652 0.22583 36.7548 4.41613 36.7548 9.56583ZM18 14.25C18.1472 14.0714 17.4673 13.5686 17.3283 13.384C17.0459 13.0777 17.0444 12.6368 16.8538 12.2789C16.3876 11.1985 15.8518 10.1262 15.1024 9.21166C14.3104 8.21116 13.333 7.38326 12.4745 6.44403C11.8371 5.78873 11.6668 4.85548 11.1041 4.15087C10.3285 3.00541 7.87624 2.69308 7.51683 4.31077C7.51833 4.36158 7.50264 4.39371 7.45855 4.42584C7.2598 4.57005 7.08271 4.73518 6.93402 4.93468C6.57013 5.44129 6.51409 6.30057 6.96839 6.75561C6.98333 6.51576 6.99155 6.28936 7.18134 6.1175C7.53252 6.41862 8.06304 6.52547 8.47026 6.30057C9.36989 7.585 9.14573 9.36184 9.86005 10.7457C10.0573 11.0729 10.2561 11.4069 10.5094 11.6939C10.7148 12.0137 11.4247 12.391 11.4665 12.6869C11.474 13.195 11.4142 13.7502 11.7475 14.1753C11.9044 14.4936 11.5188 14.8134 11.208 14.7738C10.8045 14.8291 10.3121 14.5026 9.95868 14.7036C9.8339 14.8388 9.58957 14.6894 9.48197 14.8769C9.44461 14.9741 9.24286 15.1108 9.36316 15.2042C9.49691 15.1026 9.62095 14.9965 9.80102 15.057C9.77412 15.2035 9.88994 15.2244 9.98184 15.267C9.97886 15.3663 9.92057 15.468 9.99679 15.5524C10.0857 15.4627 10.1388 15.3357 10.28 15.2983C10.7492 15.9238 11.2267 14.6655 12.2421 15.2318C12.0359 15.2214 11.8528 15.2475 11.7139 15.4172C11.6795 15.4553 11.6503 15.5001 11.7109 15.5494C12.2586 15.196 12.2556 15.6705 12.6112 15.5248C12.8847 15.382 13.1567 15.2035 13.4817 15.2543C13.1657 15.3454 13.153 15.5995 12.9677 15.8139C12.9363 15.8468 12.9213 15.8842 12.9579 15.9387C13.614 15.8834 13.6678 15.6652 14.1975 15.3977C14.5928 15.1564 14.9866 15.7414 15.3288 15.4082C15.4043 15.3357 15.5074 15.3604 15.6008 15.3507C15.4812 14.7133 14.1669 15.4672 14.1878 14.6124C14.6107 14.3247 14.5136 13.7741 14.542 13.3295C15.0284 13.5992 15.5694 13.7561 16.0461 14.0139C16.2867 14.4025 16.6641 14.9158 17.1669 14.8822C17.1804 14.8433 17.1923 14.8089 17.2065 14.7693C17.359 14.7955 17.5547 14.8964 17.6384 14.7036C17.8663 14.9419 18.201 14.93 18.4992 14.8687C18.7196 14.6894 18.0845 14.4338 17.9993 14.2493L18 14.25ZM31.3458 7.15387C31.3458 6.28413 31.0081 5.46744 30.3946 4.85399C29.7812 4.24054 28.9645 3.9028 28.094 3.9028C27.2235 3.9028 26.4068 4.24054 25.7933 4.85399L24.3856 6.26171C24.0569 6.59048 23.8073 6.97678 23.6436 7.40941L23.6339 7.43407L23.6085 7.44154C23.0974 7.5992 22.6469 7.86969 
22.2696 8.24702L20.8618 9.65475C19.5938 10.9235 19.5938 12.9873 20.8618 14.2553C21.4753 14.8687 22.292 15.2064 23.1617 15.2064C24.0314 15.2064 24.8489 14.8687 25.4623 14.2553L26.8701 12.8475C27.1973 12.5203 27.4454 12.1355 27.609 11.7036L27.6188 11.6789L27.6442 11.6707C28.1463 11.5168 28.6095 11.2373 28.9854 10.8622L30.3931 9.4545C31.0066 8.84105 31.3443 8.02436 31.3443 7.15387H31.3458ZM12.8802 13.1972C12.7592 13.6695 12.7196 14.4742 12.1054 14.4974C12.0546 14.7701 12.2944 14.8724 12.5119 14.785C12.7278 14.6856 12.8302 14.8635 12.9026 15.0406C13.2359 15.0891 13.7291 14.9292 13.7477 14.5347C13.2501 14.2478 13.0962 13.7023 12.8795 13.1972H12.8802Z" fill="#F4F3FF"/>
<path d="M43.5142 15.2258L47.1462 3.70583H49.9702L53.6022 15.2258H51.6182L48.3222 4.88983H48.7542L45.4982 15.2258H43.5142ZM45.5382 12.7298V10.9298H51.5862V12.7298H45.5382ZM55.0486 15.2258V3.70583H59.8086C59.9206 3.70583 60.0646 3.71116 60.2406 3.72183C60.4166 3.72716 60.5792 3.74316 60.7286 3.76983C61.3952 3.87116 61.9446 4.0925 62.3766 4.43383C62.8139 4.77516 63.1366 5.20716 63.3446 5.72983C63.5579 6.24716 63.6646 6.82316 63.6646 7.45783C63.6646 8.08716 63.5579 8.66316 63.3446 9.18583C63.1312 9.70316 62.8059 10.1325 62.3686 10.4738C61.9366 10.8152 61.3899 11.0365 60.7286 11.1378C60.5792 11.1592 60.4139 11.1752 60.2326 11.1858C60.0566 11.1965 59.9152 11.2018 59.8086 11.2018H56.9766V15.2258H55.0486ZM56.9766 9.40183H59.7286C59.8352 9.40183 59.9552 9.3965 60.0886 9.38583C60.2219 9.37516 60.3446 9.35383 60.4566 9.32183C60.7766 9.24183 61.0272 9.1005 61.2086 8.89783C61.3952 8.69516 61.5259 8.46583 61.6006 8.20983C61.6806 7.95383 61.7206 7.70316 61.7206 7.45783C61.7206 7.2125 61.6806 6.96183 61.6006 6.70583C61.5259 6.4445 61.3952 6.2125 61.2086 6.00983C61.0272 5.80716 60.7766 5.66583 60.4566 5.58583C60.3446 5.55383 60.2219 5.53516 60.0886 5.52983C59.9552 5.51916 59.8352 5.51383 59.7286 5.51383H56.9766V9.40183ZM65.4273 15.2258V3.70583H67.3553V15.2258H65.4273Z" fill="#F4F3FF"/>
</g>
<defs>
<clipPath id="clip0_4019_2020">
<rect width="71.0711" height="18.68" fill="white" transform="translate(0 0.22583)"/>
</clipPath>
</defs>
</svg>
docs/api_reference/_static/wordmark-api.svg (new file, 11 lines, 5.7 KiB)
@@ -0,0 +1,11 @@
<svg width="72" height="20" viewBox="0 0 72 20" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_4019_689)">
<path d="M29.4038 5.97905C30.1256 6.70085 30.1256 7.87545 29.4038 8.59724L27.7869 10.188L27.7704 10.0969C27.6524 9.44307 27.3415 8.84979 26.8723 8.38055C26.5189 8.02787 26.1012 7.7671 25.6305 7.60571C25.3383 7.89936 25.1777 8.28416 25.1777 8.68915C25.1777 8.77134 25.1851 8.85652 25.2001 8.9417C25.4593 9.0351 25.6887 9.17931 25.8815 9.37209C26.6033 10.0939 26.6033 11.2685 25.8815 11.9903L24.4738 13.398C24.1129 13.7589 23.6392 13.939 23.1647 13.939C22.6902 13.939 22.2165 13.7589 21.8556 13.398C21.1338 12.6762 21.1338 11.5016 21.8556 10.7798L23.4725 9.18977L23.489 9.28093C23.6063 9.93323 23.9171 10.5265 24.3879 10.9965C24.742 11.3507 25.1343 11.586 25.6043 11.7467L25.691 11.66C25.954 11.397 26.0982 11.0473 26.0982 10.6745C26.0982 10.5915 26.0907 10.5086 26.0765 10.4271C25.8053 10.3375 25.5819 10.2097 25.3786 10.0065C25.0857 9.71356 24.9034 9.33921 24.8526 8.92451C24.8489 8.89463 24.8466 8.86549 24.8437 8.8356C24.8033 8.29537 24.9983 7.76785 25.3786 7.38827L26.7864 5.98055C27.1353 5.6316 27.6001 5.43883 28.0955 5.43883C28.5909 5.43883 29.0556 5.63086 29.4046 5.98055L29.4038 5.97905ZM36.7548 9.70011C36.7548 14.8506 32.5645 19.0401 27.4148 19.0401H9.34C4.1903 19.0401 0 14.8506 0 9.70011C0 4.54966 4.1903 0.360107 9.34 0.360107H27.4148C32.5652 0.360107 36.7548 4.55041 36.7548 9.70011ZM18 14.3843C18.1472 14.2057 17.4673 13.7029 17.3283 13.5183C17.0459 13.2119 17.0444 12.7711 16.8538 12.4132C16.3876 11.3327 15.8518 10.2605 15.1024 9.34594C14.3104 8.34543 13.333 7.51754 12.4745 6.57831C11.8371 5.92301 11.6668 4.98976 11.1041 4.28515C10.3285 3.13969 7.87624 2.82736 7.51683 4.44505C7.51833 4.49586 7.50264 4.52799 7.45855 4.56012C7.2598 4.70433 7.08271 4.86946 6.93402 5.06896C6.57013 5.57556 6.51409 6.43484 6.96839 6.88989C6.98333 6.65004 6.99155 6.42364 7.18134 6.25178C7.53252 6.5529 8.06304 6.65975 8.47026 6.43484C9.36989 7.71928 9.14573 9.49612 9.86005 10.8799C10.0573 11.2072 10.2561 11.5412 10.5094 11.8281C10.7148 12.1479 11.4247 12.5253 11.4665 12.8212C11.474 13.3293 11.4142 13.8844 11.7475 14.3096C11.9044 14.6279 11.5188 14.9477 11.208 14.9081C10.8045 14.9634 10.3121 14.6369 9.95868 14.8379C9.8339 14.9731 9.58957 14.8237 9.48197 15.0112C9.44461 15.1083 9.24286 15.2451 9.36316 15.3385C9.49691 15.2369 9.62095 15.1308 9.80102 15.1913C9.77412 15.3377 9.88994 15.3587 9.98184 15.4012C9.97886 15.5006 9.92057 15.6022 9.99679 15.6867C10.0857 15.597 10.1388 15.47 10.28 15.4326C10.7492 16.058 11.2267 14.7997 12.2421 15.3661C12.0359 15.3557 11.8528 15.3818 11.7139 15.5514C11.6795 15.5895 11.6503 15.6344 11.7109 15.6837C12.2586 15.3303 12.2556 15.8047 12.6112 15.659C12.8847 15.5163 13.1567 15.3377 13.4817 15.3885C13.1657 15.4797 13.153 15.7337 12.9677 15.9482C12.9363 15.9811 12.9213 16.0184 12.9579 16.073C13.614 16.0177 13.6678 15.7995 14.1975 15.532C14.5928 15.2907 14.9866 15.8757 15.3288 15.5425C15.4043 15.47 15.5074 15.4946 15.6008 15.4849C15.4812 14.8476 14.1669 15.6015 14.1878 14.7467C14.6107 14.459 14.5136 13.9083 14.542 13.4638C15.0284 13.7335 15.5694 13.8904 16.0461 14.1482C16.2867 14.5367 16.6641 15.0501 17.1669 15.0164C17.1804 14.9776 17.1923 14.9432 17.2065 14.9036C17.359 14.9298 17.5547 15.0306 17.6384 14.8379C17.8663 15.0762 18.201 15.0643 18.4992 15.003C18.7196 14.8237 18.0845 14.5681 17.9993 14.3836L18 14.3843ZM31.3458 7.28815C31.3458 6.41841 31.0081 5.60172 30.3946 4.98826C29.7812 4.37481 28.9645 4.03708 28.094 4.03708C27.2235 4.03708 26.4068 4.37481 25.7933 4.98826L24.3856 6.39599C24.0569 6.72476 23.8073 7.11106 23.6436 7.54369L23.6339 7.56835L23.6085 7.57582C23.0974 7.73348 22.6469 8.00396 
22.2696 8.3813L20.8618 9.78902C19.5938 11.0578 19.5938 13.1215 20.8618 14.3895C21.4753 15.003 22.292 15.3407 23.1617 15.3407C24.0314 15.3407 24.8489 15.003 25.4623 14.3895L26.8701 12.9818C27.1973 12.6545 27.4454 12.2697 27.609 11.8378L27.6188 11.8132L27.6442 11.805C28.1463 11.651 28.6095 11.3716 28.9854 10.9965L30.3931 9.58878C31.0066 8.97532 31.3443 8.15863 31.3443 7.28815H31.3458ZM12.8802 13.3315C12.7592 13.8037 12.7196 14.6085 12.1054 14.6316C12.0546 14.9044 12.2944 15.0067 12.5119 14.9193C12.7278 14.8199 12.8302 14.9978 12.9026 15.1748C13.2359 15.2234 13.7291 15.0635 13.7477 14.669C13.2501 14.3821 13.0962 13.8366 12.8795 13.3315H12.8802Z" fill="#246161"/>
<path d="M43.5142 15.3601L47.1462 3.84011H49.9702L53.6022 15.3601H51.6182L48.3222 5.02411H48.7542L45.4982 15.3601H43.5142ZM45.5382 12.8641V11.0641H51.5862V12.8641H45.5382ZM55.0486 15.3601V3.84011H59.8086C59.9206 3.84011 60.0646 3.84544 60.2406 3.85611C60.4166 3.86144 60.5792 3.87744 60.7286 3.90411C61.3952 4.00544 61.9446 4.22677 62.3766 4.56811C62.8139 4.90944 63.1366 5.34144 63.3446 5.86411C63.5579 6.38144 63.6646 6.95744 63.6646 7.59211C63.6646 8.22144 63.5579 8.79744 63.3446 9.32011C63.1312 9.83744 62.8059 10.2668 62.3686 10.6081C61.9366 10.9494 61.3899 11.1708 60.7286 11.2721C60.5792 11.2934 60.4139 11.3094 60.2326 11.3201C60.0566 11.3308 59.9152 11.3361 59.8086 11.3361H56.9766V15.3601H55.0486ZM56.9766 9.53611H59.7286C59.8352 9.53611 59.9552 9.53077 60.0886 9.52011C60.2219 9.50944 60.3446 9.48811 60.4566 9.45611C60.7766 9.37611 61.0272 9.23477 61.2086 9.03211C61.3952 8.82944 61.5259 8.60011 61.6006 8.34411C61.6806 8.08811 61.7206 7.83744 61.7206 7.59211C61.7206 7.34677 61.6806 7.09611 61.6006 6.84011C61.5259 6.57877 61.3952 6.34677 61.2086 6.14411C61.0272 5.94144 60.7766 5.80011 60.4566 5.72011C60.3446 5.68811 60.2219 5.66944 60.0886 5.66411C59.9552 5.65344 59.8352 5.64811 59.7286 5.64811H56.9766V9.53611ZM65.4273 15.3601V3.84011H67.3553V15.3601H65.4273Z" fill="#246161"/>
</g>
<defs>
<clipPath id="clip0_4019_689">
<rect width="71.0711" height="18.68" fill="white" transform="translate(0 0.360107)"/>
</clipPath>
</defs>
</svg>
@@ -62,7 +62,7 @@ class ExampleLinksDirective(SphinxDirective):
            item_node.append(para_node)
            list_node.append(item_node)
        if list_node.children:
            title_node = nodes.title()
            title_node = nodes.rubric()
            title_node.append(nodes.Text(f"Examples using {class_or_func_name}"))
            return [title_node, list_node]
        return [list_node]
@@ -75,7 +75,10 @@ class Beta(BaseAdmonition):
    def run(self):
        self.content = self.content or StringList(
            [
                "This feature is in beta. It is actively being worked on, so the API may change."
                (
                    "This feature is in beta. It is actively being worked on, so the "
                    "API may change."
                )
            ]
        )
        self.arguments = self.arguments or ["Beta"]
@@ -90,13 +93,10 @@ def setup(app):
# -- Project information -----------------------------------------------------

project = "🦜🔗 LangChain"
copyright = "2023, LangChain, Inc."
author = "LangChain, Inc."
copyright = "2023, LangChain Inc"
author = "LangChain, Inc"

version = data["tool"]["poetry"]["version"]
release = version

html_title = project + " " + version
html_favicon = "_static/img/brand/favicon.png"
html_last_updated_fmt = "%b %d, %Y"

@@ -112,11 +112,13 @@ extensions = [
    "sphinx.ext.napoleon",
    "sphinx.ext.viewcode",
    "sphinxcontrib.autodoc_pydantic",
    "sphinx_copybutton",
    "sphinx_panels",
    "IPython.sphinxext.ipython_console_highlighting",
    "myst_parser",
    "_extensions.gallery_directive",
    "sphinx_design",
    "sphinx_copybutton",
]
source_suffix = [".rst"]
source_suffix = [".rst", ".md"]

# some autodoc pydantic options are repeated in the actual template.
# potentially user error, but there may be bugs in the sphinx extension
@@ -148,23 +150,84 @@ exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "scikit-learn-modern"
html_theme_path = ["themes"]
# The theme to use for HTML and HTML Help pages.
html_theme = "pydata_sphinx_theme"

# redirects dictionary maps from old links to new links
html_additional_pages = {}
redirects = {
    "index": "langchain_api_reference",
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    # # -- General configuration ------------------------------------------------
    "sidebar_includehidden": True,
    "use_edit_page_button": False,
    # # "analytics": {
    # # "plausible_analytics_domain": "scikit-learn.org",
    # # "plausible_analytics_url": "https://views.scientific-python.org/js/script.js",
    # # },
    # # If "prev-next" is included in article_footer_items, then setting show_prev_next
    # # to True would repeat prev and next links. See
    # # https://github.com/pydata/pydata-sphinx-theme/blob/b731dc230bc26a3d1d1bb039c56c977a9b3d25d8/src/pydata_sphinx_theme/theme/pydata_sphinx_theme/layout.html#L118-L129
    "show_prev_next": False,
    "search_bar_text": "Search",
    "navigation_with_keys": True,
    "collapse_navigation": True,
    "navigation_depth": 3,
    "show_nav_level": 1,
    "show_toc_level": 3,
    "navbar_align": "left",
    "header_links_before_dropdown": 5,
    "header_dropdown_text": "Integrations",
    "logo": {
        "image_light": "_static/wordmark-api.svg",
        "image_dark": "_static/wordmark-api-dark.svg",
    },
    "surface_warnings": True,
    # # -- Template placement in theme layouts ----------------------------------
    "navbar_start": ["navbar-logo"],
    # # Note that the alignment of navbar_center is controlled by navbar_align
    "navbar_center": ["navbar-nav"],
    "navbar_end": ["langchain_docs", "theme-switcher", "navbar-icon-links"],
    # # navbar_persistent is persistent right (even when on mobiles)
    "navbar_persistent": ["search-field"],
    "article_header_start": ["breadcrumbs"],
    "article_header_end": [],
    "article_footer_items": [],
    "content_footer_items": [],
    # # Use html_sidebars that map page patterns to list of sidebar templates
    # "primary_sidebar_end": [],
    "footer_start": ["copyright"],
    "footer_center": [],
    "footer_end": [],
    # # When specified as a dictionary, the keys should follow glob-style patterns, as in
    # # https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-exclude_patterns
    # # In particular, "**" specifies the default for all pages
    # # Use :html_theme.sidebar_secondary.remove: for file-wide removal
    # "secondary_sidebar_items": {"**": ["page-toc", "sourcelink"]},
    # "show_version_warning_banner": True,
    # "announcement": None,
    "icon_links": [
        {
            # Label for this link
            "name": "GitHub",
            # URL where the link will redirect
            "url": "https://github.com/langchain-ai/langchain",  # required
            # Icon class (if "type": "fontawesome"), or path to local image (if "type": "local")
            "icon": "fa-brands fa-square-github",
            # The type of image to be used (see below for details)
            "type": "fontawesome",
        },
        {
            "name": "X / Twitter",
            "url": "https://twitter.com/langchainai",
            "icon": "fab fa-twitter-square",
        },
    ],
    "icon_links_label": "Quick Links",
    "external_links": [
        {"name": "Legacy reference", "url": "https://api.python.langchain.com/"},
    ],
}
for old_link in redirects:
    html_additional_pages[old_link] = "redirects.html"

partners_dir = Path(__file__).parent.parent.parent / "libs/partners"
partners = [
    (p.name, p.name.replace("-", "_") + "_api_reference")
    for p in partners_dir.iterdir()
]
partners = sorted(partners)

html_context = {
    "display_github": True,  # Integrate GitHub
@@ -172,8 +235,6 @@ html_context = {
    "github_repo": "langchain",  # Repo name
    "github_version": "master",  # Version
    "conf_py_path": "/docs/api_reference",  # Path in the checkout to the docs root
    "redirects": redirects,
    "partners": partners,
}

# Add any paths that contain custom static files (such as style sheets) here,
@@ -183,9 +244,7 @@ html_static_path = ["_static"]

# These paths are either relative to html_static_path
# or fully qualified paths (e.g. https://...)
html_css_files = [
    "css/custom.css",
]
html_css_files = ["css/custom.css"]
html_use_index = False

myst_enable_extensions = ["colon_fence"]
@@ -202,3 +261,5 @@ html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "")
# Tell Jinja2 templates the build is running on Read the Docs
if os.environ.get("READTHEDOCS", "") == "True":
    html_context["READTHEDOCS"] = True

master_doc = "index"

@@ -239,7 +239,7 @@ def _construct_doc(
    package_namespace: str,
    members_by_namespace: Dict[str, ModuleMembers],
    package_version: str,
) -> str:
) -> List[typing.Tuple[str, str]]:
    """Construct the contents of the reference.rst file for the given package.

    Args:
@@ -251,15 +251,38 @@ def _construct_doc(
    Returns:
        The contents of the reference.rst file.
    """
    full_doc = f"""\
=======================
``{package_namespace}`` {package_version}
=======================
    docs = []
    index_doc = f"""\
:html_theme.sidebar_secondary.remove:

.. currentmodule:: {package_namespace}

.. _{package_namespace}:

======================================
{package_namespace.replace('_', '-')}: {package_version}
======================================

.. automodule:: {package_namespace}
    :no-members:
    :no-inherited-members:

.. toctree::
    :hidden:
    :maxdepth: 2

"""
    index_autosummary = """
"""
    namespaces = sorted(members_by_namespace)

    for module in namespaces:
        index_doc += f"    {module}\n"
        module_doc = f"""\
.. currentmodule:: {package_namespace}

.. _{package_namespace}_{module}:
"""
        _members = members_by_namespace[module]
        classes = [
            el
@@ -281,9 +304,9 @@ def _construct_doc(
        ]
        if not (classes or functions):
            continue
        section = f":mod:`{package_namespace}.{module}`"
        section = f":mod:`{module}`"
        underline = "=" * (len(section) + 1)
        full_doc += f"""\
        module_doc += f"""
{section}
{underline}

@@ -291,16 +314,26 @@ def _construct_doc(
    :no-members:
    :no-inherited-members:

"""

        index_autosummary += f"""
:ref:`{package_namespace}_{module}`
{'^' * (len(package_namespace) + len(module) + 8)}
"""

        if classes:
            full_doc += f"""\
Classes
--------------
            module_doc += f"""\
**Classes**

.. currentmodule:: {package_namespace}

.. autosummary::
    :toctree: {module}
"""
            index_autosummary += """
**Classes**

.. autosummary::
"""

        for class_ in sorted(classes, key=lambda c: c["qualified_name"]):
@@ -317,19 +350,22 @@ Classes
            else:
                template = "class.rst"

            full_doc += f"""\
            module_doc += f"""\
    :template: {template}

    {class_["qualified_name"]}

"""
            index_autosummary += f"""
    {class_['qualified_name']}
"""

        if functions:
            _functions = [f["qualified_name"] for f in functions]
            fstring = "\n    ".join(sorted(_functions))
            full_doc += f"""\
Functions
--------------
            module_doc += f"""\
**Functions**

.. currentmodule:: {package_namespace}

.. autosummary::
@@ -338,11 +374,18 @@ Functions

{fstring}

"""

            index_autosummary += f"""
**Functions**

.. autosummary::

{fstring}
"""
        if deprecated_classes:
            full_doc += f"""\
Deprecated classes
--------------
            module_doc += f"""\
**Deprecated classes**

.. currentmodule:: {package_namespace}

@@ -350,6 +393,12 @@ Deprecated classes
    :toctree: {module}
"""

            index_autosummary += """
**Deprecated classes**

.. autosummary::
"""

        for class_ in sorted(deprecated_classes, key=lambda c: c["qualified_name"]):
            if class_["kind"] == "TypedDict":
                template = "typeddict.rst"
@@ -364,19 +413,21 @@ Deprecated classes
            else:
                template = "class.rst"

            full_doc += f"""\
            module_doc += f"""\
    :template: {template}

    {class_["qualified_name"]}

"""
            index_autosummary += f"""
    {class_['qualified_name']}
"""

        if deprecated_functions:
            _functions = [f["qualified_name"] for f in deprecated_functions]
            fstring = "\n    ".join(sorted(_functions))
            full_doc += f"""\
Deprecated functions
--------------
            module_doc += f"""\
**Deprecated functions**

.. currentmodule:: {package_namespace}

@@ -387,7 +438,17 @@ Deprecated functions
{fstring}

"""
    return full_doc
            index_autosummary += f"""
**Deprecated functions**

.. autosummary::

{fstring}

"""
        docs.append((f"{module}.rst", module_doc))
    docs.append(("index.rst", index_doc + index_autosummary))
    return docs


def _build_rst_file(package_name: str = "langchain") -> None:
@@ -399,16 +460,25 @@ def _build_rst_file(package_name: str = "langchain") -> None:
    package_dir = _package_dir(package_name)
    package_members = _load_package_modules(package_dir)
    package_version = _get_package_version(package_dir)
    with open(_out_file_path(package_name), "w") as f:
        f.write(
            _doc_first_line(package_name)
            + _construct_doc(
                _package_namespace(package_name), package_members, package_version
            )
        )
    output_dir = _out_file_path(package_name)
    os.mkdir(output_dir)
    rsts = _construct_doc(
        _package_namespace(package_name), package_members, package_version
    )
    for name, rst in rsts:
        with open(output_dir / name, "w") as f:
            f.write(rst)


def _package_namespace(package_name: str) -> str:
    """Returns the package name used.

    Args:
        package_name: Can be either "langchain" or "core" or "experimental".

    Returns:
        modified package_name: Can be either "langchain" or "langchain_{package_name}"
    """
    return (
        package_name
        if package_name == "langchain"
@@ -455,12 +525,119 @@ def _get_package_version(package_dir: Path) -> str:

def _out_file_path(package_name: str) -> Path:
    """Return the path to the file containing the documentation."""
    return HERE / f"{package_name.replace('-', '_')}_api_reference.rst"
    return HERE / f"{package_name.replace('-', '_')}"


def _doc_first_line(package_name: str) -> str:
    """Return the path to the file containing the documentation."""
    return f".. {package_name.replace('-', '_')}_api_reference:\n\n"
def _build_index(dirs: List[str]) -> None:
    custom_names = {
        "airbyte": "Airbyte",
        "aws": "AWS",
        "ai21": "AI21",
    }
    ordered = ["core", "langchain", "text-splitters", "community", "experimental"]
    main_ = [dir_ for dir_ in ordered if dir_ in dirs]
    integrations = sorted(dir_ for dir_ in dirs if dir_ not in main_)
    doc = """# LangChain Python API Reference

Welcome to the LangChain Python API reference. This is a reference for all
`langchain-x` packages.

For user guides see [https://python.langchain.com](https://python.langchain.com).

For the legacy API reference hosted on ReadTheDocs see [https://api.python.langchain.com/](https://api.python.langchain.com/).
"""

    if main_:
        main_headers = [
            " ".join(custom_names.get(x, x.title()) for x in dir_.split("-"))
            for dir_ in main_
        ]
        main_tree = "\n".join(
            f"{header_name}<{dir_.replace('-', '_')}/index>"
            for header_name, dir_ in zip(main_headers, main_)
        )
        main_grid = "\n".join(
            f'- header: "**{header_name}**"\n  content: "{_package_namespace(dir_).replace("_", "-")}: {_get_package_version(_package_dir(dir_))}"\n  link: {dir_.replace("-", "_")}/index.html'
            for header_name, dir_ in zip(main_headers, main_)
        )
        doc += f"""## Base packages

```{{gallery-grid}}
:grid-columns: "1 2 2 3"

{main_grid}
```

```{{toctree}}
:maxdepth: 2
:hidden:
:caption: Base packages

{main_tree}
```
"""
    if integrations:
        integration_headers = [
            " ".join(
                custom_names.get(x, x.title().replace("ai", "AI").replace("db", "DB"))
                for x in dir_.split("-")
            )
            for dir_ in integrations
        ]
        integration_tree = "\n".join(
            f"{header_name}<{dir_.replace('-', '_')}/index>"
            for header_name, dir_ in zip(integration_headers, integrations)
        )

        integration_grid = ""
        integrations_to_show = [
            "openai",
            "anthropic",
            "google-vertexai",
            "aws",
            "huggingface",
            "mistralai",
        ]
        for header_name, dir_ in sorted(
            zip(integration_headers, integrations),
            key=lambda h_d: integrations_to_show.index(h_d[1])
            if h_d[1] in integrations_to_show
            else len(integrations_to_show),
        )[: len(integrations_to_show)]:
            integration_grid += f'\n- header: "**{header_name}**"\n  content: {_package_namespace(dir_).replace("_", "-")} {_get_package_version(_package_dir(dir_))}\n  link: {dir_.replace("-", "_")}/index.html'
        doc += f"""## Integrations

```{{gallery-grid}}
:grid-columns: "1 2 2 3"

{integration_grid}
```

See the full list of integrations in the Section Navigation.

```{{toctree}}
:maxdepth: 2
:hidden:
:caption: Integrations

{integration_tree}
```
"""
    with open(HERE / "reference.md", "w") as f:
        f.write(doc)

    dummy_index = """\
# API reference

```{toctree}
:maxdepth: 3
:hidden:

Reference<reference>
```
"""
    with open(HERE / "index.md", "w") as f:
        f.write(dummy_index)


def main(dirs: Optional[list] = None) -> None:
@@ -488,6 +665,8 @@ def main(dirs: Optional[list] = None) -> None:
    else:
        print("Building package:", dir_)
        _build_rst_file(package_name=dir_)

    _build_index(dirs)
    print("API reference files built.")

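To make the templates above concrete, this is roughly the grid fragment `_build_index` would emit into `reference.md` for two of the base packages; the package names are real, but the version numbers here are placeholders:

```{gallery-grid}
:grid-columns: "1 2 2 3"

- header: "**Core**"
  content: "langchain-core: 0.2.x"
  link: core/index.html
- header: "**Langchain**"
  content: "langchain: 0.2.x"
  link: langchain/index.html
```
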
File diff suppressed because one or more lines are too long
@@ -1,8 +0,0 @@
=============
LangChain API
=============

.. toctree::
    :maxdepth: 2

    api_reference.rst
@@ -1,17 +1,11 @@
-e libs/experimental
-e libs/langchain
-e libs/core
-e libs/community
pydantic<2
autodoc_pydantic==1.8.0
myst_parser
nbsphinx==0.8.9
sphinx>=5
sphinx-autobuild==2021.3.14
sphinx_rtd_theme==1.0.0
sphinx-typlog-theme==0.8.0
sphinx-panels
toml
myst_nb
sphinx_copybutton
pydata-sphinx-theme==0.13.1
autodoc_pydantic>=2,<3
sphinx>=8,<9
myst-parser>=3
sphinx-autobuild>=2024
pydata-sphinx-theme>=0.15
toml>=0.10.2
myst-nb>=1.1.1
pyyaml
sphinx-design
sphinx-copybutton
beautifulsoup4

docs/api_reference/scripts/custom_formatter.py (new file, 44 lines)
@@ -0,0 +1,44 @@
import sys
from glob import glob
from pathlib import Path

from bs4 import BeautifulSoup

CUR_DIR = Path(__file__).parents[1]


def process_toc_h3_elements(html_content: str) -> str:
    """Update Class.method() TOC headers to just method()."""
    # Create a BeautifulSoup object
    soup = BeautifulSoup(html_content, "html.parser")

    # Find all <li> elements with class "toc-h3"
    toc_h3_elements = soup.find_all("li", class_="toc-h3")

    # Process each element
    for element in toc_h3_elements:
        try:
            element = element.a.code.span
        except Exception:
            continue
        # Get the text content of the element
        content = element.get_text()

        # Keep only the method name after the last dot
        modified_content = content.split(".")[-1]

        # Update the element's content
        element.string = modified_content

    # Return the modified HTML
    return str(soup)


if __name__ == "__main__":
    dir = sys.argv[1]
    for fn in glob(str(f"{dir.rstrip('/')}/**/*.html"), recursive=True):
        with open(fn, "r") as f:
            html = f.read()
        processed_html = process_toc_h3_elements(html)
        with open(fn, "w") as f:
            f.write(processed_html)
@@ -1,4 +1,4 @@
:mod:`{{module}}`.{{objname}}
{{ objname }}
{{ underline }}==============

.. currentmodule:: {{ module }}
@@ -11,7 +11,7 @@

.. autosummary::
{% for item in attributes %}
    ~{{ name }}.{{ item }}
    ~{{ item }}
{%- endfor %}
{% endif %}
{% endblock %}
@@ -22,11 +22,11 @@

.. autosummary::
{% for item in methods %}
    ~{{ name }}.{{ item }}
    ~{{ item }}
{%- endfor %}

{% for item in methods %}
.. automethod:: {{ name }}.{{ item }}
.. automethod:: {{ item }}
{%- endfor %}

{% endif %}

@@ -1,4 +1,4 @@
:mod:`{{module}}`.{{objname}}
{{ objname }}
{{ underline }}==============

.. currentmodule:: {{ module }}

@@ -1,4 +1,4 @@
:mod:`{{module}}`.{{objname}}
{{ objname }}
{{ underline }}==============

.. currentmodule:: {{ module }}

docs/api_reference/templates/langchain_docs.html (new file, 12 lines)
@@ -0,0 +1,12 @@
<!-- This will display a link to LangChain docs -->
<head>
  <style>
    .text-link {
      text-decoration: none; /* Remove underline */
      color: inherit; /* Inherit color from parent element */
    }
  </style>
</head>
<body>
  <a href="https://python.langchain.com/" class='text-link'>Docs</a>
</body>
@@ -1,4 +1,4 @@
:mod:`{{module}}`.{{objname}}
{{ objname }}
{{ underline }}==============

.. currentmodule:: {{ module }}
@@ -15,7 +15,7 @@
    :member-order: groupwise
    :show-inheritance: True
    :special-members: __call__
    :exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace
    :exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, model_construct, model_copy, model_dump, model_dump_json, model_parametrized_name, model_post_init, model_rebuild, model_validate, model_validate_json, model_validate_strings, model_extra, model_fields_set, model_json_schema


{% block attributes %}

@@ -1,21 +1,21 @@
:mod:`{{module}}`.{{objname}}
{{ objname }}
{{ underline }}==============

.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃

    The :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>` has additional methods that are available on runnables, such as :py:meth:`with_types <langchain_core.runnables.base.Runnable.with_types>`, :py:meth:`with_retry <langchain_core.runnables.base.Runnable.with_retry>`, :py:meth:`assign <langchain_core.runnables.base.Runnable.assign>`, :py:meth:`bind <langchain_core.runnables.base.Runnable.bind>`, :py:meth:`get_graph <langchain_core.runnables.base.Runnable.get_graph>`, and more.

.. currentmodule:: {{ module }}

.. autoclass:: {{ objname }}

.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃

    The :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>` has additional methods that are available on runnables, such as :py:meth:`with_types <langchain_core.runnables.base.Runnable.with_types>`, :py:meth:`with_retry <langchain_core.runnables.base.Runnable.with_retry>`, :py:meth:`assign <langchain_core.runnables.base.Runnable.assign>`, :py:meth:`bind <langchain_core.runnables.base.Runnable.bind>`, :py:meth:`get_graph <langchain_core.runnables.base.Runnable.get_graph>`, and more.

{% block attributes %}
{% if attributes %}
.. rubric:: {{ _('Attributes') }}

.. autosummary::
{% for item in attributes %}
    ~{{ name }}.{{ item }}
    ~{{ item }}
{%- endfor %}
{% endif %}
{% endblock %}
@@ -26,11 +26,11 @@

.. autosummary::
{% for item in methods %}
    ~{{ name }}.{{ item }}
    ~{{ item }}
{%- endfor %}

{% for item in methods %}
.. automethod:: {{ name }}.{{ item }}
.. automethod:: {{ item }}
{%- endfor %}

{% endif %}

@@ -1,10 +1,6 @@
:mod:`{{module}}`.{{objname}}
{{ objname }}
{{ underline }}==============

.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃

    The :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>` has additional methods that are available on runnables, such as :py:meth:`with_types <langchain_core.runnables.base.Runnable.with_types>`, :py:meth:`with_retry <langchain_core.runnables.base.Runnable.with_retry>`, :py:meth:`assign <langchain_core.runnables.base.Runnable.assign>`, :py:meth:`bind <langchain_core.runnables.base.Runnable.bind>`, :py:meth:`get_graph <langchain_core.runnables.base.Runnable.get_graph>`, and more.

.. currentmodule:: {{ module }}

.. autopydantic_model:: {{ objname }}
@@ -19,6 +15,10 @@
    :member-order: groupwise
    :show-inheritance: True
    :special-members: __call__
    :exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, astream_log, transform, atransform, get_output_schema, get_prompts, config_schema, map, pick, pipe, with_listeners, with_alisteners, with_config, with_fallbacks, with_types, with_retry, InputType, OutputType, config_specs, output_schema, get_input_schema, get_graph, get_name, input_schema, name, bind, assign
    :exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, astream_log, transform, atransform, get_output_schema, get_prompts, config_schema, map, pick, pipe, with_listeners, with_alisteners, with_config, with_fallbacks, with_types, with_retry, InputType, OutputType, config_specs, output_schema, get_input_schema, get_graph, get_name, input_schema, name, bind, assign, as_tool, get_config_jsonschema, get_input_jsonschema, get_output_jsonschema, model_construct, model_copy, model_dump, model_dump_json, model_parametrized_name, model_post_init, model_rebuild, model_validate, model_validate_json, model_validate_strings, to_json, model_extra, model_fields_set, model_json_schema

.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃

    The :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>` has additional methods that are available on runnables, such as :py:meth:`with_types <langchain_core.runnables.base.Runnable.with_types>`, :py:meth:`with_retry <langchain_core.runnables.base.Runnable.with_retry>`, :py:meth:`assign <langchain_core.runnables.base.Runnable.assign>`, :py:meth:`bind <langchain_core.runnables.base.Runnable.bind>`, :py:meth:`get_graph <langchain_core.runnables.base.Runnable.get_graph>`, and more.

.. example_links:: {{ objname }}

@@ -1,4 +1,4 @@
-:mod:`{{module}}`.{{objname}}
-{{ underline }}
+{{ objname }}
+==============

.. currentmodule:: {{ module }}

@@ -1,27 +0,0 @@
-Copyright (c) 2007-2023 The scikit-learn developers.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
-
-* Neither the name of the copyright holder nor the names of its
-  contributors may be used to endorse or promote products derived from
-  this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -1,67 +0,0 @@
-<script>
-$(document).ready(function() {
-    /* Add a [>>>] button on the top-right corner of code samples to hide
-     * the >>> and ... prompts and the output and thus make the code
-     * copyable. */
-    var div = $('.highlight-python .highlight,' +
-                '.highlight-python3 .highlight,' +
-                '.highlight-pycon .highlight,' +
-                '.highlight-default .highlight')
-    var pre = div.find('pre');
-
-    // get the styles from the current theme
-    pre.parent().parent().css('position', 'relative');
-    var hide_text = 'Hide prompts and outputs';
-    var show_text = 'Show prompts and outputs';
-
-    // create and add the button to all the code blocks that contain >>>
-    div.each(function(index) {
-        var jthis = $(this);
-        if (jthis.find('.gp').length > 0) {
-            var button = $('<span class="copybutton">>>></span>');
-            button.attr('title', hide_text);
-            button.data('hidden', 'false');
-            jthis.prepend(button);
-        }
-        // tracebacks (.gt) contain bare text elements that need to be
-        // wrapped in a span to work with .nextUntil() (see later)
-        jthis.find('pre:has(.gt)').contents().filter(function() {
-            return ((this.nodeType == 3) && (this.data.trim().length > 0));
-        }).wrap('<span>');
-    });
-
-    // define the behavior of the button when it's clicked
-    $('.copybutton').click(function(e){
-        e.preventDefault();
-        var button = $(this);
-        if (button.data('hidden') === 'false') {
-            // hide the code output
-            button.parent().find('.go, .gp, .gt').hide();
-            button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden');
-            button.css('text-decoration', 'line-through');
-            button.attr('title', show_text);
-            button.data('hidden', 'true');
-        } else {
-            // show the code output
-            button.parent().find('.go, .gp, .gt').show();
-            button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible');
-            button.css('text-decoration', 'none');
-            button.attr('title', hide_text);
-            button.data('hidden', 'false');
-        }
-    });
-
-    /*** Add permalink buttons next to glossary terms ***/
-    $('dl.glossary > dt[id]').append(function() {
-        return ('<a class="headerlink" href="#' +
-                this.getAttribute('id') +
-                '" title="Permalink to this term">¶</a>');
-    });
-});
-
-</script>
-{%- if pagename != 'index' and pagename != 'documentation' %}
-{% if theme_mathjax_path %}
-<script id="MathJax-script" async src="{{ theme_mathjax_path }}"></script>
-{% endif %}
-{%- endif %}

@@ -1,132 +0,0 @@
-{# TEMPLATE VAR SETTINGS #}
-{%- set url_root = pathto('', 1) %}
-{%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
-{%- if not embedded and docstitle %}
-{%- set titlesuffix = " — "|safe + docstitle|e %}
-{%- else %}
-{%- set titlesuffix = "" %}
-{%- endif %}
-{%- set lang_attr = 'en' %}
-
-<!DOCTYPE html>
-<!--[if IE 8]><html class="no-js lt-ie9" lang="{{ lang_attr }}" > <![endif]-->
-<!--[if gt IE 8]><!-->
-<html class="no-js" lang="{{ lang_attr }}"> <!--<![endif]-->
-<head>
-<meta charset="utf-8">
-{{ metatags }}
-<meta name="viewport" content="width=device-width, initial-scale=1.0">
-
-{% block htmltitle %}
-<title>{{ title|striptags|e }}{{ titlesuffix }}</title>
-{% endblock %}
-<link rel="canonical"
-href="https://api.python.langchain.com/en/latest/{{ pagename }}.html"/>
-
-{% if favicon_url %}
-<link rel="shortcut icon" href="{{ favicon_url|e }}"/>
-{% endif %}
-
-<link rel="stylesheet"
-href="{{ pathto('_static/css/vendor/bootstrap.min.css', 1) }}"
-type="text/css"/>
-{%- for css in css_files %}
-{%- if css|attr("rel") %}
-<link rel="{{ css.rel }}" href="{{ pathto(css.filename, 1) }}"
-type="text/css"{% if css.title is not none %}
-title="{{ css.title }}"{% endif %} />
-{%- else %}
-<link rel="stylesheet" href="{{ pathto(css, 1) }}" type="text/css"/>
-{%- endif %}
-{%- endfor %}
-<link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css"/>
-<script id="documentation_options" data-url_root="{{ pathto('', 1) }}"
-src="{{ pathto('_static/documentation_options.js', 1) }}"></script>
-<script src="{{ pathto('_static/jquery.js', 1) }}"></script>
-{%- block extrahead %} {% endblock %}
-</head>
-<body>
-{% include "nav.html" %}
-{%- block content %}
-<div class="d-flex" id="sk-doc-wrapper">
-<input type="checkbox" name="sk-toggle-checkbox" id="sk-toggle-checkbox">
-<label id="sk-sidemenu-toggle" class="sk-btn-toggle-toc btn sk-btn-primary"
-for="sk-toggle-checkbox">Toggle Menu</label>
-<div id="sk-sidebar-wrapper" class="border-right">
-<div class="sk-sidebar-toc-wrapper">
-{%- if meta and meta['parenttoc']|tobool %}
-<div class="sk-sidebar-toc">
-{% set nav = get_nav_object(maxdepth=3, collapse=True, numbered=True) %}
-<ul>
-{% for main_nav_item in nav %}
-{% if main_nav_item.active %}
-<li>
-<a href="{{ main_nav_item.url }}"
-class="sk-toc-active">{{ main_nav_item.title }}</a>
-</li>
-<ul>
-{% for nav_item in main_nav_item.children %}
-<li>
-<a href="{{ nav_item.url }}"
-class="{% if nav_item.active %}sk-toc-active{% endif %}">{{ nav_item.title }}</a>
-{% if nav_item.children %}
-<ul>
-{% for inner_child in nav_item.children %}
-<li class="sk-toctree-l3">
-<a href="{{ inner_child.url }}">{{ inner_child.title }}</a>
-</li>
-{% endfor %}
-</ul>
-{% endif %}
-</li>
-{% endfor %}
-</ul>
-{% endif %}
-{% endfor %}
-</ul>
-</div>
-{%- elif meta and meta['globalsidebartoc']|tobool %}
-<div class="sk-sidebar-toc sk-sidebar-global-toc">
-{{ toctree(maxdepth=2, titles_only=True) }}
-</div>
-{%- else %}
-<div class="sk-sidebar-toc">
-{{ toc }}
-</div>
-{%- endif %}
-</div>
-</div>
-<div id="sk-page-content-wrapper">
-<div class="sk-page-content container-fluid body px-md-3" role="main">
-{% block body %}{% endblock %}
-</div>
-<div class="container">
-<footer class="sk-content-footer">
-{%- if pagename != 'index' %}
-{%- if show_copyright %}
-{%- if hasdoc('copyright') %}
-{% trans path=pathto('copyright'), copyright=copyright|e %}
-© {{ copyright }}.{% endtrans %}
-{%- else %}
-{% trans copyright=copyright|e %}© {{ copyright }}
-.{% endtrans %}
-{%- endif %}
-{%- endif %}
-{%- if last_updated %}
-{% trans last_updated=last_updated|e %}Last updated
-on {{ last_updated }}.{% endtrans %}
-{%- endif %}
-{%- if show_source and has_source and sourcename %}
-<a href="{{ pathto('_sources/' + sourcename, true)|e }}"
-rel="nofollow">{{ _('Show this page source') }}</a>
-{%- endif %}
-{%- endif %}
-</footer>
-</div>
-</div>
-</div>
-{%- endblock %}
-<script src="{{ pathto('_static/js/vendor/bootstrap.min.js', 1) }}"></script>
-{% include "javascript.html" %}
-</body>
-</html>

@@ -1,78 +0,0 @@
-{%- if pagename != 'index' and pagename != 'documentation' %}
-{%- set nav_bar_class = "sk-docs-navbar" %}
-{%- set top_container_cls = "sk-docs-container" %}
-{%- else %}
-{%- set nav_bar_class = "sk-landing-navbar" %}
-{%- set top_container_cls = "sk-landing-container" %}
-{%- endif %}
-
-<nav id="navbar" class="{{ nav_bar_class }} navbar navbar-expand-md navbar-light bg-light py-0">
-<div class="container-fluid {{ top_container_cls }} px-0">
-{%- if logo_url %}
-<a class="navbar-brand py-0" href="{{ pathto('index') }}">
-<img
-class="sk-brand-img"
-src="{{ logo_url|e }}"
-alt="logo"/>
-</a>
-{%- endif %}
-<button
-id="sk-navbar-toggler"
-class="navbar-toggler"
-type="button"
-data-toggle="collapse"
-data-target="#navbarSupportedContent"
-aria-controls="navbarSupportedContent"
-aria-expanded="false"
-aria-label="Toggle navigation"
->
-<span class="navbar-toggler-icon"></span>
-</button>
-
-<div class="sk-navbar-collapse collapse navbar-collapse" id="navbarSupportedContent">
-<ul class="navbar-nav mr-auto">
-<li class="nav-item">
-<a class="sk-nav-link nav-link" href="{{ pathto('langchain_api_reference') }}">LangChain</a>
-</li>
-<li class="nav-item">
-<a class="sk-nav-link nav-link" href="{{ pathto('core_api_reference') }}">Core</a>
-</li>
-<li class="nav-item">
-<a class="sk-nav-link nav-link" href="{{ pathto('community_api_reference') }}">Community</a>
-</li>
-<li class="nav-item">
-<a class="sk-nav-link nav-link" href="{{ pathto('experimental_api_reference') }}">Experimental</a>
-</li>
-<li class="nav-item">
-<a class="sk-nav-link nav-link" href="{{ pathto('text_splitters_api_reference') }}">Text splitters</a>
-</li>
-{%- for title, pathname in partners %}
-<li class="nav-item">
-<a class="sk-nav-link nav-link nav-more-item-mobile-items" href="{{ pathto(pathname) }}">{{ title }}</a>
-</li>
-{%- endfor %}
-<li class="nav-item dropdown nav-more-item-dropdown">
-<a class="sk-nav-link nav-link dropdown-toggle" href="#" id="navbarDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Partner libs</a>
-<div class="dropdown-menu" aria-labelledby="navbarDropdown">
-{%- for title, pathname in partners %}
-<a class="sk-nav-dropdown-item dropdown-item" href="{{ pathto(pathname) }}">{{ title }}</a>
-{%- endfor %}
-</div>
-</li>
-<li class="nav-item">
-<a class="sk-nav-link nav-link" target="_blank" rel="noopener noreferrer" href="https://python.langchain.com/">Docs</a>
-</li>
-</ul>
-{%- if pagename != "search"%}
-<div id="searchbox" role="search">
-<div class="searchformwrapper">
-<form class="search" action="{{ pathto('search') }}" method="get">
-<input class="sk-search-text-input" type="text" name="q" aria-labelledby="searchlabel" />
-<input class="sk-search-text-btn" type="submit" value="{{ _('Go') }}" />
-</form>
-</div>
-</div>
-{%- endif %}
-</div>
-</div>
-</nav>

@@ -1,16 +0,0 @@
-{%- extends "basic/search.html" %}
-{% block extrahead %}
-<script type="text/javascript" src="{{ pathto('_static/underscore.js', 1) }}"></script>
-<script type="text/javascript" src="{{ pathto('searchindex.js', 1) }}" defer></script>
-<script type="text/javascript" src="{{ pathto('_static/doctools.js', 1) }}"></script>
-<script type="text/javascript" src="{{ pathto('_static/language_data.js', 1) }}"></script>
-<script type="text/javascript" src="{{ pathto('_static/searchtools.js', 1) }}"></script>
-<script type="text/javascript" src="{{ pathto('_static/sphinx_highlight.js', 1) }}"></script>
-<script type="text/javascript">
-$(document).ready(function() {
-    if (!Search.out) {
-        Search.init();
-    }
-});
-</script>
-{% endblock %}

File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,8 +0,0 @@
-[theme]
-inherit = basic
-pygments_style = default
-stylesheet = css/theme.css
-
-[options]
-link_to_live_contributing_page = false
-mathjax_path =

docs/data/people.yml: 1451 changed lines (file diff suppressed because it is too large)

@@ -4,51 +4,90 @@ LangChain implements the latest research in the field of Natural Language Processing.
This page contains `arXiv` papers referenced in the LangChain Documentation, API Reference, Templates, and Cookbooks.

-From the opposite direction, scientists use LangChain in research and reference LangChain in the research papers.
-Here you find [such papers](https://arxiv.org/search/?query=langchain&searchtype=all&source=header).
+From the opposite direction, scientists use `LangChain` in research and reference it in the research papers.
+
+`arXiv` papers with references to:
+[LangChain](https://arxiv.org/search/?query=langchain&searchtype=all&source=header) | [LangGraph](https://arxiv.org/search/?query=langgraph&searchtype=all&source=header) | [LangSmith](https://arxiv.org/search/?query=langsmith&searchtype=all&source=header)

## Summary

| arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation |
|------------------|---------|-------------------|-------------------------|
-| `2402.03620v1` [Self-Discover: Large Language Models Self-Compose Reasoning Structures](http://arxiv.org/abs/2402.03620v1) | Pei Zhou, Jay Pujara, Xiang Ren, et al. | 2024-02-06 | `Cookbook:` [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb)
-| `2401.18059v1` [RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval](http://arxiv.org/abs/2401.18059v1) | Parth Sarthi, Salman Abdullah, Aditi Tuli, et al. | 2024-01-31 | `Cookbook:` [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb)
-| `2401.15884v2` [Corrective Retrieval Augmented Generation](http://arxiv.org/abs/2401.15884v2) | Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al. | 2024-01-29 | `Cookbook:` [langgraph_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)
-| `2401.04088v1` [Mixtral of Experts](http://arxiv.org/abs/2401.04088v1) | Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al. | 2024-01-08 | `Cookbook:` [together_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb)
-| `2312.06648v2` [Dense X Retrieval: What Retrieval Granularity Should We Use?](http://arxiv.org/abs/2312.06648v2) | Tong Chen, Hongwei Wang, Sihao Chen, et al. | 2023-12-11 | `Template:` [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)
-| `2311.09210v1` [Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models](http://arxiv.org/abs/2311.09210v1) | Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al. | 2023-11-15 | `Template:` [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)
-| `2310.11511v1` [Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection](http://arxiv.org/abs/2310.11511v1) | Akari Asai, Zeqiu Wu, Yizhong Wang, et al. | 2023-10-17 | `Cookbook:` [langgraph_self_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)
-| `2310.06117v2` [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](http://arxiv.org/abs/2310.06117v2) | Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al. | 2023-10-09 | `Template:` [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting), `Cookbook:` [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)
-| `2307.09288v2` [Llama 2: Open Foundation and Fine-Tuned Chat Models](http://arxiv.org/abs/2307.09288v2) | Hugo Touvron, Louis Martin, Kevin Stone, et al. | 2023-07-18 | `Cookbook:` [Semi_Structured_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb)
-| `2305.14283v3` [Query Rewriting for Retrieval-Augmented Large Language Models](http://arxiv.org/abs/2305.14283v3) | Xinbei Ma, Yeyun Gong, Pengcheng He, et al. | 2023-05-23 | `Template:` [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read), `Cookbook:` [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb)
-| `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) | Jieyi Long | 2023-05-15 | `API:` [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot), `Cookbook:` [tree_of_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)
-| `2305.04091v3` [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](http://arxiv.org/abs/2305.04091v3) | Lei Wang, Wanyu Xu, Yihuai Lan, et al. | 2023-05-06 | `Cookbook:` [plan_and_execute_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)
-| `2304.08485v2` [Visual Instruction Tuning](http://arxiv.org/abs/2304.08485v2) | Haotian Liu, Chunyuan Li, Qingyang Wu, et al. | 2023-04-17 | `Cookbook:` [Semi_structured_and_multi_modal_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb), [Semi_structured_multi_modal_RAG_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb)
-| `2304.03442v2` [Generative Agents: Interactive Simulacra of Human Behavior](http://arxiv.org/abs/2304.03442v2) | Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al. | 2023-04-07 | `Cookbook:` [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb), [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb)
-| `2303.17760v2` [CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society](http://arxiv.org/abs/2303.17760v2) | Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al. | 2023-03-31 | `Cookbook:` [camel_role_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)
-| `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan, et al. | 2023-03-30 | `API:` [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents), `Cookbook:` [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)
-| `2303.08774v6` [GPT-4 Technical Report](http://arxiv.org/abs/2303.08774v6) | OpenAI, Josh Achiam, Steven Adler, et al. | 2023-03-15 | `Docs:` [docs/integrations/vectorstores/mongodb_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)
-| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023-01-24 | `API:` [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
-| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022-12-20 | `API:` [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde), `Cookbook:` [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
-| `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) | Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al. | 2022-12-12 | `API:` [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)
-| `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022-11-25 | `API:` [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
-| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022-11-18 | `API:` [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), `Cookbook:` [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
-| `2210.03629v3` [ReAct: Synergizing Reasoning and Acting in Language Models](http://arxiv.org/abs/2210.03629v3) | Shunyu Yao, Jeffrey Zhao, Dian Yu, et al. | 2022-10-06 | `Docs:` [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface), [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping), `API:` [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)
-| `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. | 2022-09-22 | `Docs:` [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
-| `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022-05-25 | `API:` [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
-| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `API:` [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)
-| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022-02-01 | `API:` [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
-| `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy, et al. | 2021-02-26 | `API:` [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)
-| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. | 2019-09-11 | `API:` [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
-| `1908.10084v1` [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](http://arxiv.org/abs/1908.10084v1) | Nils Reimers, Iryna Gurevych | 2019-08-27 | `Docs:` [docs/integrations/text_embedding/sentence_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)
+| `2403.14403v2` [Adaptive-RAG: Learning to Adapt Retrieval-Augmented Large Language Models through Question Complexity](http://arxiv.org/abs/2403.14403v2) | Soyeong Jeong, Jinheon Baek, Sukmin Cho, et al. | 2024‑03‑21 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
+| `2402.03620v1` [Self-Discover: Large Language Models Self-Compose Reasoning Structures](http://arxiv.org/abs/2402.03620v1) | Pei Zhou, Jay Pujara, Xiang Ren, et al. | 2024‑02‑06 | `Cookbook:` [Self-Discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb)
+| `2402.03367v2` [RAG-Fusion: a New Take on Retrieval-Augmented Generation](http://arxiv.org/abs/2402.03367v2) | Zackary Rackauckas | 2024‑01‑31 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
+| `2401.18059v1` [RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval](http://arxiv.org/abs/2401.18059v1) | Parth Sarthi, Salman Abdullah, Aditi Tuli, et al. | 2024‑01‑31 | `Cookbook:` [Raptor](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb)
+| `2401.15884v2` [Corrective Retrieval Augmented Generation](http://arxiv.org/abs/2401.15884v2) | Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al. | 2024‑01‑29 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts), `Cookbook:` [Langgraph Crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)
+| `2401.08500v1` [Code Generation with AlphaCodium: From Prompt Engineering to Flow Engineering](http://arxiv.org/abs/2401.08500v1) | Tal Ridnik, Dedy Kredo, Itamar Friedman | 2024‑01‑16 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
+| `2401.04088v1` [Mixtral of Experts](http://arxiv.org/abs/2401.04088v1) | Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al. | 2024‑01‑08 | `Cookbook:` [Together Ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb)
+| `2312.06648v2` [Dense X Retrieval: What Retrieval Granularity Should We Use?](http://arxiv.org/abs/2312.06648v2) | Tong Chen, Hongwei Wang, Sihao Chen, et al. | 2023‑12‑11 | `Template:` [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)
+| `2311.09210v1` [Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models](http://arxiv.org/abs/2311.09210v1) | Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al. | 2023‑11‑15 | `Template:` [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)
+| `2310.11511v1` [Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection](http://arxiv.org/abs/2310.11511v1) | Akari Asai, Zeqiu Wu, Yizhong Wang, et al. | 2023‑10‑17 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts), `Cookbook:` [Langgraph Self Rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)
+| `2310.06117v2` [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](http://arxiv.org/abs/2310.06117v2) | Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al. | 2023‑10‑09 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts), `Template:` [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting), `Cookbook:` [Stepback-Qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)
+| `2307.15337v3` [Skeleton-of-Thought: Prompting LLMs for Efficient Parallel Generation](http://arxiv.org/abs/2307.15337v3) | Xuefei Ning, Zinan Lin, Zixuan Zhou, et al. | 2023‑07‑28 | `Template:` [skeleton-of-thought](https://python.langchain.com/docs/templates/skeleton-of-thought)
+| `2307.09288v2` [Llama 2: Open Foundation and Fine-Tuned Chat Models](http://arxiv.org/abs/2307.09288v2) | Hugo Touvron, Louis Martin, Kevin Stone, et al. | 2023‑07‑18 | `Cookbook:` [Semi Structured Rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb)
+| `2307.03172v3` [Lost in the Middle: How Language Models Use Long Contexts](http://arxiv.org/abs/2307.03172v3) | Nelson F. Liu, Kevin Lin, John Hewitt, et al. | 2023‑07‑06 | `Docs:` [docs/how_to/long_context_reorder](https://python.langchain.com/docs/how_to/long_context_reorder)
+| `2305.14283v3` [Query Rewriting for Retrieval-Augmented Large Language Models](http://arxiv.org/abs/2305.14283v3) | Xinbei Ma, Yeyun Gong, Pengcheng He, et al. | 2023‑05‑23 | `Template:` [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read), `Cookbook:` [Rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb)
+| `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) | Jieyi Long | 2023‑05‑15 | `API:` [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot), `Cookbook:` [Tree Of Thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)
+| `2305.04091v3` [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](http://arxiv.org/abs/2305.04091v3) | Lei Wang, Wanyu Xu, Yihuai Lan, et al. | 2023‑05‑06 | `Cookbook:` [Plan And Execute Agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)
+| `2305.02156v1` [Zero-Shot Listwise Document Reranking with a Large Language Model](http://arxiv.org/abs/2305.02156v1) | Xueguang Ma, Xinyu Zhang, Ronak Pradeep, et al. | 2023‑05‑03 | `Docs:` [docs/how_to/contextual_compression](https://python.langchain.com/docs/how_to/contextual_compression), `API:` [langchain...LLMListwiseRerank](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html#langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank)
+| `2304.08485v2` [Visual Instruction Tuning](http://arxiv.org/abs/2304.08485v2) | Haotian Liu, Chunyuan Li, Qingyang Wu, et al. | 2023‑04‑17 | `Cookbook:` [Semi Structured Multi Modal Rag Llama2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb), [Semi Structured And Multi Modal Rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb)
+| `2304.03442v2` [Generative Agents: Interactive Simulacra of Human Behavior](http://arxiv.org/abs/2304.03442v2) | Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al. | 2023‑04‑07 | `Cookbook:` [Generative Agents Interactive Simulacra Of Human Behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb), [Multiagent Bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb)
+| `2303.17760v2` [CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society](http://arxiv.org/abs/2303.17760v2) | Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al. | 2023‑03‑31 | `Cookbook:` [Camel Role Playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)
+| `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan, et al. | 2023‑03‑30 | `API:` [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents), `Cookbook:` [Hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)
+| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023‑01‑24 | `API:` [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
+| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022‑12‑20 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts), `API:` [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde), `Cookbook:` [Hypothetical Document Embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
+| `2212.08073v1` [Constitutional AI: Harmlessness from AI Feedback](http://arxiv.org/abs/2212.08073v1) | Yuntao Bai, Saurav Kadavath, Sandipan Kundu, et al. | 2022‑12‑15 | `Docs:` [docs/versions/migrating_chains/constitutional_chain](https://python.langchain.com/docs/versions/migrating_chains/constitutional_chain)
+| `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) | Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al. | 2022‑12‑12 | `API:` [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)
+| `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022‑11‑25 | `API:` [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
+| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022‑11‑18 | `API:` [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), `Cookbook:` [Program Aided Language Model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
+| `2210.11934v2` [An Analysis of Fusion Functions for Hybrid Retrieval](http://arxiv.org/abs/2210.11934v2) | Sebastian Bruch, Siyu Gai, Amir Ingber | 2022‑10‑21 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
+| `2210.03629v3` [ReAct: Synergizing Reasoning and Acting in Language Models](http://arxiv.org/abs/2210.03629v3) | Shunyu Yao, Jeffrey Zhao, Dian Yu, et al. | 2022‑10‑06 | `Docs:` [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping), [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/concepts](https://python.langchain.com/docs/concepts), `API:` [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)
+| `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. | 2022‑09‑22 | `Docs:` [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
+| `2205.13147v4` [Matryoshka Representation Learning](http://arxiv.org/abs/2205.13147v4) | Aditya Kusupati, Gantavya Bhatt, Aniket Rege, et al. | 2022‑05‑26 | `Docs:` [docs/integrations/providers/snowflake](https://python.langchain.com/docs/integrations/providers/snowflake)
+| `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022‑05‑25 | `API:` [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
+| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022‑03‑15 | `Docs:` [docs/tutorials/sql_qa](https://python.langchain.com/docs/tutorials/sql_qa), `API:` [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)
+| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022‑02‑01 | `API:` [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
+| `2112.01488v3` [ColBERTv2: Effective and Efficient Retrieval via Lightweight Late Interaction](http://arxiv.org/abs/2112.01488v3) | Keshav Santhanam, Omar Khattab, Jon Saad-Falcon, et al. | 2021‑12‑02 | `Docs:` [docs/integrations/retrievers/ragatouille](https://python.langchain.com/docs/integrations/retrievers/ragatouille), [docs/integrations/providers/ragatouille](https://python.langchain.com/docs/integrations/providers/ragatouille), [docs/concepts](https://python.langchain.com/docs/concepts), [docs/integrations/providers/dspy](https://python.langchain.com/docs/integrations/providers/dspy)
+| `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy, et al. | 2021‑02‑26 | `API:` [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)
+| `2005.14165v4` [Language Models are Few-Shot Learners](http://arxiv.org/abs/2005.14165v4) | Tom B. Brown, Benjamin Mann, Nick Ryder, et al. | 2020‑05‑28 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
+| `2005.11401v4` [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](http://arxiv.org/abs/2005.11401v4) | Patrick Lewis, Ethan Perez, Aleksandra Piktus, et al. | 2020‑05‑22 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
+| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. | 2019‑09‑11 | `API:` [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)

## Adaptive-RAG: Learning to Adapt Retrieval-Augmented Large Language Models through Question Complexity

- **Authors:** Soyeong Jeong, Jinheon Baek, Sukmin Cho, et al.
- **arXiv id:** [2403.14403v2](http://arxiv.org/abs/2403.14403v2) **Published Date:** 2024-03-21
- **LangChain:**

  - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)

**Abstract:** Retrieval-Augmented Large Language Models (LLMs), which incorporate the non-parametric knowledge from external knowledge bases into LLMs, have emerged as a promising approach to enhancing response accuracy in several tasks, such as Question-Answering (QA). However, even though there are various approaches dealing with queries of different complexities, they either handle simple queries with unnecessary computational overhead or fail to adequately address complex multi-step queries; yet, not all user requests fall into only one of the simple or complex categories. In this work, we propose a novel adaptive QA framework, that can dynamically select the most suitable strategy for (retrieval-augmented) LLMs from the simplest to the most sophisticated ones based on the query complexity. Also, this selection process is operationalized with a classifier, which is a smaller LM trained to predict the complexity level of incoming queries with automatically collected labels, obtained from actual predicted outcomes of models and inherent inductive biases in datasets. This approach offers a balanced strategy, seamlessly adapting between the iterative and single-step retrieval-augmented LLMs, as well as the no-retrieval methods, in response to a range of query complexities. We validate our model on a set of open-domain QA datasets, covering multiple query complexities, and show that ours enhances the overall efficiency and accuracy of QA systems, compared to relevant baselines including the adaptive retrieval approaches. Code is available at: https://github.com/starsuzi/Adaptive-RAG.

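The routing idea reduces to a classifier in front of a strategy table. A minimal sketch, with a toy heuristic standing in for the paper's trained small-LM classifier (every name below is illustrative, not from the paper's code):

```python
from typing import Callable

def classify_complexity(query: str) -> str:
    """Stand-in for the paper's trained small-LM classifier; a toy heuristic."""
    words = query.split()
    if len(words) <= 6:
        return "simple"
    if any(w in words for w in ("and", "then", "compare")):
        return "complex"
    return "moderate"

# Placeholder strategies; in practice these would be LLM-only, one-shot RAG,
# and multi-step RAG pipelines respectively.
def no_retrieval(q: str) -> str: return f"direct({q})"
def single_step_rag(q: str) -> str: return f"single_rag({q})"
def iterative_rag(q: str) -> str: return f"iterative_rag({q})"

STRATEGIES: dict[str, Callable[[str], str]] = {
    "simple": no_retrieval,
    "moderate": single_step_rag,
    "complex": iterative_rag,
}

def adaptive_answer(query: str) -> str:
    return STRATEGIES[classify_complexity(query)](query)

print(adaptive_answer("Who wrote Hamlet?"))                           # -> no retrieval
print(adaptive_answer("Compare RAPTOR and CRAG and then summarize"))  # -> iterative
```
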
## Self-Discover: Large Language Models Self-Compose Reasoning Structures

-- **arXiv id:** 2402.03620v1
-- **Title:** Self-Discover: Large Language Models Self-Compose Reasoning Structures
- **Authors:** Pei Zhou, Jay Pujara, Xiang Ren, et al.
-- **Published Date:** 2024-02-06
-- **URL:** http://arxiv.org/abs/2402.03620v1
+- **arXiv id:** [2402.03620v1](http://arxiv.org/abs/2402.03620v1) **Published Date:** 2024-02-06
- **LangChain:**

  - **Cookbook:** [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb)
@@ -68,13 +107,33 @@ the self-discovered reasoning structures are universally applicable across
model families: from PaLM 2-L to GPT-4, and from GPT-4 to Llama2, and share commonalities with human reasoning patterns.

## RAG-Fusion: a New Take on Retrieval-Augmented Generation

- **Authors:** Zackary Rackauckas
- **arXiv id:** [2402.03367v2](http://arxiv.org/abs/2402.03367v2) **Published Date:** 2024-01-31
- **LangChain:**

  - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)

**Abstract:** Infineon has identified a need for engineers, account managers, and customers to rapidly obtain product information. This problem is traditionally addressed with retrieval-augmented generation (RAG) chatbots, but in this study, I evaluated the use of the newly popularized RAG-Fusion method. RAG-Fusion combines RAG and reciprocal rank fusion (RRF) by generating multiple queries, reranking them with reciprocal scores and fusing the documents and scores. Through manually evaluating answers on accuracy, relevance, and comprehensiveness, I found that RAG-Fusion was able to provide accurate and comprehensive answers due to the generated queries contextualizing the original query from various perspectives. However, some answers strayed off topic when the generated queries' relevance to the original query is insufficient. This research marks significant progress in artificial intelligence (AI) and natural language processing (NLP) applications and demonstrates transformations in a global and multi-industry context.

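The reciprocal rank fusion step that RAG-Fusion builds on is compact enough to sketch: each document scores the sum over the ranked lists of 1/(k + rank), with k = 60 as the commonly used constant (the ranked lists and document ids below are made up for illustration):

```python
from collections import defaultdict

def reciprocal_rank_fusion(rankings: list[list[str]], k: int = 60) -> list[str]:
    """Fuse several ranked lists of document ids: each document scores
    sum(1 / (k + rank)) over the lists it appears in (rank is 1-based)."""
    scores: dict[str, float] = defaultdict(float)
    for ranking in rankings:
        for rank, doc_id in enumerate(ranking, start=1):
            scores[doc_id] += 1.0 / (k + rank)
    return sorted(scores, key=scores.__getitem__, reverse=True)

# Retrieval results for three generated variants of one user query:
fused = reciprocal_rank_fusion([
    ["doc_a", "doc_b", "doc_c"],
    ["doc_b", "doc_a", "doc_d"],
    ["doc_b", "doc_c", "doc_a"],
])
print(fused)  # doc_b first: it is consistently top-ranked across the variants
```
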
## RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval

-- **arXiv id:** 2401.18059v1
-- **Title:** RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval
- **Authors:** Parth Sarthi, Salman Abdullah, Aditi Tuli, et al.
-- **Published Date:** 2024-01-31
-- **URL:** http://arxiv.org/abs/2401.18059v1
+- **arXiv id:** [2401.18059v1](http://arxiv.org/abs/2401.18059v1) **Published Date:** 2024-01-31
- **LangChain:**

  - **Cookbook:** [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb)
@@ -96,13 +155,11 @@ benchmark by 20% in absolute accuracy.

## Corrective Retrieval Augmented Generation

-- **arXiv id:** 2401.15884v2
-- **Title:** Corrective Retrieval Augmented Generation
- **Authors:** Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al.
-- **Published Date:** 2024-01-29
-- **URL:** http://arxiv.org/abs/2401.15884v2
+- **arXiv id:** [2401.15884v2](http://arxiv.org/abs/2401.15884v2) **Published Date:** 2024-01-29
- **LangChain:**

+  - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
  - **Cookbook:** [langgraph_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)

**Abstract:** Large language models (LLMs) inevitably exhibit hallucinations since the
@@ -124,13 +181,36 @@ RAG-based approaches. Experiments on four datasets covering short- and
long-form generation tasks show that CRAG can significantly improve the performance of RAG-based approaches.

## Code Generation with AlphaCodium: From Prompt Engineering to Flow Engineering

- **Authors:** Tal Ridnik, Dedy Kredo, Itamar Friedman
- **arXiv id:** [2401.08500v1](http://arxiv.org/abs/2401.08500v1) **Published Date:** 2024-01-16
- **LangChain:**

  - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)

**Abstract:** Code generation problems differ from common natural language problems - they require matching the exact syntax of the target language, identifying happy paths and edge cases, paying attention to numerous small details in the problem spec, and addressing other code-specific issues and requirements. Hence, many of the optimizations and tricks that have been successful in natural language generation may not be effective for code tasks. In this work, we propose a new approach to code generation by LLMs, which we call AlphaCodium - a test-based, multi-stage, code-oriented iterative flow, that improves the performances of LLMs on code problems. We tested AlphaCodium on a challenging code generation dataset called CodeContests, which includes competitive programming problems from platforms such as Codeforces. The proposed flow consistently and significantly improves results. On the validation set, for example, GPT-4 accuracy (pass@5) increased from 19% with a single well-designed direct prompt to 44% with the AlphaCodium flow. Many of the principles and best practices acquired in this work, we believe, are broadly applicable to general code generation tasks. Full implementation is available at: https://github.com/Codium-ai/AlphaCodium

## Mixtral of Experts

-- **arXiv id:** 2401.04088v1
-- **Title:** Mixtral of Experts
- **Authors:** Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al.
-- **Published Date:** 2024-01-08
-- **URL:** http://arxiv.org/abs/2401.04088v1
+- **arXiv id:** [2401.04088v1](http://arxiv.org/abs/2401.04088v1) **Published Date:** 2024-01-08
- **LangChain:**

  - **Cookbook:** [together_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb)
@@ -152,11 +232,8 @@ the base and instruct models are released under the Apache 2.0 license.

## Dense X Retrieval: What Retrieval Granularity Should We Use?

-- **arXiv id:** 2312.06648v2
-- **Title:** Dense X Retrieval: What Retrieval Granularity Should We Use?
- **Authors:** Tong Chen, Hongwei Wang, Sihao Chen, et al.
-- **Published Date:** 2023-12-11
-- **URL:** http://arxiv.org/abs/2312.06648v2
+- **arXiv id:** [2312.06648v2](http://arxiv.org/abs/2312.06648v2) **Published Date:** 2023-12-11
- **LangChain:**

  - **Template:** [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)
@@ -181,11 +258,8 @@ information.
|
||||
|
||||
## Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models

- **Authors:** Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al.
- **arXiv id:** [2311.09210v1](http://arxiv.org/abs/2311.09210v1) **Published Date:** 2023-11-15
- **LangChain:**

  - **Template:** [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)

@@ -215,13 +289,11 @@

outside the pre-training knowledge scope.

## Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection

- **Authors:** Akari Asai, Zeqiu Wu, Yizhong Wang, et al.
- **arXiv id:** [2310.11511v1](http://arxiv.org/abs/2310.11511v1) **Published Date:** 2023-10-17
- **LangChain:**

  - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
  - **Cookbook:** [langgraph_self_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)

**Abstract:** Despite their remarkable capabilities, large language models (LLMs) often

@@ -248,13 +320,11 @@

to these models.

## Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models

- **Authors:** Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al.
- **arXiv id:** [2310.06117v2](http://arxiv.org/abs/2310.06117v2) **Published Date:** 2023-10-09
- **LangChain:**

  - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
  - **Template:** [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting)
  - **Cookbook:** [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)

@@ -269,13 +339,31 @@

including STEM, Knowledge QA, and Multi-Hop Reasoning. For instance, Step-Back
Prompting improves PaLM-2L performance on MMLU (Physics and Chemistry) by 7%
and 11% respectively, TimeQA by 27%, and MuSiQue by 7%.

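A condensed sketch of step-back prompting (see also the `stepback-qa-prompting` template): first ask a more generic, higher-level question, then answer the original question grounded in both contexts. The `llm` and `retriever` are assumptions.

```python
def step_back_answer(llm, retriever, question: str) -> str:
    """Retrieve for both the original and the step-back question."""
    generic_q = llm.invoke(
        f"Rewrite this as a more generic, higher-level question: {question}"
    ).content
    docs = retriever.invoke(question) + retriever.invoke(generic_q)
    context = "\n\n".join(d.page_content for d in docs)
    return llm.invoke(
        f"Context:\n{context}\n\nQuestion: {question}\nAnswer:"
    ).content
```
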
## Skeleton-of-Thought: Prompting LLMs for Efficient Parallel Generation

- **Authors:** Xuefei Ning, Zinan Lin, Zixuan Zhou, et al.
- **arXiv id:** [2307.15337v3](http://arxiv.org/abs/2307.15337v3) **Published Date:** 2023-07-28
- **LangChain:**

  - **Template:** [skeleton-of-thought](https://python.langchain.com/docs/templates/skeleton-of-thought)

**Abstract:** This work aims at decreasing the end-to-end generation latency of large
language models (LLMs). One of the major causes of the high generation latency
is the sequential decoding approach adopted by almost all state-of-the-art
LLMs. In this work, motivated by the thinking and writing process of humans, we
propose Skeleton-of-Thought (SoT), which first guides LLMs to generate the
skeleton of the answer, and then conducts parallel API calls or batched
decoding to complete the contents of each skeleton point in parallel. Not only
does SoT provide considerable speed-ups across 12 LLMs, but it can also
potentially improve the answer quality on several question categories. SoT is
an initial attempt at data-centric optimization for inference efficiency, and
showcases the potential of eliciting high-quality answers by explicitly
planning the answer structure in language.

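A rough SoT-style sketch (not the paper's code): elicit a short skeleton, then expand every point in parallel with `llm.batch`, which LangChain runnables support. The `llm` is an assumption.

```python
def skeleton_of_thought(llm, question: str) -> str:
    """Elicit a short skeleton, then expand every point in parallel."""
    skeleton = llm.invoke(
        "Give a short numbered skeleton (3-5 points, a few words each) "
        f"for answering: {question}"
    ).content
    points = [p for p in skeleton.splitlines() if p.strip()]
    expansions = llm.batch(  # parallel expansion is where the speed-up comes from
        [f"Question: {question}\nSkeleton:\n{skeleton}\nExpand this point fully: {p}"
         for p in points]
    )
    return "\n\n".join(e.content for e in expansions)
```
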
## Llama 2: Open Foundation and Fine-Tuned Chat Models

- **Authors:** Hugo Touvron, Louis Martin, Kevin Stone, et al.
- **arXiv id:** [2307.09288v2](http://arxiv.org/abs/2307.09288v2) **Published Date:** 2023-07-18
- **LangChain:**

  - **Cookbook:** [Semi_Structured_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb)

@@ -290,13 +378,32 @@

detailed description of our approach to fine-tuning and safety improvements of
Llama 2-Chat in order to enable the community to build on our work and
contribute to the responsible development of LLMs.

## Lost in the Middle: How Language Models Use Long Contexts

- **Authors:** Nelson F. Liu, Kevin Lin, John Hewitt, et al.
- **arXiv id:** [2307.03172v3](http://arxiv.org/abs/2307.03172v3) **Published Date:** 2023-07-06
- **LangChain:**

  - **Documentation:** [docs/how_to/long_context_reorder](https://python.langchain.com/docs/how_to/long_context_reorder)

**Abstract:** While recent language models have the ability to take long contexts as input,
relatively little is known about how well they use longer context. We analyze
the performance of language models on two tasks that require identifying
relevant information in their input contexts: multi-document question answering
and key-value retrieval. We find that performance can degrade significantly
when changing the position of relevant information, indicating that current
language models do not robustly make use of information in long input contexts.
In particular, we observe that performance is often highest when relevant
information occurs at the beginning or end of the input context, and
significantly degrades when models must access relevant information in the
middle of long contexts, even for explicitly long-context models. Our analysis
provides a better understanding of how language models use their input context
and provides new evaluation protocols for future long-context language models.

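One mitigation available in LangChain (see the linked how-to guide) is `LongContextReorder`, which moves the most relevant documents to the start and end of the context. The `retriever` here is assumed to be defined elsewhere.

```python
from langchain_community.document_transformers import LongContextReorder

docs = retriever.invoke("What did the study find about long contexts?")
reordered = LongContextReorder().transform_documents(docs)
# The most relevant documents now sit at the beginning and end of the list,
# the positions this paper finds models actually attend to.
```
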
## Query Rewriting for Retrieval-Augmented Large Language Models

- **Authors:** Xinbei Ma, Yeyun Gong, Pengcheng He, et al.
- **arXiv id:** [2305.14283v3](http://arxiv.org/abs/2305.14283v3) **Published Date:** 2023-05-23
- **LangChain:**

  - **Template:** [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read)

@@ -322,11 +429,8 @@

for retrieval-augmented LLM.

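A minimal rewrite-retrieve-read sketch (see the `rewrite-retrieve-read` template): let the LLM rewrite the raw user question into a better search query before retrieval. The `llm` and `retriever` are assumptions.

```python
def rewrite_retrieve_read(llm, retriever, question: str) -> str:
    """Rewrite the question into a search query, then retrieve and read."""
    query = llm.invoke(
        f"Rewrite this question as a concise search query: {question}"
    ).content
    context = "\n\n".join(d.page_content for d in retriever.invoke(query))
    return llm.invoke(f"Context:\n{context}\n\nQuestion: {question}").content
```
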
## Large Language Model Guided Tree-of-Thought

- **Authors:** Jieyi Long
- **arXiv id:** [2305.08291v1](http://arxiv.org/abs/2305.08291v1) **Published Date:** 2023-05-15
- **LangChain:**

  - **API Reference:** [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)

@@ -352,11 +456,8 @@

implementation of the ToT-based Sudoku solver is available on GitHub:

## Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models

- **Authors:** Lei Wang, Wanyu Xu, Yihuai Lan, et al.
- **arXiv id:** [2305.04091v3](http://arxiv.org/abs/2305.04091v3) **Published Date:** 2023-05-06
- **LangChain:**

  - **Cookbook:** [plan_and_execute_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)

@@ -383,16 +484,37 @@

Prompting, and has comparable performance with 8-shot CoT prompting on the math
reasoning problem. The code can be found at
https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting.

## Zero-Shot Listwise Document Reranking with a Large Language Model

- **Authors:** Xueguang Ma, Xinyu Zhang, Ronak Pradeep, et al.
- **arXiv id:** [2305.02156v1](http://arxiv.org/abs/2305.02156v1) **Published Date:** 2023-05-03
- **LangChain:**

  - **Documentation:** [docs/how_to/contextual_compression](https://python.langchain.com/docs/how_to/contextual_compression)
  - **API Reference:** [langchain...LLMListwiseRerank](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html#langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank)

**Abstract:** Supervised ranking methods based on bi-encoder or cross-encoder architectures
have shown success in multi-stage text ranking tasks, but they require large
amounts of relevance judgments as training data. In this work, we propose
Listwise Reranker with a Large Language Model (LRL), which achieves strong
reranking effectiveness without using any task-specific training data.
Different from the existing pointwise ranking methods, where documents are
scored independently and ranked according to the scores, LRL directly generates
a reordered list of document identifiers given the candidate documents.
Experiments on three TREC web search datasets demonstrate that LRL not only
outperforms zero-shot pointwise methods when reranking first-stage retrieval
results, but can also act as a final-stage reranker to improve the top-ranked
results of a pointwise method for improved efficiency. Additionally, we apply
our approach to subsets of MIRACL, a recent multilingual retrieval dataset,
with results showing its potential to generalize across different languages.

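A hedged usage sketch of the `LLMListwiseRerank` compressor referenced above; the exact constructor details may differ across versions, and `docs` is assumed to be a list of retrieved `Document`s.

```python
from langchain.retrievers.document_compressors import LLMListwiseRerank
from langchain_openai import ChatOpenAI

reranker = LLMListwiseRerank.from_llm(
    ChatOpenAI(model="gpt-4o-mini", temperature=0), top_n=3
)
top_docs = reranker.compress_documents(docs, "what is hybrid retrieval?")
```
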
## Visual Instruction Tuning

- **Authors:** Haotian Liu, Chunyuan Li, Qingyang Wu, et al.
- **arXiv id:** [2304.08485v2](http://arxiv.org/abs/2304.08485v2) **Published Date:** 2023-04-17
- **LangChain:**

  - **Cookbook:** [Semi_structured_multi_modal_RAG_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb), [Semi_structured_and_multi_modal_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb)

**Abstract:** Instruction tuning large language models (LLMs) using machine-generated
instruction-following data has improved zero-shot capabilities on new tasks,

@@ -412,14 +534,11 @@

publicly available.

## Generative Agents: Interactive Simulacra of Human Behavior

- **Authors:** Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al.
- **arXiv id:** [2304.03442v2](http://arxiv.org/abs/2304.03442v2) **Published Date:** 2023-04-07
- **LangChain:**

  - **Cookbook:** [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb), [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb)

**Abstract:** Believable proxies of human behavior can empower interactive applications
ranging from immersive environments to rehearsal spaces for interpersonal

@@ -448,11 +567,8 @@

interaction patterns for enabling believable simulations of human behavior.

## CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society

- **Authors:** Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al.
- **arXiv id:** [2303.17760v2](http://arxiv.org/abs/2303.17760v2) **Published Date:** 2023-03-31
- **LangChain:**

  - **Cookbook:** [camel_role_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)

@@ -478,11 +594,8 @@

agents and beyond: https://github.com/camel-ai/camel.

## HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face

- **Authors:** Yongliang Shen, Kaitao Song, Xu Tan, et al.
- **arXiv id:** [2303.17580v4](http://arxiv.org/abs/2303.17580v4) **Published Date:** 2023-03-30
- **LangChain:**

  - **API Reference:** [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents)

@@ -508,40 +621,13 @@

modalities and domains and achieve impressive results in language, vision,
speech, and other challenging tasks, which paves a new way towards the
realization of artificial general intelligence.

## GPT-4 Technical Report

- **Authors:** OpenAI, Josh Achiam, Steven Adler, et al.
- **arXiv id:** [2303.08774v6](http://arxiv.org/abs/2303.08774v6) **Published Date:** 2023-03-15
- **LangChain:**

  - **Documentation:** [docs/integrations/vectorstores/mongodb_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)

**Abstract:** We report the development of GPT-4, a large-scale, multimodal model which can
accept image and text inputs and produce text outputs. While less capable than
humans in many real-world scenarios, GPT-4 exhibits human-level performance on
various professional and academic benchmarks, including passing a simulated bar
exam with a score around the top 10% of test takers. GPT-4 is a
Transformer-based model pre-trained to predict the next token in a document.
The post-training alignment process results in improved performance on measures
of factuality and adherence to desired behavior. A core component of this
project was developing infrastructure and optimization methods that behave
predictably across a wide range of scales. This allowed us to accurately
predict some aspects of GPT-4's performance based on models trained with no
more than 1/1,000th the compute of GPT-4.

## A Watermark for Large Language Models

- **Authors:** John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al.
- **arXiv id:** [2301.10226v4](http://arxiv.org/abs/2301.10226v4) **Published Date:** 2023-01-24
- **LangChain:**

  - **API Reference:** [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)

**Abstract:** Potential harms of large language models can be mitigated by watermarking
model output, i.e., embedding signals into generated text that are invisible to

@@ -559,13 +645,11 @@

family, and discuss robustness and security.

## Precise Zero-Shot Dense Retrieval without Relevance Labels

- **Authors:** Luyu Gao, Xueguang Ma, Jimmy Lin, et al.
- **arXiv id:** [2212.10496v1](http://arxiv.org/abs/2212.10496v1) **Published Date:** 2022-12-20
- **LangChain:**

  - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
  - **API Reference:** [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)
  - **Template:** [hyde](https://python.langchain.com/docs/templates/hyde)
  - **Cookbook:** [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)

@@ -588,13 +672,37 @@

state-of-the-art unsupervised dense retriever Contriever and shows strong
performance comparable to fine-tuned retrievers, across various tasks (e.g. web
search, QA, fact verification) and languages (e.g. sw, ko, ja).

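A hedged sketch of HyDE via the `HypotheticalDocumentEmbedder` referenced above: generate a hypothetical answer document, embed it, and use that vector for retrieval. The model choices and the `prompt_key` value are assumptions that may vary by version.

```python
from langchain.chains import HypotheticalDocumentEmbedder
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

hyde = HypotheticalDocumentEmbedder.from_llm(
    llm=ChatOpenAI(temperature=0),
    base_embeddings=OpenAIEmbeddings(),
    prompt_key="web_search",  # one of the bundled HyDE prompt styles
)
vector = hyde.embed_query("how does zero-shot dense retrieval work?")
# `vector` embeds the generated hypothetical document, not the raw query.
```
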
## Constitutional AI: Harmlessness from AI Feedback

- **Authors:** Yuntao Bai, Saurav Kadavath, Sandipan Kundu, et al.
- **arXiv id:** [2212.08073v1](http://arxiv.org/abs/2212.08073v1) **Published Date:** 2022-12-15
- **LangChain:**

  - **Documentation:** [docs/versions/migrating_chains/constitutional_chain](https://python.langchain.com/docs/versions/migrating_chains/constitutional_chain)

**Abstract:** As AI systems become more capable, we would like to enlist their help to
supervise other AIs. We experiment with methods for training a harmless AI
assistant through self-improvement, without any human labels identifying
harmful outputs. The only human oversight is provided through a list of rules
or principles, and so we refer to the method as 'Constitutional AI'. The
process involves both a supervised learning and a reinforcement learning phase.
In the supervised phase we sample from an initial model, then generate
self-critiques and revisions, and then finetune the original model on revised
responses. In the RL phase, we sample from the finetuned model, use a model to
evaluate which of the two samples is better, and then train a preference model
from this dataset of AI preferences. We then train with RL using the preference
model as the reward signal, i.e. we use 'RL from AI Feedback' (RLAIF). As a
result we are able to train a harmless but non-evasive AI assistant that
engages with harmful queries by explaining its objections to them. Both the SL
and RL methods can leverage chain-of-thought style reasoning to improve the
human-judged performance and transparency of AI decision making. These methods
make it possible to control AI behavior more precisely and with far fewer human
labels.

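A compressed sketch of the critique-and-revise step from the supervised phase (not Anthropic's code): sample a response, self-critique it against a principle, and revise. The `llm` and the principle text are assumptions.

```python
PRINCIPLE = "Choose the response that is least harmful and most honest."

def critique_and_revise(llm, prompt: str) -> str:
    """One supervised-phase round: draft, self-critique, revise."""
    draft = llm.invoke(prompt).content
    critique = llm.invoke(
        f"Principle: {PRINCIPLE}\nResponse: {draft}\n"
        "List any ways the response violates the principle."
    ).content
    return llm.invoke(
        f"Principle: {PRINCIPLE}\nResponse: {draft}\nCritique: {critique}\n"
        "Rewrite the response so it complies with the principle."
    ).content
```
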
## Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments

- **Authors:** Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al.
- **arXiv id:** [2212.07425v3](http://arxiv.org/abs/2212.07425v3) **Published Date:** 2022-12-12
- **LangChain:**

  - **API Reference:** [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)

@@ -623,11 +731,8 @@

further work on logical fallacy identification.

## Complementary Explanations for Effective In-Context Learning

- **Authors:** Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al.
- **arXiv id:** [2211.13892v2](http://arxiv.org/abs/2211.13892v2) **Published Date:** 2022-11-25
- **LangChain:**

  - **API Reference:** [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)

@@ -651,14 +756,11 @@

performance across three real-world tasks on multiple LLMs.

## PAL: Program-aided Language Models

- **Authors:** Luyu Gao, Aman Madaan, Shuyan Zhou, et al.
- **arXiv id:** [2211.10435v2](http://arxiv.org/abs/2211.10435v2) **Published Date:** 2022-11-18
- **LangChain:**

  - **API Reference:** [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain)
  - **Cookbook:** [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)

**Abstract:** Large language models (LLMs) have recently demonstrated an impressive ability

@@ -684,16 +786,32 @@

accuracy on the GSM8K benchmark of math word problems, surpassing PaLM-540B
which uses chain-of-thought by absolute 15% top-1. Our code and data are
publicly available at http://reasonwithpal.com/.

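A hedged usage sketch of the `PALChain` referenced above: the LLM writes a small Python program for a word problem and the runtime executes it. The model choice and the opt-in flag reflect recent versions and may differ in yours.

```python
from langchain_experimental.pal_chain import PALChain
from langchain_openai import OpenAI

pal = PALChain.from_math_prompt(
    OpenAI(temperature=0),
    allow_dangerous_code=True,  # required opt-in, since LLM-written code is executed
)
pal.invoke(
    "If Cindy has four pets and Marcia has two more than Cindy, "
    "and Jan has twice as many as Marcia, how many pets are there in total?"
)
```
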
## An Analysis of Fusion Functions for Hybrid Retrieval

- **Authors:** Sebastian Bruch, Siyu Gai, Amir Ingber
- **arXiv id:** [2210.11934v2](http://arxiv.org/abs/2210.11934v2) **Published Date:** 2022-10-21
- **LangChain:**

  - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)

**Abstract:** We study hybrid search in text retrieval where lexical and semantic search
are fused together with the intuition that the two are complementary in how
they model relevance. In particular, we examine fusion by a convex combination
(CC) of lexical and semantic scores, as well as the Reciprocal Rank Fusion
(RRF) method, and identify their advantages and potential pitfalls. Contrary to
existing studies, we find RRF to be sensitive to its parameters; that the
learning of a CC fusion is generally agnostic to the choice of score
normalization; that CC outperforms RRF in in-domain and out-of-domain settings;
and finally, that CC is sample efficient, requiring only a small set of
training examples to tune its only parameter to a target domain.

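The two fusion functions the paper compares are simple enough to state directly; these illustrative implementations (assumed, not the paper's code) show CC mixing normalized per-system scores while RRF combines ranks only.

```python
def convex_combination(lexical: float, semantic: float, alpha: float = 0.8) -> float:
    # alpha is CC's single tunable parameter; scores are assumed normalized
    return alpha * semantic + (1 - alpha) * lexical

def reciprocal_rank_fusion(ranks: list[int], k: int = 60) -> float:
    # a document's fused score from its rank in each result list
    return sum(1.0 / (k + rank) for rank in ranks)
```
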
## ReAct: Synergizing Reasoning and Acting in Language Models

- **Authors:** Shunyu Yao, Jeffrey Zhao, Dian Yu, et al.
- **arXiv id:** [2210.03629v3](http://arxiv.org/abs/2210.03629v3) **Published Date:** 2022-10-06
- **LangChain:**

  - **Documentation:** [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping), [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/concepts](https://python.langchain.com/docs/concepts)
  - **API Reference:** [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)

**Abstract:** While large language models (LLMs) have demonstrated impressive capabilities

@@ -721,11 +839,8 @@

Project site with code: https://react-lm.github.io

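A hedged sketch of the `create_react_agent` helper referenced above, using the classic ReAct prompt from the LangChain hub; `tools` (a list of LangChain tools) and the model choice are assumptions.

```python
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_openai import ChatOpenAI

prompt = hub.pull("hwchase17/react")  # the canonical ReAct prompt
agent = create_react_agent(ChatOpenAI(temperature=0), tools, prompt)
AgentExecutor(agent=agent, tools=tools).invoke(
    {"input": "Who directed the highest-grossing film of 2010?"}
)
```
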
## Deep Lake: a Lakehouse for Deep Learning

- **Authors:** Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al.
- **arXiv id:** [2209.10785v2](http://arxiv.org/abs/2209.10785v2) **Published Date:** 2022-09-22
- **LangChain:**

  - **Documentation:** [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)

@@ -747,13 +862,41 @@

visualization engine, or (c) deep learning frameworks without sacrificing GPU
utilization. Datasets stored in Deep Lake can be accessed from PyTorch,
TensorFlow, JAX, and integrate with numerous MLOps tools.

## Matryoshka Representation Learning

- **Authors:** Aditya Kusupati, Gantavya Bhatt, Aniket Rege, et al.
- **arXiv id:** [2205.13147v4](http://arxiv.org/abs/2205.13147v4) **Published Date:** 2022-05-26
- **LangChain:**

  - **Documentation:** [docs/integrations/providers/snowflake](https://python.langchain.com/docs/integrations/providers/snowflake)

**Abstract:** Learned representations are a central component in modern ML systems, serving
a multitude of downstream tasks. When training such representations, it is
often the case that computational and statistical constraints for each
downstream task are unknown. In this context rigid, fixed capacity
representations can be either over or under-accommodating to the task at hand.
This leads us to ask: can we design a flexible representation that can adapt to
multiple downstream tasks with varying computational resources? Our main
contribution is Matryoshka Representation Learning (MRL) which encodes
information at different granularities and allows a single embedding to adapt
to the computational constraints of downstream tasks. MRL minimally modifies
existing representation learning pipelines and imposes no additional cost
during inference and deployment. MRL learns coarse-to-fine representations that
are at least as accurate and rich as independently trained low-dimensional
representations. The flexibility within the learned Matryoshka Representations
offers: (a) up to 14x smaller embedding size for ImageNet-1K classification at
the same level of accuracy; (b) up to 14x real-world speed-ups for large-scale
retrieval on ImageNet-1K and 4K; and (c) up to 2% accuracy improvements for
long-tail few-shot classification, all while being as robust as the original
representations. Finally, we show that MRL extends seamlessly to web-scale
datasets (ImageNet, JFT) across various modalities -- vision (ViT, ResNet),
vision + language (ALIGN) and language (BERT). MRL code and pretrained models
are open-sourced at https://github.com/RAIVNLab/MRL.

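A small illustration of the MRL property (assumed embeddings, not the paper's code): a Matryoshka-trained embedding can simply be truncated to a prefix and renormalized to get a cheaper representation.

```python
import numpy as np

def shrink(embedding: np.ndarray, dim: int) -> np.ndarray:
    prefix = embedding[:dim]                # keep the coarse prefix dimensions
    return prefix / np.linalg.norm(prefix)  # renormalize for cosine similarity

full = np.random.randn(768)  # stand-in for a Matryoshka-trained embedding
small = shrink(full, 64)     # 12x smaller, usable for a cheap first retrieval pass
```
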
## Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages

- **Authors:** Kevin Heffernan, Onur Çelebi, Holger Schwenk
- **arXiv id:** [2205.12654v1](http://arxiv.org/abs/2205.12654v1) **Published Date:** 2022-05-25
- **LangChain:**

  - **API Reference:** [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)

@@ -778,14 +921,12 @@

encoders, mine bitexts, and validate the bitexts by training NMT systems.

## Evaluating the Text-to-SQL Capabilities of Large Language Models

- **Authors:** Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau
- **arXiv id:** [2204.00498v1](http://arxiv.org/abs/2204.00498v1) **Published Date:** 2022-03-15
- **LangChain:**

  - **Documentation:** [docs/tutorials/sql_qa](https://python.langchain.com/docs/tutorials/sql_qa)
  - **API Reference:** [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)

**Abstract:** We perform an empirical evaluation of Text-to-SQL capabilities of the Codex
language model. We find that, without any finetuning, Codex is a strong

@@ -797,14 +938,11 @@

few-shot examples.

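A brief usage sketch of the `SQLDatabase` utility referenced above, which underpins LangChain's text-to-SQL tutorial; the database URI is an assumption.

```python
from langchain_community.utilities import SQLDatabase

db = SQLDatabase.from_uri("sqlite:///Chinook.db")  # example database path
print(db.get_usable_table_names())             # schema to show the model
print(db.run("SELECT COUNT(*) FROM Artist;"))  # execute generated SQL
```
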
## Locally Typical Sampling

- **Authors:** Clara Meister, Tiago Pimentel, Gian Wiher, et al.
- **arXiv id:** [2202.00666v5](http://arxiv.org/abs/2202.00666v5) **Published Date:** 2022-02-01
- **LangChain:**

  - **API Reference:** [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)

**Abstract:** Today's probabilistic language generators fall short when it comes to
producing coherent and fluent text despite the fact that the underlying models

@@ -827,13 +965,32 @@

locally typical sampling offers competitive performance (in both abstractive
summarization and story generation) in terms of quality while consistently
reducing degenerate repetitions.

## ColBERTv2: Effective and Efficient Retrieval via Lightweight Late Interaction

- **Authors:** Keshav Santhanam, Omar Khattab, Jon Saad-Falcon, et al.
- **arXiv id:** [2112.01488v3](http://arxiv.org/abs/2112.01488v3) **Published Date:** 2021-12-02
- **LangChain:**

  - **Documentation:** [docs/integrations/retrievers/ragatouille](https://python.langchain.com/docs/integrations/retrievers/ragatouille), [docs/integrations/providers/ragatouille](https://python.langchain.com/docs/integrations/providers/ragatouille), [docs/concepts](https://python.langchain.com/docs/concepts), [docs/integrations/providers/dspy](https://python.langchain.com/docs/integrations/providers/dspy)

**Abstract:** Neural information retrieval (IR) has greatly advanced search and other
knowledge-intensive language tasks. While many neural IR methods encode queries
and documents into single-vector representations, late interaction models
produce multi-vector representations at the granularity of each token and
decompose relevance modeling into scalable token-level computations. This
decomposition has been shown to make late interaction more effective, but it
inflates the space footprint of these models by an order of magnitude. In this
work, we introduce ColBERTv2, a retriever that couples an aggressive residual
compression mechanism with a denoised supervision strategy to simultaneously
improve the quality and space footprint of late interaction. We evaluate
ColBERTv2 across a wide range of benchmarks, establishing state-of-the-art
quality within and outside the training domain while reducing the space
footprint of late interaction models by 6--10$\times$.

## Learning Transferable Visual Models From Natural Language Supervision

- **Authors:** Alec Radford, Jong Wook Kim, Chris Hallacy, et al.
- **arXiv id:** [2103.00020v1](http://arxiv.org/abs/2103.00020v1) **Published Date:** 2021-02-26
- **LangChain:**

  - **API Reference:** [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)

@@ -859,16 +1016,77 @@

zero-shot without needing to use any of the 1.28 million training examples it
was trained on. We release our code and pre-trained model weights at
https://github.com/OpenAI/CLIP.

## Language Models are Few-Shot Learners

- **Authors:** Tom B. Brown, Benjamin Mann, Nick Ryder, et al.
- **arXiv id:** [2005.14165v4](http://arxiv.org/abs/2005.14165v4) **Published Date:** 2020-05-28
- **LangChain:**

  - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)

**Abstract:** Recent work has demonstrated substantial gains on many NLP tasks and
benchmarks by pre-training on a large corpus of text followed by fine-tuning on
a specific task. While typically task-agnostic in architecture, this method
still requires task-specific fine-tuning datasets of thousands or tens of
thousands of examples. By contrast, humans can generally perform a new language
task from only a few examples or from simple instructions - something which
current NLP systems still largely struggle to do. Here we show that scaling up
language models greatly improves task-agnostic, few-shot performance, sometimes
even reaching competitiveness with prior state-of-the-art fine-tuning
approaches. Specifically, we train GPT-3, an autoregressive language model with
175 billion parameters, 10x more than any previous non-sparse language model,
and test its performance in the few-shot setting. For all tasks, GPT-3 is
applied without any gradient updates or fine-tuning, with tasks and few-shot
demonstrations specified purely via text interaction with the model. GPT-3
achieves strong performance on many NLP datasets, including translation,
question-answering, and cloze tasks, as well as several tasks that require
on-the-fly reasoning or domain adaptation, such as unscrambling words, using a
novel word in a sentence, or performing 3-digit arithmetic. At the same time,
we also identify some datasets where GPT-3's few-shot learning still struggles,
as well as some datasets where GPT-3 faces methodological issues related to
training on large web corpora. Finally, we find that GPT-3 can generate samples
of news articles which human evaluators have difficulty distinguishing from
articles written by humans. We discuss broader societal impacts of this finding
and of GPT-3 in general.

## Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks

- **Authors:** Patrick Lewis, Ethan Perez, Aleksandra Piktus, et al.
- **arXiv id:** [2005.11401v4](http://arxiv.org/abs/2005.11401v4) **Published Date:** 2020-05-22
- **LangChain:**

  - **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)

**Abstract:** Large pre-trained language models have been shown to store factual knowledge
in their parameters, and achieve state-of-the-art results when fine-tuned on
downstream NLP tasks. However, their ability to access and precisely manipulate
knowledge is still limited, and hence on knowledge-intensive tasks, their
performance lags behind task-specific architectures. Additionally, providing
provenance for their decisions and updating their world knowledge remain open
research problems. Pre-trained models with a differentiable access mechanism to
explicit non-parametric memory can overcome this issue, but have so far been
only investigated for extractive downstream tasks. We explore a general-purpose
fine-tuning recipe for retrieval-augmented generation (RAG) -- models which
combine pre-trained parametric and non-parametric memory for language
generation. We introduce RAG models where the parametric memory is a
pre-trained seq2seq model and the non-parametric memory is a dense vector index
of Wikipedia, accessed with a pre-trained neural retriever. We compare two RAG
formulations, one which conditions on the same retrieved passages across the
whole generated sequence, the other can use different passages per token. We
fine-tune and evaluate our models on a wide range of knowledge-intensive NLP
tasks and set the state-of-the-art on three open domain QA tasks, outperforming
parametric seq2seq models and task-specific retrieve-and-extract architectures.
For language generation tasks, we find that RAG models generate more specific,
diverse and factual language than a state-of-the-art parametric-only seq2seq
baseline.

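A minimal retrieval-augmented generation chain in the spirit of this paper, expressed in LCEL (see the LCEL section below); the `retriever` and model choice are assumptions.

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_template(
    "Answer using only this context:\n{context}\n\nQuestion: {question}"
)
rag_chain = (
    {"context": retriever, "question": RunnablePassthrough()}  # `retriever` assumed
    | prompt
    | ChatOpenAI(model="gpt-4o-mini")
    | StrOutputParser()
)
rag_chain.invoke("What is non-parametric memory?")
```
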
## CTRL: A Conditional Transformer Language Model for Controllable Generation

- **Authors:** Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al.
- **arXiv id:** [1909.05858v2](http://arxiv.org/abs/1909.05858v2) **Published Date:** 2019-09-11
- **LangChain:**

  - **API Reference:** [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)

**Abstract:** Large-scale language models show promising text generation capabilities, but
users cannot easily control particular aspects of the generated text. We

@@ -881,32 +1099,4 @@

codes also allow CTRL to predict which parts of the training data are most
likely given a sequence. This provides a potential method for analyzing large
amounts of data via model-based source attribution. We have released multiple
full-sized, pretrained versions of CTRL at https://github.com/salesforce/ctrl.

## Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks

- **Authors:** Nils Reimers, Iryna Gurevych
- **arXiv id:** [1908.10084v1](http://arxiv.org/abs/1908.10084v1) **Published Date:** 2019-08-27
- **LangChain:**

  - **Documentation:** [docs/integrations/text_embedding/sentence_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)

**Abstract:** BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) have set a new
state-of-the-art performance on sentence-pair regression tasks like semantic
textual similarity (STS). However, it requires that both sentences are fed into
the network, which causes a massive computational overhead: Finding the most
similar pair in a collection of 10,000 sentences requires about 50 million
inference computations (~65 hours) with BERT. The construction of BERT makes it
unsuitable for semantic similarity search as well as for unsupervised tasks
like clustering.
In this publication, we present Sentence-BERT (SBERT), a modification of the
pretrained BERT network that uses siamese and triplet network structures to
derive semantically meaningful sentence embeddings that can be compared using
cosine-similarity. This reduces the effort for finding the most similar pair
from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while
maintaining the accuracy from BERT.
We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning
tasks, where it outperforms other state-of-the-art sentence embeddings methods.

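Minimal SBERT usage via the `sentence-transformers` package that the linked integration wraps; the model name is an assumption.

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("all-MiniLM-L6-v2")
a, b = model.encode(["How old are you?", "What is your age?"])
print(util.cos_sim(a, b))  # cosine similarity of the two sentence embeddings
```
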
@@ -15,11 +15,6 @@

The interfaces for core components like LLMs, vector stores, retrievers and more
No third party integrations are defined here.
The dependencies are kept purposefully very lightweight.

### `langchain`

The main `langchain` package contains chains, agents, and retrieval strategies that make up an application's cognitive architecture.

@@ -33,6 +28,11 @@

Key partner packages are separated out (see below).
This contains all integrations for various components (LLMs, vector stores, retrievers).
All dependencies in this package are optional to keep the package as lightweight as possible.

### Partner packages

While the long tail of integrations is in `langchain-community`, we split popular integrations into their own packages (e.g. `langchain-openai`, `langchain-anthropic`, etc).
This was done in order to improve support for these important integrations.

### [`langgraph`](https://langchain-ai.github.io/langgraph)

`langgraph` is an extension of `langchain` aimed at

@@ -61,28 +61,28 @@

A developer platform that lets you debug, test, evaluate, and monitor LLM applications.

## LangChain Expression Language (LCEL)
<span data-heading-keywords="lcel"></span>

`LangChain Expression Language`, or `LCEL`, is a declarative way to chain LangChain components.
LCEL was designed from day 1 to **support putting prototypes in production, with no code changes**, from the simplest “prompt + LLM” chain to the most complex chains (we’ve seen folks successfully run LCEL chains with 100s of steps in production). To highlight a few of the reasons you might want to use LCEL:

- **First-class streaming support:**
When you build your chains with LCEL you get the best possible time-to-first-token (time elapsed until the first chunk of output comes out). For some chains this means, e.g., that we stream tokens straight from an LLM to a streaming output parser, and you get back parsed, incremental chunks of output at the same rate as the LLM provider outputs the raw tokens.

- **Async support:**
Any chain built with LCEL can be called both with the synchronous API (e.g. in your Jupyter notebook while prototyping) as well as with the asynchronous API (e.g. in a [LangServe](/docs/langserve/) server). This enables using the same code for prototypes and in production, with great performance, and the ability to handle many concurrent requests in the same server.

- **Optimized parallel execution:**
Whenever your LCEL chains have steps that can be executed in parallel (e.g. if you fetch documents from multiple retrievers) we automatically do it, both in the sync and the async interfaces, for the smallest possible latency.

- **Retries and fallbacks:**
Configure retries and fallbacks for any part of your LCEL chain. This is a great way to make your chains more reliable at scale. We’re currently working on adding streaming support for retries/fallbacks, so you can get the added reliability without any latency cost.

- **Access intermediate results:**
For more complex chains it’s often very useful to access the results of intermediate steps even before the final output is produced. This can be used to let end-users know something is happening, or even just to debug your chain. You can stream intermediate results, and it’s available on every [LangServe](/docs/langserve) server.

- **Input and output schemas:**
Input and output schemas give every LCEL chain Pydantic and JSONSchema schemas inferred from the structure of your chain. This can be used for validation of inputs and outputs, and is an integral part of LangServe.

- [**Seamless LangSmith tracing**](https://docs.smith.langchain.com):
As your chains get more and more complex, it becomes increasingly important to understand what exactly is happening at every step.
With LCEL, **all** steps are automatically logged to [LangSmith](https://docs.smith.langchain.com/) for maximum observability and debuggability.

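A minimal LCEL chain composed with the `|` operator (the model name is an assumption): prompt, then chat model, then string output parser.

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
chain = prompt | ChatOpenAI(model="gpt-4o-mini") | StrOutputParser()

chain.invoke({"topic": "bears"})           # sync
# await chain.ainvoke({"topic": "bears"})  # same chain via the async API
```
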
@@ -97,7 +97,7 @@

For guides on how to do specific tasks with LCEL, check out [the relevant how-to

### Runnable interface
<span data-heading-keywords="invoke,runnable"></span>

To make it as easy as possible to create custom chains, we've implemented a ["Runnable"](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) protocol. Many LangChain components implement the `Runnable` protocol, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. There are also several useful primitives for working with runnables, which you can read about below.

This is a standard interface, which makes it easy to define custom chains as well as invoke them in a standard way.
The standard interface includes:

@@ -186,7 +186,7 @@

For a full list of LangChain model providers with multimodal models, [check out

<span data-heading-keywords="llm,llms"></span>

:::caution
Pure text-in/text-out LLMs tend to be older or lower-level. Many new popular models are best used as [chat completion models](/docs/concepts/#chat-models),
even for non-chat use cases.

You are probably looking for [the section above instead](/docs/concepts/#chat-models).

@@ -201,7 +201,7 @@

When messages are passed in as input, they will be formatted into a string under

LangChain does not host any LLMs; rather, we rely on third-party integrations.

For specifics on how to use LLMs, see the [how-to guides](/docs/how_to/#llms).

### Messages
|
||||
|
||||
@@ -209,22 +209,25 @@ Some language models take a list of messages as input and return a message.
|
||||
There are a few different types of messages.
|
||||
All messages have a `role`, `content`, and `response_metadata` property.
|
||||
|
||||
The `role` describes WHO is saying the message.
|
||||
The `role` describes WHO is saying the message. The standard roles are "user", "assistant", "system", and "tool".
|
||||
LangChain has different message classes for different roles.
|
||||
|
||||
The `content` property describes the content of the message.
|
||||
This can be a few different things:
|
||||
|
||||
- A string (most models deal this type of content)
|
||||
- A string (most models deal with this type of content)
|
||||
- A List of dictionaries (this is used for multimodal input, where the dictionary contains information about that input type and that input location)
|
||||
|
||||
Optionally, messages can have a `name` property which allows for differentiating between multiple speakers with the same role.
|
||||
For example, if there are two users in the chat history it can be useful to differentiate between them. Not all models support this.
|
||||
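As a minimal sketch (the contents and names below are invented), constructing messages with roles and optional names looks like this:

```python
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage

messages = [
    SystemMessage(content="You are a terse assistant."),
    HumanMessage(content="Hi there!", name="alice"),    # optional name distinguishes speakers
    HumanMessage(content="Hello as well!", name="bob"),
    AIMessage(content="Hello, Alice and Bob!"),
]
```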
|
||||
#### HumanMessage
|
||||
|
||||
This represents a message from the user.
|
||||
This represents a message with role "user".
|
||||
|
||||
#### AIMessage
|
||||
|
||||
This represents a message from the model. In addition to the `content` property, these messages also have:
|
||||
This represents a message with role "assistant". In addition to the `content` property, these messages also have:
|
||||
|
||||
**`response_metadata`**
|
||||
|
||||
@@ -244,11 +247,11 @@ This property returns a list of `ToolCall`s. A `ToolCall` is a dictionary with t
|
||||
|
||||
#### SystemMessage
|
||||
|
||||
This represents a system message, which tells the model how to behave. Not every model provider supports this.
|
||||
This represents a message with role "system", which tells the model how to behave. Not every model provider supports this.
|
||||
|
||||
#### ToolMessage
|
||||
|
||||
This represents the result of a tool call. In addition to `role` and `content`, this message has:
|
||||
This represents a message with role "tool", which contains the result of calling a tool. In addition to `role` and `content`, this message has:
|
||||
|
||||
- a `tool_call_id` field which conveys the id of the call to the tool that was called to produce this result.
|
||||
- an `artifact` field which can be used to pass along arbitrary artifacts of the tool execution which are useful to track but which should not be sent to the model.
|
||||
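For illustration, a minimal sketch (the id and payload below are made up):

```python
from langchain_core.messages import ToolMessage

msg = ToolMessage(
    content="42",                    # the result the model will see
    tool_call_id="call_abc123",      # id of the tool call this message answers
    artifact={"raw_rows": [40, 2]},  # tracked downstream, but not sent to the model
)
```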
@@ -343,6 +346,7 @@ For specifics on how to use prompt templates, see the [relevant how-to guides he
|
||||
|
||||
### Example selectors
|
||||
One common prompting technique for achieving better performance is to include examples as part of the prompt.
|
||||
This is known as [few-shot prompting](/docs/concepts/#few-shot-prompting).
|
||||
This gives the language model concrete examples of how it should behave.
|
||||
Sometimes these examples are hardcoded into the prompt, but for more advanced situations it may be preferable to select them dynamically.
|
||||
Example Selectors are classes responsible for selecting and then formatting examples into prompts.
|
||||
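As one minimal sketch (the examples are invented), a length-based selector picks as many examples as fit within a budget:

```python
from langchain_core.example_selectors import LengthBasedExampleSelector
from langchain_core.prompts import PromptTemplate

example_prompt = PromptTemplate.from_template("Input: {input}\nOutput: {output}")

selector = LengthBasedExampleSelector(
    examples=[
        {"input": "happy", "output": "sad"},
        {"input": "tall", "output": "short"},
    ],
    example_prompt=example_prompt,
    max_length=25,  # rough word budget for the formatted examples
)

selector.select_examples({"input": "energetic"})  # returns as many examples as fit
```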
@@ -361,38 +365,32 @@ See documentation for that [here](/docs/concepts/#function-tool-calling).
|
||||
|
||||
:::
|
||||
|
||||
Responsible for taking the output of a model and transforming it to a more suitable format for downstream tasks.
|
||||
An `output parser` is responsible for taking the output of a model and transforming it into a more suitable format for downstream tasks.
|
||||
Useful when you are using LLMs to generate structured data, or to normalize output from chat models and LLMs.
|
||||
|
||||
LangChain supports many different types of output parsers. The table below lists the output parsers LangChain supports, along with the following information:
|
||||
|
||||
**Name**: The name of the output parser
|
||||
|
||||
**Supports Streaming**: Whether the output parser supports streaming.
|
||||
|
||||
**Has Format Instructions**: Whether the output parser has format instructions. This is generally available except when (a) the desired schema is not specified in the prompt but rather in other parameters (like OpenAI function calling), or (b) when the OutputParser wraps another OutputParser.
|
||||
|
||||
**Calls LLM**: Whether this output parser itself calls an LLM. This is usually only done by output parsers that attempt to correct misformatted output.
|
||||
|
||||
**Input Type**: Expected input type. Most output parsers work on both strings and messages, but some (like OpenAI Functions) need a message with specific kwargs.
|
||||
|
||||
**Output Type**: The output type of the object returned by the parser.
|
||||
|
||||
**Description**: Our commentary on this output parser and when to use it.
|
||||
- **Name**: The name of the output parser
|
||||
- **Supports Streaming**: Whether the output parser supports streaming.
|
||||
- **Has Format Instructions**: Whether the output parser has format instructions. This is generally available except when (a) the desired schema is not specified in the prompt but rather in other parameters (like OpenAI function calling), or (b) when the OutputParser wraps another OutputParser.
|
||||
- **Calls LLM**: Whether this output parser itself calls an LLM. This is usually only done by output parsers that attempt to correct misformatted output.
|
||||
- **Input Type**: Expected input type. Most output parsers work on both strings and messages, but some (like OpenAI Functions) need a message with specific kwargs.
|
||||
- **Output Type**: The output type of the object returned by the parser.
|
||||
- **Description**: Our commentary on this output parser and when to use it.
|
||||
|
||||
| Name | Supports Streaming | Has Format Instructions | Calls LLM | Input Type | Output Type | Description |
|
||||
|-----------------|--------------------|-------------------------------|-----------|----------------------------------|----------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| [JSON](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.json.JsonOutputParser.html#langchain_core.output_parsers.json.JsonOutputParser) | ✅ | ✅ | | `str` \| `Message` | JSON object | Returns a JSON object as specified. You can specify a Pydantic model and it will return JSON for that model. Probably the most reliable output parser for getting structured data that does NOT use function calling. |
|
||||
| [XML](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.xml.XMLOutputParser.html#langchain_core.output_parsers.xml.XMLOutputParser) | ✅ | ✅ | | `str` \| `Message` | `dict` | Returns a dictionary of tags. Use when XML output is needed. Use with models that are good at writing XML (like Anthropic's). |
|
||||
| [CSV](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.list.CommaSeparatedListOutputParser.html#langchain_core.output_parsers.list.CommaSeparatedListOutputParser) | ✅ | ✅ | | `str` \| `Message` | `List[str]` | Returns a list of comma separated values. |
|
||||
| [OutputFixing](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.fix.OutputFixingParser.html#langchain.output_parsers.fix.OutputFixingParser) | | | ✅ | `str` \| `Message` | | Wraps another output parser. If that output parser errors, then this will pass the error message and the bad output to an LLM and ask it to fix the output. |
|
||||
| [RetryWithError](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.retry.RetryWithErrorOutputParser.html#langchain.output_parsers.retry.RetryWithErrorOutputParser) | | | ✅ | `str` \| `Message` | | Wraps another output parser. If that output parser errors, then this will pass the original inputs, the bad output, and the error message to an LLM and ask it to fix it. Compared to OutputFixingParser, this one also sends the original instructions. |
|
||||
| [Pydantic](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.pydantic.PydanticOutputParser.html#langchain_core.output_parsers.pydantic.PydanticOutputParser) | | ✅ | | `str` \| `Message` | `pydantic.BaseModel` | Takes a user defined Pydantic model and returns data in that format. |
|
||||
| [YAML](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.yaml.YamlOutputParser.html#langchain.output_parsers.yaml.YamlOutputParser) | | ✅ | | `str` \| `Message` | `pydantic.BaseModel` | Takes a user defined Pydantic model and returns data in that format. Uses YAML to encode it. |
|
||||
| [PandasDataFrame](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.pandas_dataframe.PandasDataFrameOutputParser.html#langchain.output_parsers.pandas_dataframe.PandasDataFrameOutputParser) | | ✅ | | `str` \| `Message` | `dict` | Useful for doing operations with pandas DataFrames. |
|
||||
| [Enum](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.enum.EnumOutputParser.html#langchain.output_parsers.enum.EnumOutputParser) | | ✅ | | `str` \| `Message` | `Enum` | Parses response into one of the provided enum values. |
|
||||
| [Datetime](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.datetime.DatetimeOutputParser.html#langchain.output_parsers.datetime.DatetimeOutputParser) | | ✅ | | `str` \| `Message` | `datetime.datetime` | Parses response into a datetime string. |
|
||||
| [Structured](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.structured.StructuredOutputParser.html#langchain.output_parsers.structured.StructuredOutputParser) | | ✅ | | `str` \| `Message` | `Dict[str, str]` | An output parser that returns structured information. It is less powerful than other output parsers since it only allows for fields to be strings. This can be useful when you are working with smaller LLMs. |
|
||||
| [JSON](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.json.JsonOutputParser.html#langchain_core.output_parsers.json.JsonOutputParser) | ✅ | ✅ | | `str` \| `Message` | JSON object | Returns a JSON object as specified. You can specify a Pydantic model and it will return JSON for that model. Probably the most reliable output parser for getting structured data that does NOT use function calling. |
|
||||
| [XML](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.xml.XMLOutputParser.html#langchain_core.output_parsers.xml.XMLOutputParser) | ✅ | ✅ | | `str` \| `Message` | `dict` | Returns a dictionary of tags. Use when XML output is needed. Use with models that are good at writing XML (like Anthropic's). |
|
||||
| [CSV](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.list.CommaSeparatedListOutputParser.html#langchain_core.output_parsers.list.CommaSeparatedListOutputParser) | ✅ | ✅ | | `str` \| `Message` | `List[str]` | Returns a list of comma separated values. |
|
||||
| [OutputFixing](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.fix.OutputFixingParser.html#langchain.output_parsers.fix.OutputFixingParser) | | | ✅ | `str` \| `Message` | | Wraps another output parser. If that output parser errors, then this will pass the error message and the bad output to an LLM and ask it to fix the output. |
|
||||
| [RetryWithError](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.retry.RetryWithErrorOutputParser.html#langchain.output_parsers.retry.RetryWithErrorOutputParser) | | | ✅ | `str` \| `Message` | | Wraps another output parser. If that output parser errors, then this will pass the original inputs, the bad output, and the error message to an LLM and ask it to fix it. Compared to OutputFixingParser, this one also sends the original instructions. |
|
||||
| [Pydantic](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.pydantic.PydanticOutputParser.html#langchain_core.output_parsers.pydantic.PydanticOutputParser) | | ✅ | | `str` \| `Message` | `pydantic.BaseModel` | Takes a user defined Pydantic model and returns data in that format. |
|
||||
| [YAML](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.yaml.YamlOutputParser.html#langchain.output_parsers.yaml.YamlOutputParser) | | ✅ | | `str` \| `Message` | `pydantic.BaseModel` | Takes a user defined Pydantic model and returns data in that format. Uses YAML to encode it. |
|
||||
| [PandasDataFrame](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.pandas_dataframe.PandasDataFrameOutputParser.html#langchain.output_parsers.pandas_dataframe.PandasDataFrameOutputParser) | | ✅ | | `str` \| `Message` | `dict` | Useful for doing operations with pandas DataFrames. |
|
||||
| [Enum](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.enum.EnumOutputParser.html#langchain.output_parsers.enum.EnumOutputParser) | | ✅ | | `str` \| `Message` | `Enum` | Parses response into one of the provided enum values. |
|
||||
| [Datetime](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.datetime.DatetimeOutputParser.html#langchain.output_parsers.datetime.DatetimeOutputParser) | | ✅ | | `str` \| `Message` | `datetime.datetime` | Parses response into a datetime string. |
|
||||
| [Structured](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.structured.StructuredOutputParser.html#langchain.output_parsers.structured.StructuredOutputParser) | | ✅ | | `str` \| `Message` | `Dict[str, str]` | An output parser that returns structured information. It is less powerful than other output parsers since it only allows for fields to be strings. This can be useful when you are working with smaller LLMs. |
|
||||
|
||||
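As a minimal sketch, a parser can be used on its own or as the last step of a chain:

```python
from langchain_core.output_parsers import CommaSeparatedListOutputParser

parser = CommaSeparatedListOutputParser()

parser.get_format_instructions()   # text you can splice into your prompt
parser.invoke("red, green, blue")  # ['red', 'green', 'blue']
```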
For specifics on how to use output parsers, see the [relevant how-to guides here](/docs/how_to/#output-parsers).
|
||||
|
||||
@@ -503,7 +501,7 @@ For specifics on how to use retrievers, see the [relevant how-to guides here](/d
|
||||
For some techniques, such as [indexing and retrieval with multiple vectors per document](/docs/how_to/multi_vector/) or
|
||||
[caching embeddings](/docs/how_to/caching_embeddings/), having a form of key-value (KV) storage is helpful.
|
||||
|
||||
LangChain includes a [`BaseStore`](https://api.python.langchain.com/en/latest/stores/langchain_core.stores.BaseStore.html) interface,
|
||||
LangChain includes a [`BaseStore`](https://python.langchain.com/api_reference/core/stores/langchain_core.stores.BaseStore.html) interface,
|
||||
which allows for storage of arbitrary data. However, LangChain components that require KV-storage accept a
|
||||
more specific `BaseStore[str, bytes]` instance that stores binary data (referred to as a `ByteStore`), and internally take care of
|
||||
encoding and decoding data for their specific needs.
|
||||
@@ -512,7 +510,7 @@ This means that as a user, you only need to think about one type of store rather
|
||||
|
||||
#### Interface
|
||||
|
||||
All [`BaseStores`](https://api.python.langchain.com/en/latest/stores/langchain_core.stores.BaseStore.html) support the following interface. Note that the interface allows
|
||||
All [`BaseStores`](https://python.langchain.com/api_reference/core/stores/langchain_core.stores.BaseStore.html) support the following interface. Note that the interface allows
|
||||
for modifying **multiple** key-value pairs at once:
|
||||
|
||||
- `mget(keys: Sequence[str]) -> List[Optional[bytes]]`: get the contents of multiple keys, returning `None` for any key that does not exist
|
||||
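A minimal sketch of this interface using the in-memory implementation (the keys and values are invented):

```python
from langchain_core.stores import InMemoryByteStore

store = InMemoryByteStore()
store.mset([("doc:1", b"hello"), ("doc:2", b"world")])  # set several pairs at once
store.mget(["doc:1", "doc:3"])                          # [b'hello', None]
```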
@@ -530,10 +528,10 @@ Tools are needed whenever you want a model to control parts of your code or call
|
||||
|
||||
A tool consists of:
|
||||
|
||||
1. The name of the tool.
|
||||
2. A description of what the tool does.
|
||||
3. A JSON schema defining the inputs to the tool.
|
||||
4. A function (and, optionally, an async variant of the function).
|
||||
1. The `name` of the tool.
|
||||
2. A `description` of what the tool does.
|
||||
3. A `JSON schema` defining the inputs to the tool.
|
||||
4. A `function` (and, optionally, an async variant of the function).
|
||||
|
||||
When a tool is bound to a model, the name, description and JSON schema are provided as context to the model.
|
||||
Given a list of tools and a set of instructions, a model can request to call one or more tools with specific inputs.
|
||||
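A minimal sketch of defining a tool with the `@tool` decorator (the function below is invented):

```python
from langchain_core.tools import tool


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""  # the docstring becomes the tool's description
    return a * b


multiply.name                      # 'multiply'
multiply.args                      # JSON-schema-derived description of the inputs
multiply.invoke({"a": 3, "b": 4})  # 12
```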
@@ -646,14 +644,14 @@ The results of those actions can then be fed back into the agent and it determin
|
||||
[LangGraph](https://github.com/langchain-ai/langgraph) is an extension of LangChain specifically aimed at creating highly controllable and customizable agents.
|
||||
Please check out that documentation for a more in-depth overview of agent concepts.
|
||||
|
||||
There is a legacy agent concept in LangChain that we are moving towards deprecating: `AgentExecutor`.
|
||||
There is a legacy `agent` concept in LangChain that we are moving towards deprecating: `AgentExecutor`.
|
||||
AgentExecutor was essentially a runtime for agents.
|
||||
It was a great place to get started, however, it was not flexible enough as you started to have more customized agents.
|
||||
In order to solve that we built LangGraph to be this flexible, highly-controllable runtime.
|
||||
|
||||
If you are still using AgentExecutor, do not fear: we still have a guide on [how to use AgentExecutor](/docs/how_to/agent_executor).
|
||||
It is recommended, however, that you start to transition to LangGraph.
|
||||
In order to assist in this we have put together a [transition guide on how to do so](/docs/how_to/migrate_agent).
|
||||
In order to assist in this, we have put together a [transition guide on how to do so](/docs/how_to/migrate_agent).
|
||||
|
||||
#### ReAct agents
|
||||
<span data-heading-keywords="react,react agent"></span>
|
||||
@@ -710,10 +708,10 @@ You can subscribe to these events by using the `callbacks` argument available th
|
||||
|
||||
Callback handlers can either be `sync` or `async`:
|
||||
|
||||
* Sync callback handlers implement the [BaseCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) interface.
|
||||
* Async callback handlers implement the [AsyncCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) interface.
|
||||
* Sync callback handlers implement the [BaseCallbackHandler](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) interface.
|
||||
* Async callback handlers implement the [AsyncCallbackHandler](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) interface.
|
||||
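For example, a minimal sync handler sketch that prints each newly generated LLM token:

```python
from langchain_core.callbacks import BaseCallbackHandler


class PrintTokenHandler(BaseCallbackHandler):
    """Minimal sync handler that prints each new LLM token as it arrives."""

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        print(token, end="", flush=True)
```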
|
||||
During run-time LangChain configures an appropriate callback manager (e.g., [CallbackManager](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManager.html) or [AsyncCallbackManager](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.AsyncCallbackManager.html) which will be responsible for calling the appropriate method on each "registered" callback handler when the event is triggered.
|
||||
During run-time, LangChain configures an appropriate callback manager (e.g., [CallbackManager](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.manager.CallbackManager.html) or [AsyncCallbackManager](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.manager.AsyncCallbackManager.html)), which will be responsible for calling the appropriate method on each "registered" callback handler when the event is triggered.
|
||||
|
||||
#### Passing callbacks
|
||||
|
||||
@@ -739,7 +737,7 @@ callbacks to any child objects.
|
||||
:::important Async in Python<=3.10
|
||||
|
||||
Any `RunnableLambda`, `RunnableGenerator`, or `Tool` that invokes other runnables
|
||||
and is running async in python<=3.10, will have to propagate callbacks to child
|
||||
and is running `async` in python<=3.10, will have to propagate callbacks to child
|
||||
objects manually. This is because LangChain cannot automatically propagate
|
||||
callbacks to child objects in this case.
|
||||
|
||||
@@ -781,7 +779,7 @@ For models (or other components) that don't support streaming natively, this ite
|
||||
you could still use the same general pattern when calling them. Using `.stream()` will also automatically call the model in streaming mode
|
||||
without the need to provide additional config.
|
||||
|
||||
The type of each outputted chunk depends on the type of component - for example, chat models yield [`AIMessageChunks`](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html).
|
||||
The type of each output chunk depends on the type of component - for example, chat models yield [`AIMessageChunks`](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessageChunk.html).
|
||||
Because this method is part of [LangChain Expression Language](/docs/concepts/#langchain-expression-language-lcel),
|
||||
you can handle formatting differences from different outputs using an [output parser](/docs/concepts/#output-parsers) to transform
|
||||
each yielded chunk.
|
||||
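A minimal sketch (assuming `langchain-openai` is installed and `OPENAI_API_KEY` is set; the model name is just an example):

```python
from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-4o-mini")

for chunk in model.stream("Why is the sky blue?"):
    # each chunk is an AIMessageChunk carrying a partial piece of the answer
    print(chunk.content, end="", flush=True)
```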
@@ -829,10 +827,10 @@ including a table listing available events.
|
||||
#### Callbacks
|
||||
|
||||
The lowest level way to stream outputs from LLMs in LangChain is via the [callbacks](/docs/concepts/#callbacks) system. You can pass a
|
||||
callback handler that handles the [`on_llm_new_token`](https://api.python.langchain.com/en/latest/callbacks/langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.html#langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.on_llm_new_token) event into LangChain components. When that component is invoked, any
|
||||
callback handler that handles the [`on_llm_new_token`](https://python.langchain.com/api_reference/langchain/callbacks/langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.html#langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.on_llm_new_token) event into LangChain components. When that component is invoked, any
|
||||
[LLM](/docs/concepts/#llms) or [chat model](/docs/concepts/#chat-models) contained in the component calls
|
||||
the callback with the generated token. Within the callback, you could pipe the tokens into some other destination, e.g. an HTTP response.
|
||||
You can also handle the [`on_llm_end`](https://api.python.langchain.com/en/latest/callbacks/langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.html#langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.on_llm_end) event to perform any necessary cleanup.
|
||||
You can also handle the [`on_llm_end`](https://python.langchain.com/api_reference/langchain/callbacks/langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.html#langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.on_llm_end) event to perform any necessary cleanup.
|
||||
|
||||
You can see [this how-to section](/docs/how_to/#callbacks) for more specifics on using callbacks.
|
||||
|
||||
@@ -869,7 +867,7 @@ Furthermore, using tokens can also improve efficiency, since the model processes
|
||||
### Function/tool calling
|
||||
|
||||
:::info
|
||||
We use the term tool calling interchangeably with function calling. Although
|
||||
We use the term `tool calling` interchangeably with `function calling`. Although
|
||||
function calling is sometimes meant to refer to invocations of a single function,
|
||||
we treat all models as though they can return multiple tool or function calls in
|
||||
each message.
|
||||
@@ -947,7 +945,7 @@ Here's an example:
|
||||
```python
|
||||
from typing import Optional
|
||||
|
||||
from langchain_core.pydantic_v1 import BaseModel, Field
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class Joke(BaseModel):
|
||||
@@ -964,7 +962,6 @@ structured_llm.invoke("Tell me a joke about cats")
|
||||
|
||||
```
|
||||
Joke(setup='Why was the cat sitting on the computer?', punchline='To keep an eye on the mouse!', rating=None)
|
||||
|
||||
```
|
||||
|
||||
We recommend this method as a starting point when working with structured output:
|
||||
@@ -1065,7 +1062,7 @@ a `tool_calls` field containing `args` that match the desired shape.
|
||||
There are several acceptable formats you can use to bind tools to a model in LangChain. Here's one example:
|
||||
|
||||
```python
|
||||
from langchain_core.pydantic_v1 import BaseModel, Field
|
||||
from pydantic import BaseModel, Field
|
||||
from langchain_openai import ChatOpenAI
|
||||
|
||||
class ResponseFormatter(BaseModel):
|
||||
@@ -1101,10 +1098,91 @@ The following how-to guides are good practical resources for using function/tool
|
||||
|
||||
For a full list of model providers that support tool calling, [see this table](/docs/integrations/chat/#advanced-features).
|
||||
|
||||
### Few-shot prompting
|
||||
|
||||
One of the most effective ways to improve model performance is to give a model examples of
|
||||
what you want it to do. The technique of adding example inputs and expected outputs
|
||||
to a model prompt is known as "few-shot prompting". The technique is based on the
|
||||
[Language Models are Few-Shot Learners](https://arxiv.org/abs/2005.14165) paper.
|
||||
There are a few things to think about when doing few-shot prompting:
|
||||
|
||||
1. How are examples generated?
|
||||
2. How many examples are in each prompt?
|
||||
3. How are examples selected at runtime?
|
||||
4. How are examples formatted in the prompt?
|
||||
|
||||
Here are the considerations for each.
|
||||
|
||||
#### 1. Generating examples
|
||||
|
||||
The first and most important step of few-shot prompting is coming up with a good dataset of examples. Good examples should be relevant at runtime, clear, informative, and provide information that was not already known to the model.
|
||||
|
||||
At a high-level, the basic ways to generate examples are:
|
||||
- Manual: a person or people generate examples they think are useful.
|
||||
- Better model: a better (presumably more expensive/slower) model's responses are used as examples for a worse (presumably cheaper/faster) model.
|
||||
- User feedback: users (or labelers) leave feedback on interactions with the application and examples are generated based on that feedback (for example, all interactions with positive feedback could be turned into examples).
|
||||
- LLM feedback: same as user feedback but the process is automated by having models evaluate themselves.
|
||||
|
||||
Which approach is best depends on your task. For tasks where a small number of core principles need to be understood really well, it can be valuable to hand-craft a few really good examples.
|
||||
For tasks where the space of correct behaviors is broader and more nuanced, it can be useful to generate many examples in a more automated fashion so that there's a higher likelihood of there being some highly relevant examples for any runtime input.
|
||||
|
||||
**Single-turn vs. multi-turn examples**
|
||||
|
||||
Another dimension to think about when generating examples is what the example is actually showing.
|
||||
|
||||
The simplest types of examples just have a user input and an expected model output. These are single-turn examples.
|
||||
|
||||
A more complex type of example is one where the example is an entire conversation, usually one in which a model initially responds incorrectly and a user then tells the model how to correct its answer.
|
||||
This is called a multi-turn example. Multi-turn examples can be useful for more nuanced tasks where it's useful to show common errors and spell out exactly why they're wrong and what should be done instead.
|
||||
|
||||
#### 2. Number of examples
|
||||
|
||||
Once we have a dataset of examples, we need to think about how many examples should be in each prompt.
|
||||
The key tradeoff is that more examples generally improve performance, but larger prompts increase costs and latency.
|
||||
And beyond some threshold, having too many examples can start to confuse the model.
|
||||
Finding the right number of examples is highly dependent on the model, the task, the quality of the examples, and your cost and latency constraints.
|
||||
Anecdotally, the better the model is, the fewer examples it needs to perform well, and the more quickly you hit steeply diminishing returns when adding more examples.
|
||||
But the best (and only reliable) way to answer this question is to run some experiments with different numbers of examples.
|
||||
|
||||
#### 3. Selecting examples
|
||||
|
||||
Assuming we are not adding our entire example dataset into each prompt, we need to have a way of selecting examples from our dataset based on a given input. We can do this:
|
||||
- Randomly
|
||||
- By (semantic or keyword-based) similarity of the inputs
|
||||
- Based on some other constraints, like token size
|
||||
|
||||
LangChain has a number of [`ExampleSelectors`](/docs/concepts/#example-selectors) which make it easy to use any of these techniques.
|
||||
|
||||
Generally, selecting by semantic similarity leads to the best model performance. But how much this matters is again model- and task-specific, and is something worth experimenting with.
|
||||
|
||||
#### 4. Formatting examples
|
||||
|
||||
Most state-of-the-art models these days are chat models, so we'll focus on formatting examples for those. Our basic options are to insert the examples:
|
||||
- In the system prompt as a string
|
||||
- As their own messages
|
||||
|
||||
If we insert our examples into the system prompt as a string, we'll need to make sure it's clear to the model where each example begins and which parts are the input versus output. Different models respond better to different syntaxes, like [ChatML](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chat-markup-language), XML, TypeScript, etc.
|
||||
|
||||
If we insert our examples as messages, where each example is represented as a sequence of Human, AI messages, we might want to also assign [names](/docs/concepts/#messages) to our messages like `"example_user"` and `"example_assistant"` to make it clear that these messages correspond to different actors than the latest input message.
|
||||
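For instance, a sketch (the example contents are invented) of few-shot examples inserted as named messages:

```python
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage

few_shot_messages = [
    SystemMessage(content="Answer with a single word."),
    HumanMessage(content="2 + 2?", name="example_user"),
    AIMessage(content="four", name="example_assistant"),
    HumanMessage(content="3 + 5?"),  # the real user input comes last
]
```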
|
||||
**Formatting tool call examples**
|
||||
|
||||
One area where formatting examples as messages can be tricky is when our example outputs have tool calls. This is because different models have different constraints on what types of message sequences are allowed when any tool calls are generated.
|
||||
- Some models require that any AIMessage with tool calls be immediately followed by ToolMessages for every tool call.
|
||||
- Some models additionally require that any ToolMessages be immediately followed by an AIMessage before the next HumanMessage.
|
||||
- Some models require that tools are passed in to the model if there are any tool calls / ToolMessages in the chat history.
|
||||
|
||||
These requirements are model-specific and should be checked for the model you are using. If your model requires ToolMessages after tool calls and/or AIMessages after ToolMessages and your examples only include expected tool calls and not the actual tool outputs, you can try adding dummy ToolMessages / AIMessages to the end of each example with generic contents to satisfy the API constraints.
|
||||
In these cases it's especially worth experimenting with inserting your examples as strings versus messages, as having dummy messages can adversely affect certain models.
|
||||
|
||||
You can see a case study of how Anthropic and OpenAI respond to different few-shot prompting techniques on two different tool calling benchmarks [here](https://blog.langchain.dev/few-shot-prompting-to-improve-tool-calling-performance/).
|
||||
|
||||
### Retrieval
|
||||
|
||||
LLMs are trained on a large but fixed dataset, limiting their ability to reason over private or recent information. Fine-tuning an LLM with specific facts is one way to mitigate this, but is often [poorly suited for factual recall](https://www.anyscale.com/blog/fine-tuning-is-for-form-not-facts) and [can be costly](https://www.glean.com/blog/how-to-build-an-ai-assistant-for-the-enterprise).
|
||||
Retrieval is the process of providing relevant information to an LLM to improve its response for a given input. Retrieval augmented generation (RAG) is the process of grounding the LLM generation (output) using the retrieved information.
|
||||
LLMs are trained on a large but fixed dataset, limiting their ability to reason over private or recent information.
|
||||
Fine-tuning an LLM with specific facts is one way to mitigate this, but is often [poorly suited for factual recall](https://www.anyscale.com/blog/fine-tuning-is-for-form-not-facts) and [can be costly](https://www.glean.com/blog/how-to-build-an-ai-assistant-for-the-enterprise).
|
||||
`Retrieval` is the process of providing relevant information to an LLM to improve its response for a given input.
|
||||
`Retrieval augmented generation` (`RAG`) ([paper](https://arxiv.org/abs/2005.11401)) is the process of grounding the LLM generation (output) using the retrieved information.
|
||||
|
||||
:::tip
|
||||
|
||||
@@ -1124,12 +1202,12 @@ First, consider the user input(s) to your RAG system. Ideally, a RAG system can
|
||||
**Using an LLM to review and optionally modify the input is the central idea behind query translation.** This serves as a general buffer, optimizing raw user inputs for your retrieval system.
|
||||
For example, this can be as simple as extracting keywords or as complex as generating multiple sub-questions for a complex query.
|
||||
|
||||
| Name | When to use | Description |
|
||||
|---------------|-------------|-------------|
|
||||
| Name | When to use | Description |
|
||||
|---------------|-------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| [Multi-query](/docs/how_to/MultiQueryRetriever/) | When you need to cover multiple perspectives of a question. | Rewrite the user question from multiple perspectives, retrieve documents for each rewritten question, return the unique documents for all queries. |
|
||||
| [Decomposition](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a question can be broken down into smaller subproblems. | Decompose a question into a set of subproblems / questions, which can either be solved sequentially (use the answer from first + retrieval to answer the second) or in parallel (consolidate each answer into final answer). |
|
||||
| [Step-back](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a higher-level conceptual understanding is required. | First prompt the LLM to ask a generic step-back question about higher-level concepts or principles, and retrieve relevant facts about them. Use this grounding to help answer the user question. |
|
||||
| [HyDE](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | If you have challenges retrieving relevant documents using the raw user inputs. | Use an LLM to convert questions into hypothetical documents that answer the question. Use the embedded hypothetical documents to retrieve real documents with the premise that doc-doc similarity search can produce more relevant matches. |
|
||||
| [Decomposition](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a question can be broken down into smaller subproblems. | Decompose a question into a set of subproblems / questions, which can either be solved sequentially (use the answer from first + retrieval to answer the second) or in parallel (consolidate each answer into final answer). |
|
||||
| [Step-back](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a higher-level conceptual understanding is required. | First prompt the LLM to ask a generic step-back question about higher-level concepts or principles, and retrieve relevant facts about them. Use this grounding to help answer the user question. [Paper](https://arxiv.org/pdf/2310.06117). |
|
||||
| [HyDE](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | If you have challenges retrieving relevant documents using the raw user inputs. | Use an LLM to convert questions into hypothetical documents that answer the question. Use the embedded hypothetical documents to retrieve real documents with the premise that doc-doc similarity search can produce more relevant matches. [Paper](https://arxiv.org/abs/2212.10496). |
|
||||
|
||||
:::tip
|
||||
|
||||
@@ -1203,11 +1281,11 @@ Fifth, consider ways to improve the quality of your similarity search itself. Em
|
||||
|
||||
There are some additional tricks to improve the quality of your retrieval. Embeddings excel at capturing semantic information, but may struggle with keyword-based queries. Many [vector stores](/docs/integrations/retrievers/pinecone_hybrid_search/) offer built-in [hybrid-search](https://docs.pinecone.io/guides/data/understanding-hybrid-search) to combine keyword and semantic similarity, which marries the benefits of both approaches. Furthermore, many vector stores have [maximal marginal relevance](https://python.langchain.com/v0.1/docs/modules/model_io/prompts/example_selectors/mmr/), which attempts to diversify the results of a search to avoid returning similar and redundant documents.
|
||||
|
||||
| Name | When to use | Description |
|
||||
|-------------------|----------------------------------------------------------|-------------|
|
||||
| [ColBERT](/docs/integrations/providers/ragatouille/#using-colbert-as-a-reranker) | When higher granularity embeddings are needed. | ColBERT uses contextually influenced embeddings for each token in the document and query to get a granular query-document similarity score. |
|
||||
| [Hybrid search](/docs/integrations/retrievers/pinecone_hybrid_search/) | When combining keyword-based and semantic similarity. | Hybrid search combines keyword and semantic similarity, marrying the benefits of both approaches. |
|
||||
| [Maximal Marginal Relevance (MMR)](/docs/integrations/vectorstores/pinecone/#maximal-marginal-relevance-searches) | When needing to diversify search results. | MMR attempts to diversify the results of a search to avoid returning similar and redundant documents. |
|
||||
| Name | When to use | Description |
|
||||
|-------------------|----------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| [ColBERT](/docs/integrations/providers/ragatouille/#using-colbert-as-a-reranker) | When higher granularity embeddings are needed. | ColBERT uses contextually influenced embeddings for each token in the document and query to get a granular query-document similarity score. [Paper](https://arxiv.org/abs/2112.01488). |
|
||||
| [Hybrid search](/docs/integrations/retrievers/pinecone_hybrid_search/) | When combining keyword-based and semantic similarity. | Hybrid search combines keyword and semantic similarity, marrying the benefits of both approaches. [Paper](https://arxiv.org/abs/2210.11934). |
|
||||
| [Maximal Marginal Relevance (MMR)](/docs/integrations/vectorstores/pinecone/#maximal-marginal-relevance-searches) | When needing to diversify search results. | MMR attempts to diversify the results of a search to avoid returning similar and redundant documents. |
|
||||
|
||||
:::tip
|
||||
|
||||
@@ -1227,7 +1305,7 @@ Sixth, consider ways to filter or rank retrieved documents. This is very useful
|
||||
|
||||
:::tip
|
||||
|
||||
See our RAG from Scratch video on [RAG-Fusion](https://youtu.be/77qELPbNgxA?feature=shared), on approach for post-processing across multiple queries: Rewrite the user question from multiple perspectives, retrieve documents for each rewritten question, and combine the ranks of multiple search result lists to produce a single, unified ranking with [Reciprocal Rank Fusion (RRF)](https://towardsdatascience.com/forget-rag-the-future-is-rag-fusion-1147298d8ad1).
|
||||
See our RAG from Scratch video on [RAG-Fusion](https://youtu.be/77qELPbNgxA?feature=shared) ([paper](https://arxiv.org/abs/2402.03367)), an approach for post-processing across multiple queries: Rewrite the user question from multiple perspectives, retrieve documents for each rewritten question, and combine the ranks of multiple search result lists to produce a single, unified ranking with [Reciprocal Rank Fusion (RRF)](https://towardsdatascience.com/forget-rag-the-future-is-rag-fusion-1147298d8ad1).
|
||||
|
||||
:::
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ It covers a wide array of topics, including tutorials, use cases, integrations,
|
||||
and more, offering extensive guidance on building with LangChain.
|
||||
The content for this documentation lives in the `/docs` directory of the monorepo.
|
||||
2. In-code Documentation: This is documentation of the codebase itself, which is also
|
||||
used to generate the externally facing [API Reference](https://api.python.langchain.com/en/latest/langchain_api_reference.html).
|
||||
used to generate the externally facing [API Reference](https://python.langchain.com/api_reference/langchain/index.html).
|
||||
The content for the API reference is autogenerated by scanning the docstrings in the codebase. For this reason we ask that
|
||||
developers document their code well.
|
||||
|
||||
|
||||
@@ -24,3 +24,16 @@ for more information.
|
||||
Notably, Github doesn't allow this setting to be enabled for forks in **organizations** ([issue](https://github.com/orgs/community/discussions/5634)).
|
||||
If you are working in an organization, we recommend submitting your PR from a personal
|
||||
fork in order to enable this setting.
|
||||
|
||||
### Why hasn't my PR been reviewed?
|
||||
|
||||
Please reference our [Review Process](/docs/contributing/review_process/).
|
||||
|
||||
### Why was my PR closed?
|
||||
|
||||
Please reference our [Review Process](/docs/contributing/review_process/).
|
||||
|
||||
### I think my PR was closed in a way that didn't follow the review process. What should I do?
|
||||
|
||||
Tag `@efriis` in the PR comments referencing the portion of the review
|
||||
process that you believe was not followed. We'll take a look!
|
||||
|
||||
@@ -50,7 +50,7 @@ There are other files in the root directory level, but their presence should be
|
||||
## Documentation
|
||||
|
||||
The `/docs` directory contains the content for the documentation that is shown
|
||||
at https://python.langchain.com/ and the associated API Reference https://api.python.langchain.com/en/latest/langchain_api_reference.html.
|
||||
at https://python.langchain.com/ and the associated API Reference https://python.langchain.com/api_reference/langchain/index.html.
|
||||
|
||||
See the [documentation](/docs/contributing/documentation/) guidelines to learn how to contribute to the documentation.
|
||||
|
||||
|
||||
95
docs/docs/contributing/review_process.mdx
Normal file
@@ -0,0 +1,95 @@
|
||||
# Review Process
|
||||
|
||||
## Overview
|
||||
|
||||
This document outlines the process used by the LangChain maintainers for reviewing pull requests (PRs). The primary objective of this process is to enhance the LangChain developer experience.
|
||||
|
||||
## Review Statuses
|
||||
|
||||
We categorize PRs using three main statuses, which are marked as project item statuses in the right sidebar and can be viewed in detail [here](https://github.com/orgs/langchain-ai/projects/12/views/1).
|
||||
|
||||
- **Triage**:
|
||||
- Initial status for all newly submitted PRs.
|
||||
- Requires a maintainer to categorize it into one of the other statuses.
|
||||
|
||||
- **Needs Support**:
|
||||
- PRs that require community feedback or additional input before moving forward.
|
||||
- Automatically promoted to the backlog if it receives 5 upvotes.
|
||||
- An auto-comment is generated when this status is applied, explaining the flow and the upvote requirement.
|
||||
- If the PR remains in this status for 25 days, it will be marked as “stale” via auto-comment.
|
||||
- PRs will be auto-closed after 30 days if no further action is taken.
|
||||
|
||||
- **In Review**:
|
||||
- PRs that are actively under review by our team.
|
||||
- These are regularly reviewed and monitored.
|
||||
|
||||
**Note:** A PR may only have one status at a time.
|
||||
|
||||
**Note:** You may notice 3 additional statuses of Done, Closed, and Internal that
|
||||
are external to this lifecycle. Done and Closed PRs have been merged or closed,
|
||||
respectively. Internal is for PRs submitted by core maintainers, and these PRs are owned
|
||||
by the submitter.
|
||||
|
||||
## Review Guidelines
|
||||
|
||||
1. **PRs that touch /libs/core**:
|
||||
- PRs that directly impact core code and are likely to affect end users.
|
||||
- **Triage Guideline**: most PRs should either go straight to `In Review` or be closed.
|
||||
- These PRs are given top priority and are reviewed the fastest.
|
||||
- PRs that don't have a **concise** description of their motivation (either in the PR summary or in a linked issue) are likely to be closed without an in-depth review. Please do not generate verbose PR descriptions with an LLM.
|
||||
- PRs that don't have unit tests are likely to be closed.
|
||||
- Feature requests should first be opened as a GitHub issue and discussed with the LangChain maintainers. Large PRs submitted without prior discussion are likely to be closed.
|
||||
|
||||
2. **PRs that touch /libs/langchain**:
|
||||
- High-impact PRs that are closely related to core PRs but slightly lower in priority.
|
||||
- **Triage Guideline**: most PRs should either go straight to `In Review` or be closed.
|
||||
- These are reviewed and closed aggressively, similar to core PRs.
|
||||
- New feature requests should be discussed with the core maintainer team beforehand in an issue.
|
||||
|
||||
3. **PRs that touch `/libs/partners/*`**:
|
||||
- PRs involving integration packages.
|
||||
- **Triage Guideline**: most PRs should either go straight to `In Review` or be closed.
|
||||
- The review may be conducted by our team or handed off to the partner's development team, depending on the PR's content.
|
||||
- We maintain communication lines with most partner dev teams to facilitate this process.
|
||||
|
||||
4. **Community PRs**:
|
||||
- Most community PRs will get an initial status of "needs support".
|
||||
- **Triage Guideline**: most PRs should go to `Needs Support`. Bugfixes on high-traffic integrations should go straight to `In Review`.
|
||||
- **Triage Guideline**: all new features and integrations should go to `Needs Support` and will be closed if they do not get enough support (measured by upvotes or comments).
|
||||
- PRs in the `Needs Support` status for 20 days are marked as “stale” and will be closed after 30 days if no action is taken.
|
||||
|
||||
5. **Documentation PRs**:
|
||||
- PRs that touch the documentation content in docs/docs.
|
||||
- **Triage Guideline**:
|
||||
- PRs that fix typos or small errors in a single file and pass CI should go straight to `In Review`.
|
||||
- PRs that make changes that have been discussed and agreed upon in an issue should go straight to `In Review`.
|
||||
- PRs that add new pages or change the structure of the documentation should go to `Needs Support`.
|
||||
- We strive to standardize documentation formats to streamline the review process.
|
||||
- CI jobs run against documentation to ensure adherence to standards, automating much of the review.
|
||||
|
||||
6. **PRs must be in English**:
|
||||
- PRs that are not in English will be closed without review.
|
||||
- This is to ensure that all maintainers can review the PRs effectively.
|
||||
|
||||
## How to see a PR's status
|
||||
|
||||
See screenshot:
|
||||
|
||||

|
||||
|
||||
*To see the status of all open PRs, please visit the [LangChain Project Board](https://github.com/orgs/langchain-ai/projects/12/views/2).*
|
||||
|
||||
## Review Prioritization
|
||||
|
||||
Our goal is to provide the best possible development experience by focusing on making software that:
|
||||
|
||||
- Works: Works as intended (is bug-free).
|
||||
- Is useful: Improves LLM app development with components that work off-the-shelf and runtimes that simplify app building.
|
||||
- Is easy: Is intuitive to use and well-documented.
|
||||
|
||||
We believe this process reflects our priorities and are open to feedback if you feel it does not.
|
||||
|
||||
## Github Discussion
|
||||
|
||||
We welcome your feedback on this process. Please feel free to add a comment in
|
||||
[this GitHub Discussion](https://github.com/langchain-ai/langchain/discussions/25920).
|
||||
@@ -13,7 +13,7 @@
|
||||
"# How to split by HTML header \n",
|
||||
"## Description and motivation\n",
|
||||
"\n",
|
||||
"[HTMLHeaderTextSplitter](https://api.python.langchain.com/en/latest/html/langchain_text_splitters.html.HTMLHeaderTextSplitter.html) is a \"structure-aware\" chunker that splits text at the HTML element level and adds metadata for each header \"relevant\" to any given chunk. It can return chunks element by element or combine elements with the same metadata, with the objectives of (a) keeping related text grouped (more or less) semantically and (b) preserving context-rich information encoded in document structures. It can be used with other text splitters as part of a chunking pipeline.\n",
|
||||
"[HTMLHeaderTextSplitter](https://python.langchain.com/api_reference/text_splitters/html/langchain_text_splitters.html.HTMLHeaderTextSplitter.html) is a \"structure-aware\" chunker that splits text at the HTML element level and adds metadata for each header \"relevant\" to any given chunk. It can return chunks element by element or combine elements with the same metadata, with the objectives of (a) keeping related text grouped (more or less) semantically and (b) preserving context-rich information encoded in document structures. It can be used with other text splitters as part of a chunking pipeline.\n",
|
||||
"\n",
|
||||
"It is analogous to the [MarkdownHeaderTextSplitter](/docs/how_to/markdown_header_metadata_splitter) for markdown files.\n",
|
||||
"\n",
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
"\n",
|
||||
"Distance-based vector database retrieval embeds (represents) queries in high-dimensional space and finds similar embedded documents based on a distance metric. But, retrieval may produce different results with subtle changes in query wording, or if the embeddings do not capture the semantics of the data well. Prompt engineering / tuning is sometimes done to manually address these problems, but can be tedious.\n",
|
||||
"\n",
|
||||
"The [MultiQueryRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_query.MultiQueryRetriever.html) automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query. For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents. By generating multiple perspectives on the same question, the `MultiQueryRetriever` can mitigate some of the limitations of the distance-based retrieval and get a richer set of results.\n",
|
||||
"The [MultiQueryRetriever](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.multi_query.MultiQueryRetriever.html) automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query. For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents. By generating multiple perspectives on the same question, the `MultiQueryRetriever` can mitigate some of the limitations of the distance-based retrieval and get a richer set of results.\n",
|
||||
"\n",
|
||||
"Let's build a vectorstore using the [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng from the [RAG tutorial](/docs/tutorials/rag):"
|
||||
]
|
||||
@@ -18,8 +18,23 @@
|
||||
"cell_type": "code",
"execution_count": 1,
"id": "994d6c74",
"metadata": {},
"outputs": [],
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:08:00.190093Z",
"iopub.status.busy": "2024-09-10T20:08:00.189665Z",
"iopub.status.idle": "2024-09-10T20:08:05.438015Z",
"shell.execute_reply": "2024-09-10T20:08:05.437685Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"USER_AGENT environment variable not set, consider setting it to identify your requests.\n"
]
}
],
"source": [
"# Build a sample vectorDB\n",
"from langchain_chroma import Chroma\n",
@@ -54,7 +69,14 @@
"cell_type": "code",
"execution_count": 2,
"id": "edbca101",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:08:05.439930Z",
"iopub.status.busy": "2024-09-10T20:08:05.439810Z",
"iopub.status.idle": "2024-09-10T20:08:05.553766Z",
"shell.execute_reply": "2024-09-10T20:08:05.553520Z"
}
},
"outputs": [],
"source": [
"from langchain.retrievers.multi_query import MultiQueryRetriever\n",
@@ -71,7 +93,14 @@
"cell_type": "code",
"execution_count": 3,
"id": "9e6d3b69",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:08:05.555359Z",
"iopub.status.busy": "2024-09-10T20:08:05.555262Z",
"iopub.status.idle": "2024-09-10T20:08:05.557046Z",
"shell.execute_reply": "2024-09-10T20:08:05.556825Z"
}
},
"outputs": [],
"source": [
"# Set logging for the queries\n",
@@ -85,13 +114,20 @@
"cell_type": "code",
"execution_count": 4,
"id": "bc93dc2b-9407-48b0-9f9a-338247e7eb69",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:08:05.558176Z",
"iopub.status.busy": "2024-09-10T20:08:05.558100Z",
"iopub.status.idle": "2024-09-10T20:08:07.250342Z",
"shell.execute_reply": "2024-09-10T20:08:07.249711Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:langchain.retrievers.multi_query:Generated queries: ['1. How can Task Decomposition be achieved through different methods?', '2. What strategies are commonly used for Task Decomposition?', '3. What are the various techniques for breaking down tasks in Task Decomposition?']\n"
"INFO:langchain.retrievers.multi_query:Generated queries: ['1. How can Task Decomposition be achieved through different methods?', '2. What strategies are commonly used for Task Decomposition?', '3. What are the various ways to break down tasks in Task Decomposition?']\n"
]
},
{
@@ -125,9 +161,9 @@
"source": [
"#### Supplying your own prompt\n",
"\n",
"Under the hood, `MultiQueryRetriever` generates queries using a specific [prompt](https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/multi_query.html#MultiQueryRetriever). To customize this prompt:\n",
"Under the hood, `MultiQueryRetriever` generates queries using a specific [prompt](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.multi_query.MultiQueryRetriever.html). To customize this prompt:\n",
"\n",
"1. Make a [PromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.prompt.PromptTemplate.html) with an input variable for the question;\n",
"1. Make a [PromptTemplate](https://python.langchain.com/api_reference/core/prompts/langchain_core.prompts.prompt.PromptTemplate.html) with an input variable for the question;\n",
"2. Implement an [output parser](/docs/concepts#output-parsers) like the one below to split the result into a list of queries.\n",
"\n",
"The prompt and output parser together must support the generation of a list of queries."
@@ -137,14 +173,21 @@
"cell_type": "code",
"execution_count": 5,
"id": "d9afb0ca",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:08:07.253875Z",
"iopub.status.busy": "2024-09-10T20:08:07.253600Z",
"iopub.status.idle": "2024-09-10T20:08:07.277848Z",
"shell.execute_reply": "2024-09-10T20:08:07.277487Z"
}
},
"outputs": [],
"source": [
"from typing import List\n",
"\n",
"from langchain_core.output_parsers import BaseOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"# Output parser will split the LLM result into a list of queries\n",
@@ -180,13 +223,20 @@
"cell_type": "code",
"execution_count": 6,
"id": "59c75c56-dbd7-4887-b9ba-0b5b21069f51",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:08:07.280001Z",
"iopub.status.busy": "2024-09-10T20:08:07.279861Z",
"iopub.status.idle": "2024-09-10T20:08:09.579525Z",
"shell.execute_reply": "2024-09-10T20:08:09.578837Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:langchain.retrievers.multi_query:Generated queries: ['1. Can you provide insights on regression from the course material?', '2. How is regression discussed in the course content?', '3. What information does the course offer about regression?', '4. In what way is regression covered in the course?', '5. What are the teachings of the course regarding regression?']\n"
"INFO:langchain.retrievers.multi_query:Generated queries: ['1. Can you provide insights on regression from the course material?', '2. How is regression discussed in the course content?', '3. What information does the course offer regarding regression?', '4. In what way is regression covered in the course?', \"5. What are the course's teachings on regression?\"]\n"
]
},
{
@@ -228,7 +278,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.9"
}
},
"nbformat": 4,

@@ -7,7 +7,7 @@
"source": [
"# How to add scores to retriever results\n",
"\n",
"Retrievers will return sequences of [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) objects, which by default include no information about the process that retrieved them (e.g., a similarity score against a query). Here we demonstrate how to add retrieval scores to the `.metadata` of documents:\n",
"Retrievers will return sequences of [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) objects, which by default include no information about the process that retrieved them (e.g., a similarity score against a query). Here we demonstrate how to add retrieval scores to the `.metadata` of documents:\n",
"1. From [vectorstore retrievers](/docs/how_to/vectorstore_retriever);\n",
"2. From higher-order LangChain retrievers, such as [SelfQueryRetriever](/docs/how_to/self_query) or [MultiVectorRetriever](/docs/how_to/multi_vector).\n",
"\n",
@@ -15,7 +15,7 @@
"\n",
"## Create vector store\n",
"\n",
"First we populate a vector store with some data. We will use a [PineconeVectorStore](https://api.python.langchain.com/en/latest/vectorstores/langchain_pinecone.vectorstores.PineconeVectorStore.html), but this guide is compatible with any LangChain vector store that implements a `.similarity_search_with_score` method."
"First we populate a vector store with some data. We will use a [PineconeVectorStore](https://python.langchain.com/api_reference/pinecone/vectorstores/langchain_pinecone.vectorstores.PineconeVectorStore.html), but this guide is compatible with any LangChain vector store that implements a `.similarity_search_with_score` method."
]
},
{
@@ -263,7 +263,7 @@
"\n",
"To propagate similarity scores through this retriever, we can again subclass `MultiVectorRetriever` and override a method. This time we will override `_get_relevant_documents`.\n",
"\n",
"First, we prepare some fake data. We generate fake \"whole documents\" and store them in a document store; here we will use a simple [InMemoryStore](https://api.python.langchain.com/en/latest/stores/langchain_core.stores.InMemoryBaseStore.html)."
"First, we prepare some fake data. We generate fake \"whole documents\" and store them in a document store; here we will use a simple [InMemoryStore](https://python.langchain.com/api_reference/core/stores/langchain_core.stores.InMemoryBaseStore.html)."
]
},
{

@@ -461,7 +461,7 @@
"id": "f8014c9d",
"metadata": {},
"source": [
"Now, we can initalize the agent with the LLM, the prompt, and the tools. The agent is responsible for taking in input and deciding what actions to take. Crucially, the Agent does not execute those actions - that is done by the AgentExecutor (next step). For more information about how to think about these components, see our [conceptual guide](/docs/concepts/#agents).\n",
"Now, we can initialize the agent with the LLM, the prompt, and the tools. The agent is responsible for taking in input and deciding what actions to take. Crucially, the Agent does not execute those actions - that is done by the AgentExecutor (next step). For more information about how to think about these components, see our [conceptual guide](/docs/concepts/#agents).\n",
"\n",
"Note that we are passing in the `model`, not `model_with_tools`. That is because `create_tool_calling_agent` will call `.bind_tools` for us under the hood."
]

@@ -27,7 +27,7 @@
"\n",
":::\n",
"\n",
"An alternate way of [passing data through](/docs/how_to/passthrough) steps of a chain is to leave the current values of the chain state unchanged while assigning a new value under a given key. The [`RunnablePassthrough.assign()`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html#langchain_core.runnables.passthrough.RunnablePassthrough.assign) static method takes an input value and adds the extra arguments passed to the assign function.\n",
"An alternate way of [passing data through](/docs/how_to/passthrough) steps of a chain is to leave the current values of the chain state unchanged while assigning a new value under a given key. The [`RunnablePassthrough.assign()`](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html#langchain_core.runnables.passthrough.RunnablePassthrough.assign) static method takes an input value and adds the extra arguments passed to the assign function.\n",
"\n",
"This is useful in the common [LangChain Expression Language](/docs/concepts/#langchain-expression-language) pattern of additively creating a dictionary to use as input to a later step.\n",
"\n",
@@ -45,7 +45,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{

@@ -27,7 +27,7 @@
"\n",
":::\n",
"\n",
"Sometimes we want to invoke a [`Runnable`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) within a [RunnableSequence](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableSequence.html) with constant arguments that are not part of the output of the preceding Runnable in the sequence, and which are not part of the user input. We can use the [`Runnable.bind()`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.bind) method to set these arguments ahead of time.\n",
"Sometimes we want to invoke a [`Runnable`](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html) within a [RunnableSequence](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.RunnableSequence.html) with constant arguments that are not part of the output of the preceding Runnable in the sequence, and which are not part of the user input. We can use the [`Runnable.bind()`](https://python.langchain.com/api_reference/langchain_core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.bind) method to set these arguments ahead of time.\n",
"\n",
"## Binding stop sequences\n",
"\n",
@@ -49,7 +49,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
@@ -183,7 +184,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_z0OU2CytqENVrRTI6T8DkI3u', 'function': {'arguments': '{\"location\": \"San Francisco, CA\", \"unit\": \"celsius\"}', 'name': 'get_current_weather'}, 'type': 'function'}, {'id': 'call_ft96IJBh0cMKkQWrZjNg4bsw', 'function': {'arguments': '{\"location\": \"New York, NY\", \"unit\": \"celsius\"}', 'name': 'get_current_weather'}, 'type': 'function'}, {'id': 'call_tfbtGgCLmuBuWgZLvpPwvUMH', 'function': {'arguments': '{\"location\": \"Los Angeles, CA\", \"unit\": \"celsius\"}', 'name': 'get_current_weather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 84, 'prompt_tokens': 85, 'total_tokens': 169}, 'model_name': 'gpt-3.5-turbo-1106', 'system_fingerprint': 'fp_77a673219d', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d57ad5fa-b52a-4822-bc3e-74f838697e18-0', tool_calls=[{'name': 'get_current_weather', 'args': {'location': 'San Francisco, CA', 'unit': 'celsius'}, 'id': 'call_z0OU2CytqENVrRTI6T8DkI3u'}, {'name': 'get_current_weather', 'args': {'location': 'New York, NY', 'unit': 'celsius'}, 'id': 'call_ft96IJBh0cMKkQWrZjNg4bsw'}, {'name': 'get_current_weather', 'args': {'location': 'Los Angeles, CA', 'unit': 'celsius'}, 'id': 'call_tfbtGgCLmuBuWgZLvpPwvUMH'}])"
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_z0OU2CytqENVrRTI6T8DkI3u', 'function': {'arguments': '{\"location\": \"San Francisco, CA\", \"unit\": \"celsius\"}', 'name': 'get_current_weather'}, 'type': 'function'}, {'id': 'call_ft96IJBh0cMKkQWrZjNg4bsw', 'function': {'arguments': '{\"location\": \"New York, NY\", \"unit\": \"celsius\"}', 'name': 'get_current_weather'}, 'type': 'function'}, {'id': 'call_tfbtGgCLmuBuWgZLvpPwvUMH', 'function': {'arguments': '{\"location\": \"Los Angeles, CA\", \"unit\": \"celsius\"}', 'name': 'get_current_weather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 84, 'prompt_tokens': 85, 'total_tokens': 169}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': 'fp_77a673219d', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d57ad5fa-b52a-4822-bc3e-74f838697e18-0', tool_calls=[{'name': 'get_current_weather', 'args': {'location': 'San Francisco, CA', 'unit': 'celsius'}, 'id': 'call_z0OU2CytqENVrRTI6T8DkI3u'}, {'name': 'get_current_weather', 'args': {'location': 'New York, NY', 'unit': 'celsius'}, 'id': 'call_ft96IJBh0cMKkQWrZjNg4bsw'}, {'name': 'get_current_weather', 'args': {'location': 'Los Angeles, CA', 'unit': 'celsius'}, 'id': 'call_tfbtGgCLmuBuWgZLvpPwvUMH'}])"
]
},
"execution_count": 5,
@@ -192,7 +193,7 @@
}
],
"source": [
"model = ChatOpenAI(model=\"gpt-3.5-turbo-1106\").bind(tools=tools)\n",
"model = ChatOpenAI(model=\"gpt-4o-mini\").bind(tools=tools)\n",
"model.invoke(\"What's the weather in SF, NYC and LA?\")"
]
},

@@ -14,7 +14,7 @@
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
":::\n",
"\n",
"If you are planning to use the async APIs, it is recommended to use and extend [`AsyncCallbackHandler`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) to avoid blocking the event.\n",
"If you are planning to use the async APIs, it is recommended to use and extend [`AsyncCallbackHandler`](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) to avoid blocking the event.\n",
"\n",
"\n",
":::{.callout-warning}\n",

@@ -17,7 +17,7 @@
"\n",
":::\n",
"\n",
"If you are composing a chain of runnables and want to reuse callbacks across multiple executions, you can attach callbacks with the [`.with_config()`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config) method. This saves you the need to pass callbacks in each time you invoke the chain.\n",
"If you are composing a chain of runnables and want to reuse callbacks across multiple executions, you can attach callbacks with the [`.with_config()`](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config) method. This saves you the need to pass callbacks in each time you invoke the chain.\n",
"\n",
":::{.callout-important}\n",
"\n",

@@ -15,7 +15,7 @@
"\n",
":::\n",
"\n",
"In many cases, it is advantageous to pass in handlers instead when running the object. When we pass through [`CallbackHandlers`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) using the `callbacks` keyword arg when executing an run, those callbacks will be issued by all nested objects involved in the execution. For example, when a handler is passed through to an Agent, it will be used for all callbacks related to the agent and all the objects involved in the agent's execution, in this case, the Tools and LLM.\n",
"In many cases, it is advantageous to pass in handlers instead when running the object. When we pass through [`CallbackHandlers`](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) using the `callbacks` keyword arg when executing an run, those callbacks will be issued by all nested objects involved in the execution. For example, when a handler is passed through to an Agent, it will be used for all callbacks related to the agent and all the objects involved in the agent's execution, in this case, the Tools and LLM.\n",
"\n",
"This prevents us from having to manually attach the handlers to each individual nested object. Here's an example:"
]

@@ -28,7 +28,7 @@
"\n",
"To obtain the string content directly, use `.split_text`.\n",
"\n",
"To create LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) objects (e.g., for use in downstream tasks), use `.create_documents`."
"To create LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) objects (e.g., for use in downstream tasks), use `.create_documents`."
]
},
{

@@ -50,7 +50,8 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"\n",
"llm = ChatOpenAI()"
]
@@ -63,7 +64,7 @@
"outputs": [],
"source": [
"# <!-- ruff: noqa: F821 -->\n",
"from langchain.globals import set_llm_cache"
"from langchain_core.globals import set_llm_cache"
]
},
{
@@ -103,7 +104,7 @@
],
"source": [
"%%time\n",
"from langchain.cache import InMemoryCache\n",
"from langchain_core.caches import InMemoryCache\n",
"\n",
"set_llm_cache(InMemoryCache())\n",
"\n",

@@ -11,7 +11,7 @@
"\n",
":::tip Supported models\n",
"\n",
"See the [init_chat_model()](https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.init_chat_model.html) API reference for a full list of supported integrations.\n",
"See the [init_chat_model()](https://python.langchain.com/api_reference/langchain/chat_models/langchain.chat_models.base.init_chat_model.html) API reference for a full list of supported integrations.\n",
"\n",
"Make sure you have the integration packages installed for any model providers you want to support. E.g. you should have `langchain-openai` installed to init an OpenAI model.\n",
"\n",
@@ -26,10 +26,32 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "165b0de6-9ae3-4e3d-aa98-4fc8a97c4a06",
"metadata": {},
"outputs": [],
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:32.858670Z",
"iopub.status.busy": "2024-09-10T20:22:32.858278Z",
"iopub.status.idle": "2024-09-10T20:22:33.009452Z",
"shell.execute_reply": "2024-09-10T20:22:33.007022Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"zsh:1: 0.2.8 not found\r\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install -qU langchain>=0.2.8 langchain-openai langchain-anthropic langchain-google-vertexai"
]
@@ -44,19 +66,48 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 2,
"id": "79e14913-803c-4382-9009-5c6af3d75d35",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:33.015729Z",
"iopub.status.busy": "2024-09-10T20:22:33.015241Z",
"iopub.status.idle": "2024-09-10T20:22:39.391716Z",
"shell.execute_reply": "2024-09-10T20:22:39.390438Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/4j/2rz3865x6qg07tx43146py8h0000gn/T/ipykernel_95293/571506279.py:4: LangChainBetaWarning: The function `init_chat_model` is in beta. It is actively being worked on, so the API may change.\n",
" gpt_4o = init_chat_model(\"gpt-4o\", model_provider=\"openai\", temperature=0)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"GPT-4o: I'm an AI created by OpenAI, and I don't have a personal name. How can I assist you today?\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"GPT-4o: I'm an AI created by OpenAI, and I don't have a personal name. You can call me Assistant! How can I help you today?\n",
"\n",
"Claude Opus: My name is Claude. It's nice to meet you!\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Gemini 1.5: I am a large language model, trained by Google. \n",
"\n",
"Gemini 1.5: I am a large language model, trained by Google. I do not have a name. \n",
"I don't have a name like a person does. You can call me Bard if you like! 😊 \n",
"\n",
"\n"
]
@@ -89,14 +140,21 @@
"source": [
"## Inferring model provider\n",
"\n",
"For common and distinct model names `init_chat_model()` will attempt to infer the model provider. See the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.init_chat_model.html) for a full list of inference behavior. E.g. any model that starts with `gpt-3...` or `gpt-4...` will be inferred as using model provider `openai`."
"For common and distinct model names `init_chat_model()` will attempt to infer the model provider. See the [API reference](https://python.langchain.com/api_reference/langchain/chat_models/langchain.chat_models.base.init_chat_model.html) for a full list of inference behavior. E.g. any model that starts with `gpt-3...` or `gpt-4...` will be inferred as using model provider `openai`."
]
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"id": "0378ccc6-95bc-4d50-be50-fccc193f0a71",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:39.396908Z",
"iopub.status.busy": "2024-09-10T20:22:39.396563Z",
"iopub.status.idle": "2024-09-10T20:22:39.444959Z",
"shell.execute_reply": "2024-09-10T20:22:39.444646Z"
}
},
"outputs": [],
"source": [
"gpt_4o = init_chat_model(\"gpt-4o\", temperature=0)\n",
@@ -116,17 +174,24 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 4,
"id": "6c037f27-12d7-4e83-811e-4245c0e3ba58",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:39.446901Z",
"iopub.status.busy": "2024-09-10T20:22:39.446773Z",
"iopub.status.idle": "2024-09-10T20:22:40.301906Z",
"shell.execute_reply": "2024-09-10T20:22:40.300918Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm an AI language model created by OpenAI, and I don't have a personal name. You can call me Assistant or any other name you prefer! How can I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 37, 'prompt_tokens': 11, 'total_tokens': 48}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_d576307f90', 'finish_reason': 'stop', 'logprobs': None}, id='run-5428ab5c-b5c0-46de-9946-5d4ca40dbdc8-0', usage_metadata={'input_tokens': 11, 'output_tokens': 37, 'total_tokens': 48})"
"AIMessage(content=\"I'm an AI created by OpenAI, and I don't have a personal name. How can I assist you today?\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 23, 'prompt_tokens': 11, 'total_tokens': 34}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_25624ae3a5', 'finish_reason': 'stop', 'logprobs': None}, id='run-b41df187-4627-490d-af3c-1c96282d3eb0-0', usage_metadata={'input_tokens': 11, 'output_tokens': 23, 'total_tokens': 34})"
]
},
"execution_count": 5,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
@@ -141,17 +206,24 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 5,
"id": "321e3036-abd2-4e1f-bcc6-606efd036954",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:40.316030Z",
"iopub.status.busy": "2024-09-10T20:22:40.315628Z",
"iopub.status.idle": "2024-09-10T20:22:41.199134Z",
"shell.execute_reply": "2024-09-10T20:22:41.198173Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", response_metadata={'id': 'msg_012XvotUJ3kGLXJUWKBVxJUi', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-1ad1eefe-f1c6-4244-8bc6-90e2cb7ee554-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", additional_kwargs={}, response_metadata={'id': 'msg_01Fx9P74A7syoFkwE73CdMMY', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-a0fd2bbd-3b7e-46bf-8d69-a48c7e60b03c-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
]
},
"execution_count": 6,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -174,17 +246,24 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 6,
"id": "814a2289-d0db-401e-b555-d5116112b413",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:41.203346Z",
"iopub.status.busy": "2024-09-10T20:22:41.203004Z",
"iopub.status.idle": "2024-09-10T20:22:41.891450Z",
"shell.execute_reply": "2024-09-10T20:22:41.890539Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm an AI language model created by OpenAI, and I don't have a personal name. You can call me Assistant or any other name you prefer! How can I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 37, 'prompt_tokens': 11, 'total_tokens': 48}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_ce0793330f', 'finish_reason': 'stop', 'logprobs': None}, id='run-3923e328-7715-4cd6-b215-98e4b6bf7c9d-0', usage_metadata={'input_tokens': 11, 'output_tokens': 37, 'total_tokens': 48})"
"AIMessage(content=\"I'm an AI created by OpenAI, and I don't have a personal name. How can I assist you today?\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 23, 'prompt_tokens': 11, 'total_tokens': 34}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_25624ae3a5', 'finish_reason': 'stop', 'logprobs': None}, id='run-3380f977-4b89-4f44-bc02-b64043b3166f-0', usage_metadata={'input_tokens': 11, 'output_tokens': 23, 'total_tokens': 34})"
]
},
"execution_count": 9,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
@@ -202,17 +281,24 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 7,
"id": "6c8755ba-c001-4f5a-a497-be3f1db83244",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:41.896413Z",
"iopub.status.busy": "2024-09-10T20:22:41.895967Z",
"iopub.status.idle": "2024-09-10T20:22:42.767565Z",
"shell.execute_reply": "2024-09-10T20:22:42.766619Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", response_metadata={'id': 'msg_01RyYR64DoMPNCfHeNnroMXm', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-22446159-3723-43e6-88df-b84797e7751d-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", additional_kwargs={}, response_metadata={'id': 'msg_01EFKSWpmsn2PSYPQa4cNHWb', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-3c58f47c-41b9-4e56-92e7-fb9602e3787c-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
]
},
"execution_count": 10,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -242,28 +328,37 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 8,
"id": "067dabee-1050-4110-ae24-c48eba01e13b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:42.771941Z",
"iopub.status.busy": "2024-09-10T20:22:42.771606Z",
"iopub.status.idle": "2024-09-10T20:22:43.909206Z",
"shell.execute_reply": "2024-09-10T20:22:43.908496Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'GetPopulation',\n",
" 'args': {'location': 'Los Angeles, CA'},\n",
" 'id': 'call_sYT3PFMufHGWJD32Hi2CTNUP'},\n",
" 'id': 'call_Ga9m8FAArIyEjItHmztPYA22',\n",
" 'type': 'tool_call'},\n",
" {'name': 'GetPopulation',\n",
" 'args': {'location': 'New York, NY'},\n",
" 'id': 'call_j1qjhxRnD3ffQmRyqjlI1Lnk'}]"
" 'id': 'call_jh2dEvBaAHRaw5JUDthOs7rt',\n",
" 'type': 'tool_call'}]"
]
},
"execution_count": 7,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class GetWeather(BaseModel):\n",
@@ -288,22 +383,31 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 9,
"id": "e57dfe9f-cd24-4e37-9ce9-ccf8daf78f89",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:43.912746Z",
"iopub.status.busy": "2024-09-10T20:22:43.912447Z",
"iopub.status.idle": "2024-09-10T20:22:46.437049Z",
"shell.execute_reply": "2024-09-10T20:22:46.436093Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'GetPopulation',\n",
" 'args': {'location': 'Los Angeles, CA'},\n",
" 'id': 'toolu_01CxEHxKtVbLBrvzFS7GQ5xR'},\n",
" 'id': 'toolu_01JMufPf4F4t2zLj7miFeqXp',\n",
" 'type': 'tool_call'},\n",
" {'name': 'GetPopulation',\n",
" 'args': {'location': 'New York City, NY'},\n",
" 'id': 'toolu_013A79qt5toWSsKunFBDZd5S'}]"
" 'id': 'toolu_01RQBHcE8kEEbYTuuS8WqY1u',\n",
" 'type': 'tool_call'}]"
]
},
"execution_count": 8,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}

@@ -18,7 +18,7 @@
"# How to stream chat model responses\n",
"\n",
"\n",
"All [chat models](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.chat_models.BaseChatModel.html) implement the [Runnable interface](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable), which comes with a **default** implementations of standard runnable methods (i.e. `ainvoke`, `batch`, `abatch`, `stream`, `astream`, `astream_events`).\n",
"All [chat models](https://python.langchain.com/api_reference/core/language_models/langchain_core.language_models.chat_models.BaseChatModel.html) implement the [Runnable interface](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable), which comes with a **default** implementations of standard runnable methods (i.e. `ainvoke`, `batch`, `abatch`, `stream`, `astream`, `astream_events`).\n",
"\n",
"The **default** streaming implementation provides an`Iterator` (or `AsyncIterator` for asynchronous streaming) that yields a single value: the final output from the underlying chat model provider.\n",
"\n",
@@ -120,7 +120,7 @@
"source": [
"## Astream events\n",
"\n",
"Chat models also support the standard [astream events](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream_events) method.\n",
"Chat models also support the standard [astream events](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream_events) method.\n",
"\n",
"This method is useful if you're streaming output from a larger LLM application that contains multiple steps (e.g., an LLM chain composed of a prompt, llm and parser)."
]

@@ -42,7 +42,7 @@
"\n",
"A number of model providers return token usage information as part of the chat generation response. When available, this information will be included on the `AIMessage` objects produced by the corresponding model.\n",
"\n",
"LangChain `AIMessage` objects include a [usage_metadata](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.usage_metadata) attribute. When populated, this attribute will be a [UsageMetadata](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.UsageMetadata.html) dictionary with standard keys (e.g., `\"input_tokens\"` and `\"output_tokens\"`).\n",
"LangChain `AIMessage` objects include a [usage_metadata](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.usage_metadata) attribute. When populated, this attribute will be a [UsageMetadata](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html) dictionary with standard keys (e.g., `\"input_tokens\"` and `\"output_tokens\"`).\n",
"\n",
"Examples:\n",
"\n",
@@ -71,7 +71,7 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
"openai_response = llm.invoke(\"hello\")\n",
"openai_response.usage_metadata"
]
@@ -118,7 +118,7 @@
"source": [
"### Using AIMessage.response_metadata\n",
"\n",
"Metadata from the model response is also included in the AIMessage [response_metadata](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.response_metadata) attribute. These data are typically not standardized. Note that different providers adopt different conventions for representing token counts:"
"Metadata from the model response is also included in the AIMessage [response_metadata](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.response_metadata) attribute. These data are typically not standardized. Note that different providers adopt different conventions for representing token counts:"
]
},
{
@@ -153,7 +153,7 @@
"\n",
"#### OpenAI\n",
"\n",
"For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.9` and can be enabled by setting `stream_usage=True`. This attribute can also be set when `ChatOpenAI` is instantiated.\n",
"For example, OpenAI will return a message [chunk](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.9` and can be enabled by setting `stream_usage=True`. This attribute can also be set when `ChatOpenAI` is instantiated.\n",
"\n",
"```{=mdx}\n",
":::note\n",
@@ -182,13 +182,13 @@
"content=' you' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' today' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='?' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-3.5-turbo-0125'} id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-4o-mini'} id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623' usage_metadata={'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}\n"
]
}
],
"source": [
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
"\n",
"aggregate = None\n",
"for chunk in llm.stream(\"hello\", stream_usage=True):\n",
@@ -252,7 +252,7 @@
"content=' you' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' today' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='?' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-3.5-turbo-0125'} id='run-8e758550-94b0-4cca-a298-57482793c25d'\n"
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-4o-mini'} id='run-8e758550-94b0-4cca-a298-57482793c25d'\n"
]
}
],
@@ -289,7 +289,7 @@
}
],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Joke(BaseModel):\n",
@@ -300,7 +300,7 @@
"\n",
"\n",
"llm = ChatOpenAI(\n",
" model=\"gpt-3.5-turbo-0125\",\n",
" model=\"gpt-4o-mini\",\n",
" stream_usage=True,\n",
")\n",
"# Under the hood, .with_structured_output binds tools to the\n",
@@ -362,7 +362,7 @@
"from langchain_community.callbacks.manager import get_openai_callback\n",
"\n",
"llm = ChatOpenAI(\n",
" model=\"gpt-3.5-turbo-0125\",\n",
" model=\"gpt-4o-mini\",\n",
" temperature=0,\n",
" stream_usage=True,\n",
")\n",

@@ -77,7 +77,7 @@
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")"
"chat = ChatOpenAI(model=\"gpt-4o-mini\")"
]
},
{
@@ -140,7 +140,7 @@
"\n",
"## Chat history\n",
"\n",
"It's perfectly fine to store and pass messages directly as an array, but we can use LangChain's built-in [message history class](https://api.python.langchain.com/en/latest/langchain_api_reference.html#module-langchain.memory) to store and load messages as well. Instances of this class are responsible for storing and loading chat messages from persistent storage. LangChain integrates with many providers - you can see a [list of integrations here](/docs/integrations/memory) - but for this demo we will use an ephemeral demo class.\n",
"It's perfectly fine to store and pass messages directly as an array, but we can use LangChain's built-in [message history class](https://python.langchain.com/api_reference/langchain/index.html#module-langchain.memory) to store and load messages as well. Instances of this class are responsible for storing and loading chat messages from persistent storage. LangChain integrates with many providers - you can see a [list of integrations here](/docs/integrations/memory) - but for this demo we will use an ephemeral demo class.\n",
"\n",
"Here's an example of the API:"
]
@@ -191,7 +191,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='You just asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 61, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5cbb21c2-9c30-4031-8ea8-bfc497989535-0', usage_metadata={'input_tokens': 61, 'output_tokens': 18, 'total_tokens': 79})"
"AIMessage(content='You just asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 61, 'total_tokens': 79}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5cbb21c2-9c30-4031-8ea8-bfc497989535-0', usage_metadata={'input_tokens': 61, 'output_tokens': 18, 'total_tokens': 79})"
]
},
"execution_count": 5,
@@ -312,7 +312,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='\"J\\'adore la programmation.\"', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 39, 'total_tokens': 48}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-648b0822-b0bb-47a2-8e7d-7d34744be8f2-0', usage_metadata={'input_tokens': 39, 'output_tokens': 9, 'total_tokens': 48})"
"AIMessage(content='\"J\\'adore la programmation.\"', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 39, 'total_tokens': 48}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-648b0822-b0bb-47a2-8e7d-7d34744be8f2-0', usage_metadata={'input_tokens': 39, 'output_tokens': 9, 'total_tokens': 48})"
]
},
"execution_count": 8,
@@ -342,7 +342,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='You asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 63, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5950435c-1dc2-43a6-836f-f989fd62c95e-0', usage_metadata={'input_tokens': 63, 'output_tokens': 17, 'total_tokens': 80})"
"AIMessage(content='You asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 63, 'total_tokens': 80}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5950435c-1dc2-43a6-836f-f989fd62c95e-0', usage_metadata={'input_tokens': 63, 'output_tokens': 17, 'total_tokens': 80})"
]
},
"execution_count": 9,
@@ -421,7 +421,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72})"
"AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72})"
]
},
"execution_count": 22,
@@ -501,7 +501,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})"
"AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})"
]
},
"execution_count": 24,
@@ -529,9 +529,9 @@
" HumanMessage(content='How are you today?'),\n",
" AIMessage(content='Fine thanks!'),\n",
" HumanMessage(content=\"What's my name?\"),\n",
" AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72}),\n",
" AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72}),\n",
" HumanMessage(content='Where does P. Sherman live?'),\n",
" AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})]"
" AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})]"
]
},
"execution_count": 25,
@@ -565,7 +565,7 @@
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm sorry, but I don't have access to your personal information, so I don't know your name. How else may I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 31, 'prompt_tokens': 74, 'total_tokens': 105}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-0ab03495-1f7c-4151-9070-56d2d1c565ff-0', usage_metadata={'input_tokens': 74, 'output_tokens': 31, 'total_tokens': 105})"
"AIMessage(content=\"I'm sorry, but I don't have access to your personal information, so I don't know your name. How else may I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 31, 'prompt_tokens': 74, 'total_tokens': 105}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-0ab03495-1f7c-4151-9070-56d2d1c565ff-0', usage_metadata={'input_tokens': 74, 'output_tokens': 31, 'total_tokens': 105})"
]
},
"execution_count": 27,

@@ -71,7 +71,7 @@
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-1106\", temperature=0.2)"
"chat = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0.2)"
]
},
{

@@ -70,7 +70,7 @@
"\n",
"# Choose the LLM that will drive the agent\n",
"# Only certain models support this\n",
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-1106\", temperature=0)"
"chat = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
]
},
{

@@ -7,7 +7,7 @@
|
||||
"source": [
|
||||
"# How to split code\n",
|
||||
"\n",
|
||||
"[RecursiveCharacterTextSplitter](https://api.python.langchain.com/en/latest/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html) includes pre-built lists of separators that are useful for splitting text in a specific programming language.\n",
|
||||
"[RecursiveCharacterTextSplitter](https://python.langchain.com/api_reference/text_splitters/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html) includes pre-built lists of separators that are useful for splitting text in a specific programming language.\n",
|
||||
"\n",
|
||||
"Supported languages are stored in the `langchain_text_splitters.Language` enum. They include:\n",
|
||||
"\n",
|
||||
|
||||
@@ -58,7 +58,8 @@
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
|
||||
"if \"OPENAI_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -99,7 +100,7 @@
|
||||
"id": "b0f74589",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Above, we defined `temperature` as a [`ConfigurableField`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.utils.ConfigurableField.html#langchain_core.runnables.utils.ConfigurableField) that we can set at runtime. To do so, we use the [`with_config`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config) method like this:"
|
||||
"Above, we defined `temperature` as a [`ConfigurableField`](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.utils.ConfigurableField.html#langchain_core.runnables.utils.ConfigurableField) that we can set at runtime. To do so, we use the [`with_config`](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config) method like this:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -281,7 +282,8 @@
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass()"
|
||||
"if \"ANTHROPIC_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"ANTHROPIC_API_KEY\"] = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -227,7 +227,7 @@
|
||||
"source": [
|
||||
"### `LLMListwiseRerank`\n",
|
||||
"\n",
|
||||
"[LLMListwiseRerank](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html) uses [zero-shot listwise document reranking](https://arxiv.org/pdf/2305.02156) and functions similarly to `LLMChainFilter` as a robust but more expensive option. It is recommended to use a more powerful LLM.\n",
|
||||
"[LLMListwiseRerank](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html) uses [zero-shot listwise document reranking](https://arxiv.org/pdf/2305.02156) and functions similarly to `LLMChainFilter` as a robust but more expensive option. It is recommended to use a more powerful LLM.\n",
|
||||
"\n",
|
||||
"Note that `LLMListwiseRerank` requires a model with the [with_structured_output](/docs/integrations/chat/) method implemented."
|
||||
]
|
||||
@@ -258,7 +258,7 @@
|
||||
"from langchain.retrievers.document_compressors import LLMListwiseRerank\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
|
||||
"\n",
|
||||
"_filter = LLMListwiseRerank.from_llm(llm, top_n=1)\n",
|
||||
"compression_retriever = ContextualCompressionRetriever(\n",
|
||||
|
||||
@@ -42,13 +42,13 @@
|
||||
"source": [
|
||||
"LangChain [tools](/docs/concepts#tools) are interfaces that an agent, chain, or chat model can use to interact with the world. See [here](/docs/how_to/#tools) for how-to guides covering tool-calling, built-in tools, custom tools, and more information.\n",
|
||||
"\n",
|
||||
"LangChain tools-- instances of [BaseTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html)-- are [Runnables](/docs/concepts/#runnable-interface) with additional constraints that enable them to be invoked effectively by language models:\n",
|
||||
"LangChain tools-- instances of [BaseTool](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.BaseTool.html)-- are [Runnables](/docs/concepts/#runnable-interface) with additional constraints that enable them to be invoked effectively by language models:\n",
|
||||
"\n",
|
||||
"- Their inputs are constrained to be serializable, specifically strings and Python `dict` objects;\n",
|
||||
"- They contain names and descriptions indicating how and when they should be used;\n",
|
||||
"- They may contain a detailed [args_schema](https://python.langchain.com/v0.2/docs/how_to/custom_tools/) for their arguments. That is, while a tool (as a `Runnable`) might accept a single `dict` input, the specific keys and type information needed to populate a dict should be specified in the `args_schema`.\n",
|
||||
"- They may contain a detailed [args_schema](https://python.langchain.com/docs/how_to/custom_tools/) for their arguments. That is, while a tool (as a `Runnable`) might accept a single `dict` input, the specific keys and type information needed to populate a dict should be specified in the `args_schema`.\n",
|
||||
"\n",
|
||||
"Runnables that accept string or `dict` input can be converted to tools using the [as_tool](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.as_tool) method, which allows for the specification of names, descriptions, and additional schema information for arguments."
|
||||
"Runnables that accept string or `dict` input can be converted to tools using the [as_tool](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.as_tool) method, which allows for the specification of names, descriptions, and additional schema information for arguments."
|
||||
]
|
||||
},
{
@@ -180,7 +180,7 @@
"id": "32b1a992-8997-4c98-8eb2-c9fe9431b799",
"metadata": {},
"source": [
-"Alternatively, the schema can be fully specified by directly passing the desired [args_schema](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool.args_schema) for the tool:"
+"Alternatively, the schema can be fully specified by directly passing the desired [args_schema](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool.args_schema) for the tool:"
]
},
{
@@ -190,7 +190,7 @@
"metadata": {},
"outputs": [],
"source": [
-"from langchain_core.pydantic_v1 import BaseModel, Field\n",
+"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class GSchema(BaseModel):\n",
@@ -285,7 +285,7 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
-"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
+"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
]
},
{
@@ -331,7 +331,7 @@
"id": "9ba737ac-43a2-4a6f-b855-5bd0305017f1",
"metadata": {},
"source": [
-"We next create use a simple pre-built [LangGraph agent](https://python.langchain.com/v0.2/docs/tutorials/agents/) and provide it the tool:"
+"We next use a simple pre-built [LangGraph agent](https://python.langchain.com/docs/tutorials/agents/) and provide it the tool:"
]
},
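The wiring for that step, as a sketch (assuming `llm` and the retriever tool from the surrounding cells):

```python
from langgraph.prebuilt import create_react_agent

# The agent loops model -> tool call -> tool result -> model until done
agent = create_react_agent(llm, [tool])
```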
{
@@ -362,11 +362,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
-"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD', 'function': {'arguments': '{\"__arg1\":\"dogs\"}', 'name': 'pet_info_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 60, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d7f81de9-1fb7-4caf-81ed-16dcdb0b2ab4-0', tool_calls=[{'name': 'pet_info_retriever', 'args': {'__arg1': 'dogs'}, 'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD'}], usage_metadata={'input_tokens': 60, 'output_tokens': 19, 'total_tokens': 79})]}}\n",
+"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD', 'function': {'arguments': '{\"__arg1\":\"dogs\"}', 'name': 'pet_info_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 60, 'total_tokens': 79}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d7f81de9-1fb7-4caf-81ed-16dcdb0b2ab4-0', tool_calls=[{'name': 'pet_info_retriever', 'args': {'__arg1': 'dogs'}, 'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD'}], usage_metadata={'input_tokens': 60, 'output_tokens': 19, 'total_tokens': 79})]}}\n",
"----\n",
"{'tools': {'messages': [ToolMessage(content=\"[Document(id='86f835fe-4bbe-4ec6-aeb4-489a8b541707', page_content='Dogs are great companions, known for their loyalty and friendliness.')]\", name='pet_info_retriever', tool_call_id='call_W8cnfOjwqEn4cFcg19LN9mYD')]}}\n",
"----\n",
-"{'agent': {'messages': [AIMessage(content='Dogs are known for being great companions, known for their loyalty and friendliness.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 134, 'total_tokens': 152}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9ca5847a-a5eb-44c0-a774-84cc2c5bbc5b-0', usage_metadata={'input_tokens': 134, 'output_tokens': 18, 'total_tokens': 152})]}}\n",
+"{'agent': {'messages': [AIMessage(content='Dogs are known for being great companions, known for their loyalty and friendliness.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 134, 'total_tokens': 152}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9ca5847a-a5eb-44c0-a774-84cc2c5bbc5b-0', usage_metadata={'input_tokens': 134, 'output_tokens': 18, 'total_tokens': 152})]}}\n",
"----\n"
]
}
@@ -497,11 +497,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
-"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_17iLPWvOD23zqwd1QVQ00Y63', 'function': {'arguments': '{\"question\":\"What are dogs known for according to pirates?\",\"answer_style\":\"quote\"}', 'name': 'pet_expert'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 28, 'prompt_tokens': 59, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-7fef44f3-7bba-4e63-8c51-2ad9c5e65e2e-0', tool_calls=[{'name': 'pet_expert', 'args': {'question': 'What are dogs known for according to pirates?', 'answer_style': 'quote'}, 'id': 'call_17iLPWvOD23zqwd1QVQ00Y63'}], usage_metadata={'input_tokens': 59, 'output_tokens': 28, 'total_tokens': 87})]}}\n",
+"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_17iLPWvOD23zqwd1QVQ00Y63', 'function': {'arguments': '{\"question\":\"What are dogs known for according to pirates?\",\"answer_style\":\"quote\"}', 'name': 'pet_expert'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 28, 'prompt_tokens': 59, 'total_tokens': 87}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-7fef44f3-7bba-4e63-8c51-2ad9c5e65e2e-0', tool_calls=[{'name': 'pet_expert', 'args': {'question': 'What are dogs known for according to pirates?', 'answer_style': 'quote'}, 'id': 'call_17iLPWvOD23zqwd1QVQ00Y63'}], usage_metadata={'input_tokens': 59, 'output_tokens': 28, 'total_tokens': 87})]}}\n",
"----\n",
"{'tools': {'messages': [ToolMessage(content='\"Dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.\"', name='pet_expert', tool_call_id='call_17iLPWvOD23zqwd1QVQ00Y63')]}}\n",
"----\n",
-"{'agent': {'messages': [AIMessage(content='According to pirates, dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 119, 'total_tokens': 146}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5a30edc3-7be0-4743-b980-ca2f8cad9b8d-0', usage_metadata={'input_tokens': 119, 'output_tokens': 27, 'total_tokens': 146})]}}\n",
+"{'agent': {'messages': [AIMessage(content='According to pirates, dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 119, 'total_tokens': 146}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5a30edc3-7be0-4743-b980-ca2f8cad9b8d-0', usage_metadata={'input_tokens': 119, 'output_tokens': 27, 'total_tokens': 146})]}}\n",
"----\n"
]
}

@@ -16,7 +16,7 @@
"\n",
"LangChain has some built-in callback handlers, but you will often want to create your own handlers with custom logic.\n",
"\n",
-"To create a custom callback handler, we need to determine the [event(s)](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) we want our callback handler to handle as well as what we want our callback handler to do when the event is triggered. Then all we need to do is attach the callback handler to the object, for example via [the constructor](/docs/how_to/callbacks_constructor) or [at runtime](/docs/how_to/callbacks_runtime).\n",
+"To create a custom callback handler, we need to determine the [event(s)](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) we want our callback handler to handle as well as what we want our callback handler to do when the event is triggered. Then all we need to do is attach the callback handler to the object, for example via [the constructor](/docs/how_to/callbacks_constructor) or [at runtime](/docs/how_to/callbacks_runtime).\n",
"\n",
"In the example below, we'll implement streaming with a custom handler.\n",
"\n",
@@ -107,7 +107,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
-"You can see [this reference page](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) for a list of events you can handle. Note that the `handle_chain_*` events run for most LCEL runnables.\n",
+"You can see [this reference page](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) for a list of events you can handle. Note that the `handle_chain_*` events run for most LCEL runnables.\n",
"\n",
"## Next steps\n",
"\n",

@@ -16,7 +16,7 @@
"\n",
"In this guide, we'll learn how to create a custom chat model using LangChain abstractions.\n",
"\n",
-"Wrapping your LLM with the standard [`BaseChatModel`](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.chat_models.BaseChatModel.html) interface allow you to use your LLM in existing LangChain programs with minimal code modifications!\n",
+"Wrapping your LLM with the standard [`BaseChatModel`](https://python.langchain.com/api_reference/core/language_models/langchain_core.language_models.chat_models.BaseChatModel.html) interface allows you to use your LLM in existing LangChain programs with minimal code modifications!\n",
"\n",
"As a bonus, your LLM will automatically become a LangChain `Runnable` and will benefit from some optimizations out of the box (e.g., batch via a threadpool), async support, the `astream_events` API, etc.\n",
"\n",
@@ -503,7 +503,7 @@
"\n",
"Documentation:\n",
"\n",
-"* The model contains doc-strings for all initialization arguments, as these will be surfaced in the [APIReference](https://api.python.langchain.com/en/stable/langchain_api_reference.html).\n",
+"* The model contains doc-strings for all initialization arguments, as these will be surfaced in the [API Reference](https://python.langchain.com/api_reference/langchain/index.html).\n",
"* The class doc-string for the model contains a link to the model API if the model is powered by a service.\n",
"\n",
"Tests:\n",

@@ -402,7 +402,7 @@
"\n",
"Documentation:\n",
"\n",
-"* The model contains doc-strings for all initialization arguments, as these will be surfaced in the [APIReference](https://api.python.langchain.com/en/stable/langchain_api_reference.html).\n",
+"* The model contains doc-strings for all initialization arguments, as these will be surfaced in the [API Reference](https://python.langchain.com/api_reference/langchain/index.html).\n",
"* The class doc-string for the model contains a link to the model API if the model is powered by a service.\n",
"\n",
"Tests:\n",

@@ -270,7 +270,7 @@
"\n",
"Documentation:\n",
"\n",
-"* The retriever contains doc-strings for all initialization arguments, as these will be surfaced in the [API Reference](https://api.python.langchain.com/en/stable/langchain_api_reference.html).\n",
+"* The retriever contains doc-strings for all initialization arguments, as these will be surfaced in the [API Reference](https://python.langchain.com/api_reference/langchain/index.html).\n",
"* The class doc-string for the model contains a link to any relevant APIs used for the retriever (e.g., if the retriever is retrieving from wikipedia, it'll be good to link to the wikipedia API!)\n",
"\n",
"Tests:\n",

@@ -9,20 +9,20 @@
"\n",
"When constructing an agent, you will need to provide it with a list of `Tool`s that it can use. Besides the actual function that is called, the Tool consists of several components:\n",
"\n",
-"| Attribute       | Type                      | Description                                                                                                        |\n",
-"|-----------------|---------------------------|------------------------------------------------------------------------------------------------------------------|\n",
-"| name            | str                       | Must be unique within a set of tools provided to an LLM or agent.                                                  |\n",
-"| description     | str                       | Describes what the tool does. Used as context by the LLM or agent.                                                 |\n",
-"| args_schema     | Pydantic BaseModel        | Optional but recommended, can be used to provide more information (e.g., few-shot examples) or validation for expected parameters |\n",
-"| return_direct   | boolean                   | Only relevant for agents. When True, after invoking the given tool, the agent will stop and return the result direcly to the user. |\n",
+"| Attribute     | Type               | Description                                                                                                                                                     |\n",
+"|---------------|--------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|\n",
+"| name          | str                | Must be unique within a set of tools provided to an LLM or agent.                                                                                               |\n",
+"| description   | str                | Describes what the tool does. Used as context by the LLM or agent.                                                                                              |\n",
+"| args_schema   | pydantic.BaseModel | Optional but recommended, and required if using callback handlers. It can be used to provide more information (e.g., few-shot examples) or validation for expected parameters. |\n",
+"| return_direct | boolean            | Only relevant for agents. When True, after invoking the given tool, the agent will stop and return the result directly to the user.                             |\n",
"\n",
"LangChain supports the creation of tools from:\n",
"\n",
"1. Functions;\n",
"2. LangChain [Runnables](/docs/concepts#runnable-interface);\n",
-"3. By sub-classing from [BaseTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html) -- This is the most flexible method, it provides the largest degree of control, at the expense of more effort and code.\n",
+"3. By sub-classing from [BaseTool](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.BaseTool.html) -- This is the most flexible method; it provides the largest degree of control, at the expense of more effort and code.\n",
"\n",
-"Creating tools from functions may be sufficient for most use cases, and can be done via a simple [@tool decorator](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html#langchain_core.tools.tool). If more configuration is needed-- e.g., specification of both sync and async implementations-- one can also use the [StructuredTool.from_function](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html#langchain_core.tools.StructuredTool.from_function) class method.\n",
+"Creating tools from functions may be sufficient for most use cases, and can be done via a simple [@tool decorator](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.tool.html#langchain_core.tools.tool). If more configuration is needed-- e.g., specification of both sync and async implementations-- one can also use the [StructuredTool.from_function](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.StructuredTool.html#langchain_core.tools.StructuredTool.from_function) class method.\n",
"\n",
"In this guide we provide an overview of these methods.\n",
"\n",
@@ -48,7 +48,14 @@
"cell_type": "code",
"execution_count": 1,
"id": "cc7005cd-072f-4d37-8453-6297468e5192",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:52.645451Z",
+"iopub.status.busy": "2024-09-10T20:25:52.645081Z",
+"iopub.status.idle": "2024-09-10T20:25:53.030958Z",
+"shell.execute_reply": "2024-09-10T20:25:53.030669Z"
+}
+},
"outputs": [
{
"name": "stdout",
@@ -88,7 +95,14 @@
"cell_type": "code",
"execution_count": 2,
"id": "0c0991db-b997-4611-be37-4346e660506b",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.032544Z",
+"iopub.status.busy": "2024-09-10T20:25:53.032420Z",
+"iopub.status.idle": "2024-09-10T20:25:53.035349Z",
+"shell.execute_reply": "2024-09-10T20:25:53.035123Z"
+}
+},
"outputs": [],
"source": [
"from langchain_core.tools import tool\n",
@@ -112,22 +126,29 @@
"cell_type": "code",
"execution_count": 3,
"id": "5626423f-053e-4a66-adca-1d794d835397",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.036658Z",
+"iopub.status.busy": "2024-09-10T20:25:53.036574Z",
+"iopub.status.idle": "2024-09-10T20:25:53.041154Z",
+"shell.execute_reply": "2024-09-10T20:25:53.040964Z"
+}
+},
"outputs": [
{
"data": {
"text/plain": [
-"{'title': 'multiply_by_maxSchema',\n",
-" 'description': 'Multiply a by the maximum of b.',\n",
-" 'type': 'object',\n",
-" 'properties': {'a': {'title': 'A',\n",
-"   'description': 'scale factor',\n",
+"{'description': 'Multiply a by the maximum of b.',\n",
+" 'properties': {'a': {'description': 'scale factor',\n",
+"   'title': 'A',\n",
"   'type': 'string'},\n",
-"  'b': {'title': 'B',\n",
-"   'description': 'list of ints over which to take maximum',\n",
-"   'type': 'array',\n",
-"   'items': {'type': 'integer'}}},\n",
-" 'required': ['a', 'b']}"
+"  'b': {'description': 'list of ints over which to take maximum',\n",
+"   'items': {'type': 'integer'},\n",
+"   'title': 'B',\n",
+"   'type': 'array'}},\n",
+" 'required': ['a', 'b'],\n",
+" 'title': 'multiply_by_maxSchema',\n",
+" 'type': 'object'}"
]
},
"execution_count": 3,
@@ -163,7 +184,14 @@
"cell_type": "code",
"execution_count": 4,
"id": "9216d03a-f6ea-4216-b7e1-0661823a4c0b",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.042516Z",
+"iopub.status.busy": "2024-09-10T20:25:53.042427Z",
+"iopub.status.idle": "2024-09-10T20:25:53.045217Z",
+"shell.execute_reply": "2024-09-10T20:25:53.045010Z"
+}
+},
"outputs": [
{
"name": "stdout",
@@ -171,13 +199,13 @@
"text": [
"multiplication-tool\n",
"Multiply two numbers.\n",
-"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n",
+"{'a': {'description': 'first number', 'title': 'A', 'type': 'integer'}, 'b': {'description': 'second number', 'title': 'B', 'type': 'integer'}}\n",
"True\n"
]
}
],
"source": [
-"from langchain.pydantic_v1 import BaseModel, Field\n",
+"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class CalculatorInput(BaseModel):\n",
@@ -218,19 +246,26 @@
"cell_type": "code",
"execution_count": 5,
"id": "336f5538-956e-47d5-9bde-b732559f9e61",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.046526Z",
+"iopub.status.busy": "2024-09-10T20:25:53.046456Z",
+"iopub.status.idle": "2024-09-10T20:25:53.050045Z",
+"shell.execute_reply": "2024-09-10T20:25:53.049836Z"
+}
+},
"outputs": [
{
"data": {
"text/plain": [
-"{'title': 'fooSchema',\n",
-" 'description': 'The foo.',\n",
-" 'type': 'object',\n",
-" 'properties': {'bar': {'title': 'Bar',\n",
-"   'description': 'The bar.',\n",
+"{'description': 'The foo.',\n",
+" 'properties': {'bar': {'description': 'The bar.',\n",
+"   'title': 'Bar',\n",
"   'type': 'string'},\n",
-"  'baz': {'title': 'Baz', 'description': 'The baz.', 'type': 'integer'}},\n",
-" 'required': ['bar', 'baz']}"
+"  'baz': {'description': 'The baz.', 'title': 'Baz', 'type': 'integer'}},\n",
+" 'required': ['bar', 'baz'],\n",
+" 'title': 'fooSchema',\n",
+" 'type': 'object'}"
]
},
"execution_count": 5,
@@ -259,7 +294,7 @@
"metadata": {},
"source": [
":::{.callout-caution}\n",
-"By default, `@tool(parse_docstring=True)` will raise `ValueError` if the docstring does not parse correctly. See [API Reference](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) for detail and examples.\n",
+"By default, `@tool(parse_docstring=True)` will raise `ValueError` if the docstring does not parse correctly. See the [API Reference](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.tool.html) for details and examples.\n",
":::"
]
},
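The pattern behind the `fooSchema` output above, as a sketch (Google-style docstring sections are parsed into argument descriptions):

```python
from langchain_core.tools import tool


@tool(parse_docstring=True)
def foo(bar: str, baz: int) -> str:
    """The foo.

    Args:
        bar: The bar.
        baz: The baz.
    """
    return bar
```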
@@ -277,7 +312,14 @@
"cell_type": "code",
"execution_count": 6,
"id": "564fbe6f-11df-402d-b135-ef6ff25e1e63",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.051302Z",
+"iopub.status.busy": "2024-09-10T20:25:53.051218Z",
+"iopub.status.idle": "2024-09-10T20:25:53.059704Z",
+"shell.execute_reply": "2024-09-10T20:25:53.059490Z"
+}
+},
"outputs": [
{
"name": "stdout",
@@ -320,7 +362,14 @@
"cell_type": "code",
"execution_count": 7,
"id": "6bc055d4-1fbe-4db5-8881-9c382eba6b1b",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.060971Z",
+"iopub.status.busy": "2024-09-10T20:25:53.060883Z",
+"iopub.status.idle": "2024-09-10T20:25:53.064615Z",
+"shell.execute_reply": "2024-09-10T20:25:53.064408Z"
+}
+},
"outputs": [
{
"name": "stdout",
@@ -329,7 +378,7 @@
"6\n",
"Calculator\n",
"multiply numbers\n",
-"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n"
+"{'a': {'description': 'first number', 'title': 'A', 'type': 'integer'}, 'b': {'description': 'second number', 'title': 'B', 'type': 'integer'}}\n"
]
}
],
@@ -366,24 +415,39 @@
"source": [
"## Creating tools from Runnables\n",
"\n",
-"LangChain [Runnables](/docs/concepts#runnable-interface) that accept string or `dict` input can be converted to tools using the [as_tool](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.as_tool) method, which allows for the specification of names, descriptions, and additional schema information for arguments.\n",
+"LangChain [Runnables](/docs/concepts#runnable-interface) that accept string or `dict` input can be converted to tools using the [as_tool](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.as_tool) method, which allows for the specification of names, descriptions, and additional schema information for arguments.\n",
"\n",
"Example usage:"
]
},
{
"cell_type": "code",
-"execution_count": 9,
+"execution_count": 8,
"id": "8ef593c5-cf72-4c10-bfc9-7d21874a0c24",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.065797Z",
+"iopub.status.busy": "2024-09-10T20:25:53.065733Z",
+"iopub.status.idle": "2024-09-10T20:25:53.130458Z",
+"shell.execute_reply": "2024-09-10T20:25:53.130229Z"
+}
+},
"outputs": [
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"/var/folders/4j/2rz3865x6qg07tx43146py8h0000gn/T/ipykernel_95770/2548361071.py:14: LangChainBetaWarning: This API is in beta and may change in the future.\n",
+"  as_tool = chain.as_tool(\n"
+]
+},
{
"data": {
"text/plain": [
"{'answer_style': {'title': 'Answer Style', 'type': 'string'}}"
]
},
-"execution_count": 9,
+"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
@@ -428,19 +492,26 @@
},
{
"cell_type": "code",
-"execution_count": 10,
+"execution_count": 9,
"id": "1dad8f8e",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.131904Z",
+"iopub.status.busy": "2024-09-10T20:25:53.131803Z",
+"iopub.status.idle": "2024-09-10T20:25:53.136797Z",
+"shell.execute_reply": "2024-09-10T20:25:53.136563Z"
+}
+},
"outputs": [],
"source": [
"from typing import Optional, Type\n",
"\n",
-"from langchain.pydantic_v1 import BaseModel\n",
"from langchain_core.callbacks import (\n",
"    AsyncCallbackManagerForToolRun,\n",
"    CallbackManagerForToolRun,\n",
")\n",
"from langchain_core.tools import BaseTool\n",
+"from pydantic import BaseModel\n",
"\n",
"\n",
"class CalculatorInput(BaseModel):\n",
@@ -448,9 +519,11 @@
"    b: int = Field(description=\"second number\")\n",
"\n",
"\n",
+"# Note: It's important that every field has type hints. BaseTool is a\n",
+"# Pydantic class and not having type hints can lead to unexpected behavior.\n",
"class CustomCalculatorTool(BaseTool):\n",
-"    name = \"Calculator\"\n",
-"    description = \"useful for when you need to answer questions about math\"\n",
+"    name: str = \"Calculator\"\n",
+"    description: str = \"useful for when you need to answer questions about math\"\n",
"    args_schema: Type[BaseModel] = CalculatorInput\n",
"    return_direct: bool = True\n",
"\n",
@@ -477,9 +550,16 @@
},
{
"cell_type": "code",
-"execution_count": 11,
+"execution_count": 10,
"id": "bb551c33",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.138074Z",
+"iopub.status.busy": "2024-09-10T20:25:53.138007Z",
+"iopub.status.idle": "2024-09-10T20:25:53.141360Z",
+"shell.execute_reply": "2024-09-10T20:25:53.141158Z"
+}
+},
"outputs": [
{
"name": "stdout",
@@ -487,7 +567,7 @@
"text": [
"Calculator\n",
"useful for when you need to answer questions about math\n",
-"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n",
+"{'a': {'description': 'first number', 'title': 'A', 'type': 'integer'}, 'b': {'description': 'second number', 'title': 'B', 'type': 'integer'}}\n",
"True\n",
"6\n",
"6\n"
@@ -512,7 +592,7 @@
"source": [
"## How to create async tools\n",
"\n",
-"LangChain Tools implement the [Runnable interface 🏃](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html).\n",
+"LangChain Tools implement the [Runnable interface 🏃](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html).\n",
"\n",
"All Runnables expose the `invoke` and `ainvoke` methods (as well as other methods like `batch`, `abatch`, `astream` etc).\n",
"\n",
@@ -528,9 +608,16 @@
},
{
"cell_type": "code",
-"execution_count": 12,
+"execution_count": 11,
"id": "6615cb77-fd4c-4676-8965-f92cc71d4944",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.142587Z",
+"iopub.status.busy": "2024-09-10T20:25:53.142504Z",
+"iopub.status.idle": "2024-09-10T20:25:53.147205Z",
+"shell.execute_reply": "2024-09-10T20:25:53.146995Z"
+}
+},
"outputs": [
{
"name": "stdout",
@@ -560,9 +647,16 @@
},
{
"cell_type": "code",
-"execution_count": 13,
+"execution_count": 12,
"id": "bb2af583-eadd-41f4-a645-bf8748bd3dcd",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.148383Z",
+"iopub.status.busy": "2024-09-10T20:25:53.148307Z",
+"iopub.status.idle": "2024-09-10T20:25:53.152684Z",
+"shell.execute_reply": "2024-09-10T20:25:53.152486Z"
+}
+},
"outputs": [
{
"name": "stdout",
@@ -605,9 +699,16 @@
},
{
"cell_type": "code",
-"execution_count": 14,
+"execution_count": 13,
"id": "4ad0932c-8610-4278-8c57-f9218f654c8a",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.153849Z",
+"iopub.status.busy": "2024-09-10T20:25:53.153773Z",
+"iopub.status.idle": "2024-09-10T20:25:53.158312Z",
+"shell.execute_reply": "2024-09-10T20:25:53.158130Z"
+}
+},
"outputs": [
{
"name": "stdout",
@@ -650,9 +751,16 @@
},
{
"cell_type": "code",
-"execution_count": 15,
+"execution_count": 14,
"id": "7094c0e8-6192-4870-a942-aad5b5ae48fd",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.159440Z",
+"iopub.status.busy": "2024-09-10T20:25:53.159364Z",
+"iopub.status.idle": "2024-09-10T20:25:53.160922Z",
+"shell.execute_reply": "2024-09-10T20:25:53.160712Z"
+}
+},
"outputs": [],
"source": [
"from langchain_core.tools import ToolException\n",
@@ -673,9 +781,16 @@
},
{
"cell_type": "code",
-"execution_count": 16,
+"execution_count": 15,
"id": "b4d22022-b105-4ccc-a15b-412cb9ea3097",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.162046Z",
+"iopub.status.busy": "2024-09-10T20:25:53.161968Z",
+"iopub.status.idle": "2024-09-10T20:25:53.165236Z",
+"shell.execute_reply": "2024-09-10T20:25:53.165052Z"
+}
+},
"outputs": [
{
"data": {
@@ -683,7 +798,7 @@
"'Error: There is no city by the name of foobar.'"
]
},
-"execution_count": 16,
+"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
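The error-handling pattern these outputs come from, sketched (the city-lookup function is illustrative):

```python
from langchain_core.tools import StructuredTool, ToolException


def get_weather(city: str) -> str:
    """Look up the weather for a city."""
    raise ToolException(f"Error: There is no city by the name of {city}.")


# handle_tool_error=True returns the exception text instead of raising;
# it can also be a fallback string, or a callable that maps the
# exception to a string.
get_weather_tool = StructuredTool.from_function(
    func=get_weather,
    handle_tool_error=True,
)

get_weather_tool.invoke({"city": "foobar"})
# -> 'Error: There is no city by the name of foobar.'
```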
@@ -707,9 +822,16 @@
},
{
"cell_type": "code",
-"execution_count": 17,
+"execution_count": 16,
"id": "3fad1728-d367-4e1b-9b54-3172981271cf",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.166372Z",
+"iopub.status.busy": "2024-09-10T20:25:53.166294Z",
+"iopub.status.idle": "2024-09-10T20:25:53.169739Z",
+"shell.execute_reply": "2024-09-10T20:25:53.169553Z"
+}
+},
"outputs": [
{
"data": {
@@ -717,7 +839,7 @@
"\"There is no such city, but it's probably above 0K there!\""
]
},
-"execution_count": 17,
+"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
@@ -741,9 +863,16 @@
},
{
"cell_type": "code",
-"execution_count": 18,
+"execution_count": 17,
"id": "ebfe7c1f-318d-4e58-99e1-f31e69473c46",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.170937Z",
+"iopub.status.busy": "2024-09-10T20:25:53.170859Z",
+"iopub.status.idle": "2024-09-10T20:25:53.174498Z",
+"shell.execute_reply": "2024-09-10T20:25:53.174304Z"
+}
+},
"outputs": [
{
"data": {
@@ -751,7 +880,7 @@
"'The following errors occurred during tool execution: `Error: There is no city by the name of foobar.`'"
]
},
-"execution_count": 18,
+"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
@@ -778,7 +907,7 @@
"\n",
"Sometimes there are artifacts of a tool's execution that we want to make accessible to downstream components in our chain or agent, but that we don't want to expose to the model itself. For example, if a tool returns custom objects like Documents, we may want to pass some view or metadata about this output to the model without passing the raw output itself. At the same time, we may want to be able to access this full output elsewhere, for example in downstream tools.\n",
"\n",
-"The Tool and [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html) interfaces make it possible to distinguish between the parts of the tool output meant for the model (this is the ToolMessage.content) and those parts which are meant for use outside the model (ToolMessage.artifact).\n",
+"The Tool and [ToolMessage](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.tool.ToolMessage.html) interfaces make it possible to distinguish between the parts of the tool output meant for the model (this is the ToolMessage.content) and those parts which are meant for use outside the model (ToolMessage.artifact).\n",
"\n",
":::info Requires ``langchain-core >= 0.2.19``\n",
"\n",
@@ -791,9 +920,16 @@
},
{
"cell_type": "code",
-"execution_count": 1,
+"execution_count": 18,
"id": "14905425-0334-43a0-9de9-5bcf622ede0e",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.175683Z",
+"iopub.status.busy": "2024-09-10T20:25:53.175605Z",
+"iopub.status.idle": "2024-09-10T20:25:53.178798Z",
+"shell.execute_reply": "2024-09-10T20:25:53.178601Z"
+}
+},
"outputs": [],
"source": [
"import random\n",
@@ -820,9 +956,16 @@
},
{
"cell_type": "code",
-"execution_count": 9,
+"execution_count": 19,
"id": "0f2e1528-404b-46e6-b87c-f0957c4b9217",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.179881Z",
+"iopub.status.busy": "2024-09-10T20:25:53.179807Z",
+"iopub.status.idle": "2024-09-10T20:25:53.182100Z",
+"shell.execute_reply": "2024-09-10T20:25:53.181940Z"
+}
+},
"outputs": [
{
"data": {
@@ -830,7 +973,7 @@
"'Successfully generated array of 10 random ints in [0, 9].'"
]
},
-"execution_count": 9,
+"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
@@ -849,17 +992,24 @@
},
{
"cell_type": "code",
-"execution_count": 3,
+"execution_count": 20,
"id": "cc197777-26eb-46b3-a83b-c2ce116c6311",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.183238Z",
+"iopub.status.busy": "2024-09-10T20:25:53.183170Z",
+"iopub.status.idle": "2024-09-10T20:25:53.185752Z",
+"shell.execute_reply": "2024-09-10T20:25:53.185567Z"
+}
+},
"outputs": [
{
"data": {
"text/plain": [
-"ToolMessage(content='Successfully generated array of 10 random ints in [0, 9].', name='generate_random_ints', tool_call_id='123', artifact=[1, 4, 2, 5, 3, 9, 0, 4, 7, 7])"
+"ToolMessage(content='Successfully generated array of 10 random ints in [0, 9].', name='generate_random_ints', tool_call_id='123', artifact=[4, 8, 2, 4, 1, 0, 9, 5, 8, 1])"
]
},
-"execution_count": 3,
+"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
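The tool shape that produces such a ToolMessage, sketched to mirror the `generate_random_ints` signature visible in the outputs:

```python
import random
from typing import List, Tuple

from langchain_core.tools import tool


@tool(response_format="content_and_artifact")
def generate_random_ints(min: int, max: int, size: int) -> Tuple[str, List[int]]:
    """Generate size random ints in the range [min, max]."""
    array = [random.randint(min, max) for _ in range(size)]
    content = f"Successfully generated array of {size} random ints in [{min}, {max}]."
    # content goes to the model; the raw array rides along as the artifact
    return content, array
```

Invoked with a full tool call (a dict carrying `name`, `args`, `id`, and `type: "tool_call"`), it returns a ToolMessage like the one above, with the array in `artifact`.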
@@ -885,9 +1035,16 @@
},
{
"cell_type": "code",
-"execution_count": 6,
+"execution_count": 21,
"id": "fe1a09d1-378b-4b91-bb5e-0697c3d7eb92",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.186884Z",
+"iopub.status.busy": "2024-09-10T20:25:53.186803Z",
+"iopub.status.idle": "2024-09-10T20:25:53.190718Z",
+"shell.execute_reply": "2024-09-10T20:25:53.190494Z"
+}
+},
"outputs": [],
"source": [
"from langchain_core.tools import BaseTool\n",
@@ -917,17 +1074,24 @@
},
{
"cell_type": "code",
-"execution_count": 8,
+"execution_count": 22,
"id": "8c3d16f6-1c4a-48ab-b05a-38547c592e79",
-"metadata": {},
+"metadata": {
+"execution": {
+"iopub.execute_input": "2024-09-10T20:25:53.191872Z",
+"iopub.status.busy": "2024-09-10T20:25:53.191794Z",
+"iopub.status.idle": "2024-09-10T20:25:53.194396Z",
+"shell.execute_reply": "2024-09-10T20:25:53.194184Z"
+}
+},
"outputs": [
{
"data": {
"text/plain": [
-"ToolMessage(content='Generated 3 floats in [0.1, 3.3333], rounded to 4 decimals.', name='generate_random_floats', tool_call_id='123', artifact=[1.4277, 0.7578, 2.4871])"
+"ToolMessage(content='Generated 3 floats in [0.1, 3.3333], rounded to 4 decimals.', name='generate_random_floats', tool_call_id='123', artifact=[1.5566, 0.5134, 2.7914])"
]
},
-"execution_count": 8,
+"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}

@@ -9,7 +9,7 @@
"\n",
"A [comma-separated values (CSV)](https://en.wikipedia.org/wiki/Comma-separated_values) file is a delimited text file that uses a comma to separate values. Each line of the file is a data record. Each record consists of one or more fields, separated by commas.\n",
"\n",
-"LangChain implements a [CSV Loader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.csv_loader.CSVLoader.html) that will load CSV files into a sequence of [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects. Each row of the CSV file is translated to one document."
+"LangChain implements a [CSV Loader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.csv_loader.CSVLoader.html) that will load CSV files into a sequence of [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects. Each row of the CSV file is translated to one document."
]
},
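A minimal usage sketch (the file path is illustrative):

```python
from langchain_community.document_loaders.csv_loader import CSVLoader

loader = CSVLoader(file_path="./example_data/example.csv")
docs = loader.load()

print(docs[0].page_content)
print(docs[0].metadata)  # includes 'source' (the file path) and 'row'
```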
{
@@ -88,7 +88,7 @@
"source": [
"## Specify a column to identify the document source\n",
"\n",
-"The `\"source\"` key on [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) metadata can be set using a column of the CSV. Use the `source_column` argument to specify a source for the document created from each row. Otherwise `file_path` will be used as the source for all documents created from the CSV file.\n",
+"The `\"source\"` key on [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) metadata can be set using a column of the CSV. Use the `source_column` argument to specify a source for the document created from each row. Otherwise `file_path` will be used as the source for all documents created from the CSV file.\n",
"\n",
"This is useful when using documents loaded from CSV files for chains that answer questions using sources."
]
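Sketched with a hypothetical column name:

```python
from langchain_community.document_loaders.csv_loader import CSVLoader

# Each row's "Team" value becomes that document's metadata['source']
loader = CSVLoader(file_path="./example_data/example.csv", source_column="Team")
docs = loader.load()
```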

@@ -63,7 +63,7 @@
"* The `load` method is a convenience method meant solely for prototyping work -- it just invokes `list(self.lazy_load())`.\n",
"* The `alazy_load` method has a default implementation that will delegate to `lazy_load`. If you're using async, we recommend overriding the default implementation and providing a native async implementation.\n",
"\n",
-"::: {.callout-important}\n",
+":::{.callout-important}\n",
"When implementing a document loader do **NOT** provide parameters via the `lazy_load` or `alazy_load` methods.\n",
"\n",
"All configuration is expected to be passed through the initializer (__init__). This was a design choice made by LangChain to make sure that once a document loader has been instantiated it has all the information needed to load documents.\n",
@@ -235,7 +235,7 @@
"id": "56cb443e-f987-4386-b4ec-975ee129adb2",
"metadata": {},
"source": [
-"::: {.callout-tip}\n",
+":::{.callout-tip}\n",
"\n",
"`load()` can be helpful in an interactive environment such as a jupyter notebook.\n",
"\n",
@@ -276,7 +276,7 @@
"source": [
"## Working with Files\n",
"\n",
-"Many document loaders invovle parsing files. The difference between such loaders usually stems from how the file is parsed rather than how the file is loaded. For example, you can use `open` to read the binary content of either a PDF or a markdown file, but you need different parsing logic to convert that binary data into text.\n",
+"Many document loaders involve parsing files. The difference between such loaders usually stems from how the file is parsed, rather than how the file is loaded. For example, you can use `open` to read the binary content of either a PDF or a markdown file, but you need different parsing logic to convert that binary data into text.\n",
"\n",
"As a result, it can be helpful to decouple the parsing logic from the loading logic, which makes it easier to re-use a given parser regardless of how the data was loaded.\n",
"\n",
@@ -355,7 +355,7 @@
"id": "433bfb7c-7767-43bc-b71e-42413d7494a8",
"metadata": {},
"source": [
-"Using the **blob** API also allows one to load content direclty from memory without having to read it from a file!"
+"Using the **blob** API also allows one to load content directly from memory without having to read it from a file!"
]
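For example (the text is illustrative):

```python
from langchain_core.document_loaders import Blob

# Wrap in-memory data in a Blob; no file on disk required
blob = Blob.from_data("Some text held in memory", mime_type="text/plain")
print(blob.as_string())
```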
},
{

@@ -7,7 +7,7 @@
"source": [
"# How to load documents from a directory\n",
"\n",
-"LangChain's [DirectoryLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.directory.DirectoryLoader.html) implements functionality for reading files from disk into LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects. Here we demonstrate:\n",
+"LangChain's [DirectoryLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.directory.DirectoryLoader.html) implements functionality for reading files from disk into LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects. Here we demonstrate:\n",
"\n",
"- How to load from a filesystem, including use of wildcard patterns;\n",
"- How to use multithreading for file I/O;\n",
@@ -134,7 +134,7 @@
"metadata": {},
"source": [
"## Change loader class\n",
-"By default this uses the `UnstructuredLoader` class. To customize the loader, specify the loader class in the `loader_cls` kwarg. Below we show an example using [TextLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.text.TextLoader.html):"
+"By default this uses the `UnstructuredLoader` class. To customize the loader, specify the loader class in the `loader_cls` kwarg. Below we show an example using [TextLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.text.TextLoader.html):"
]
},
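Sketched together, a wildcard glob over markdown files loaded with `TextLoader` (the paths are illustrative):

```python
from langchain_community.document_loaders import DirectoryLoader, TextLoader

loader = DirectoryLoader("../", glob="**/*.md", loader_cls=TextLoader)
docs = loader.load()
print(len(docs))
```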
{

@@ -9,7 +9,7 @@
"\n",
"The HyperText Markup Language or [HTML](https://en.wikipedia.org/wiki/HTML) is the standard markup language for documents designed to be displayed in a web browser.\n",
"\n",
-"This covers how to load `HTML` documents into a LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects that we can use downstream.\n",
+"This covers how to load `HTML` documents into LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects that we can use downstream.\n",
"\n",
"Parsing HTML files often requires specialized tools. Here we demonstrate parsing via [Unstructured](https://unstructured-io.github.io/unstructured/) and [BeautifulSoup4](https://beautiful-soup-4.readthedocs.io/en/latest/), which can be installed via pip. Head over to the integrations page to find integrations with additional services, such as [Azure AI Document Intelligence](/docs/integrations/document_loaders/azure_document_intelligence) or [FireCrawl](/docs/integrations/document_loaders/firecrawl).\n",
"\n",
@@ -4,8 +4,8 @@

[JSON Lines](https://jsonlines.org/) is a file format where each line is a valid JSON value.

-LangChain implements a [JSONLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.json_loader.JSONLoader.html)
-to convert JSON and JSONL data into LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document)
+LangChain implements a [JSONLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.json_loader.JSONLoader.html)
+to convert JSON and JSONL data into LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document)
objects. It uses a specified [jq schema](https://en.wikipedia.org/wiki/Jq_(programming_language)) to parse the JSON files, allowing for the extraction of specific fields into the content
and metadata of the LangChain Document.

@@ -182,7 +182,7 @@ pprint(data)
</CodeOutputBlock>


-Another option is set `jq_schema='.'` and provide `content_key`:
+Another option is to set `jq_schema='.'` and provide `content_key`:

```python
loader = JSONLoader(

@@ -9,14 +9,14 @@
"\n",
"[Markdown](https://en.wikipedia.org/wiki/Markdown) is a lightweight markup language for creating formatted text using a plain-text editor.\n",
"\n",
-"Here we cover how to load `Markdown` documents into LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects that we can use downstream.\n",
+"Here we cover how to load `Markdown` documents into LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects that we can use downstream.\n",
"\n",
"We will cover:\n",
"\n",
"- Basic usage;\n",
"- Parsing of Markdown into elements such as titles, list items, and text.\n",
"\n",
-"LangChain implements an [UnstructuredMarkdownLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.markdown.UnstructuredMarkdownLoader.html) object which requires the [Unstructured](https://unstructured-io.github.io/unstructured/) package. First we install it:"
+"LangChain implements an [UnstructuredMarkdownLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.markdown.UnstructuredMarkdownLoader.html) object which requires the [Unstructured](https://unstructured-io.github.io/unstructured/) package. First we install it:"
]
},
{
@@ -26,7 +26,7 @@
"metadata": {},
"outputs": [],
"source": [
-"%pip install \"unstructured[md]\""
+"%pip install \"unstructured[md]\" nltk"
]
},
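Basic usage then looks like the following sketch (the path is illustrative); passing `mode="elements"` keeps one element, such as a title or list item, per Document:

```python
from langchain_community.document_loaders import UnstructuredMarkdownLoader

loader = UnstructuredMarkdownLoader("./example_data/example.md", mode="elements")
docs = loader.load()
print(docs[0].metadata["category"])  # e.g. 'Title'
```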
{

@@ -3,7 +3,7 @@
The [Microsoft Office](https://www.office.com/) suite of productivity software includes Microsoft Word, Microsoft Excel, Microsoft PowerPoint, Microsoft Outlook, and Microsoft OneNote. It is available for Microsoft Windows and macOS operating systems. It is also available on Android and iOS.

This covers how to load commonly used file formats including `DOCX`, `XLSX` and `PPTX` documents into a LangChain
-[Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document)
+[Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document)
object that we can use downstream.

File diff suppressed because one or more lines are too long

@@ -8,7 +8,7 @@ The Embeddings class is a class designed for interfacing with text embedding mod

Embeddings create a vector representation of a piece of text. This is useful because it means we can think about text in the vector space, and do things like semantic search where we look for pieces of text that are most similar in the vector space.

The base Embeddings class in LangChain provides two methods: one for embedding documents and one for embedding a query. The former, `.embed_documents`, takes as input multiple texts, while the latter, `.embed_query`, takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself).
`.embed_query` will return a list of floats, whereas `.embed_documents` returns a list of lists of floats.
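The shape of the two calls (`embeddings_model` stands for any Embeddings implementation; the "Get started" section below covers concrete providers):

```python
document_vectors = embeddings_model.embed_documents(["Hi there!", "Oh, hello!"])
query_vector = embeddings_model.embed_query("What was the name mentioned?")

len(document_vectors)  # one vector per input text
len(query_vector)      # dimensionality of a single embedding
```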
## Get started

@@ -94,15 +94,6 @@ from langchain_huggingface import HuggingFaceEmbeddings

embeddings_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
```

-You can also leave the `model_name` blank to use the default [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) model.
-
-```python
-from langchain_huggingface import HuggingFaceEmbeddings
-
-embeddings_model = HuggingFaceEmbeddings()
-```
-
</TabItem>
</Tabs>

@@ -6,7 +6,7 @@
"source": [
"# How to combine results from multiple retrievers\n",
"\n",
-"The [EnsembleRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.ensemble.EnsembleRetriever.html) supports ensembling of results from multiple retrievers. It is initialized with a list of [BaseRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain_core.retrievers.BaseRetriever.html) objects. EnsembleRetrievers rerank the results of the constituent retrievers based on the [Reciprocal Rank Fusion](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf) algorithm.\n",
+"The [EnsembleRetriever](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.ensemble.EnsembleRetriever.html) supports ensembling of results from multiple retrievers. It is initialized with a list of [BaseRetriever](https://python.langchain.com/api_reference/core/retrievers/langchain_core.retrievers.BaseRetriever.html) objects. EnsembleRetrievers rerank the results of the constituent retrievers based on the [Reciprocal Rank Fusion](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf) algorithm.\n",
"\n",
"By leveraging the strengths of different algorithms, the `EnsembleRetriever` can achieve better performance than any single algorithm.\n",
"\n",
@@ -14,7 +14,7 @@
"\n",
"## Basic usage\n",
"\n",
-"Below we demonstrate ensembling of a [BM25Retriever](https://api.python.langchain.com/en/latest/retrievers/langchain_community.retrievers.bm25.BM25Retriever.html) with a retriever derived from the [FAISS vector store](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html)."
+"Below we demonstrate ensembling of a [BM25Retriever](https://python.langchain.com/api_reference/community/retrievers/langchain_community.retrievers.bm25.BM25Retriever.html) with a retriever derived from the [FAISS vector store](https://python.langchain.com/api_reference/community/vectorstores/langchain_community.vectorstores.faiss.FAISS.html)."
]
},
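A sketch of that ensemble (the texts and weights are illustrative):

```python
from langchain.retrievers import EnsembleRetriever
from langchain_community.retrievers import BM25Retriever
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

texts = ["I like apples", "I like oranges", "Apples and oranges are fruits"]

bm25_retriever = BM25Retriever.from_texts(texts)
bm25_retriever.k = 2

faiss_vectorstore = FAISS.from_texts(texts, OpenAIEmbeddings())
faiss_retriever = faiss_vectorstore.as_retriever(search_kwargs={"k": 2})

# Results are fused with Reciprocal Rank Fusion, weighted 50/50
ensemble_retriever = EnsembleRetriever(
    retrievers=[bm25_retriever, faiss_retriever], weights=[0.5, 0.5]
)
docs = ensemble_retriever.invoke("apples")
```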
{

353 docs/docs/how_to/example_selectors_langsmith.ipynb (new file)
@@ -0,0 +1,353 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "4f7e423b",
"metadata": {},
"source": [
"# How to select examples from a LangSmith dataset\n",
"\n",
"import Prerequisites from \"@theme/Prerequisites\";\n",
"import Compatibility from \"@theme/Compatibility\";\n",
"\n",
"<Prerequisites titlesAndLinks={[\n",
"  [\"Chat models\", \"/docs/concepts/#chat-models\"],\n",
"  [\"Few-shot-prompting\", \"/docs/concepts/#few-shot-prompting\"],\n",
"  [\"LangSmith\", \"/docs/concepts/#langsmith\"],\n",
"]} />\n",
"\n",
"\n",
"<Compatibility packagesAndVersions={[\n",
"  [\"langsmith\", \"0.1.101\"],\n",
"  [\"langchain-core\", \"0.2.34\"],\n",
"]} />\n",
"\n",
"\n",
"LangSmith datasets have built-in support for similarity search, making them a great tool for building and querying few-shot examples.\n",
"\n",
"In this guide we'll see how to use an indexed LangSmith dataset as a few-shot example selector.\n",
"\n",
"## Setup\n",
"\n",
"Before getting started make sure you've [created a LangSmith account](https://smith.langchain.com/) and set your credentials:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "85445e0e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Set LangSmith API key:\n",
"\n",
"········\n"
]
}
],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.environ.get(\"LANGSMITH_API_KEY\"):\n",
"    os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Set LangSmith API key:\\n\\n\")\n",
"\n",
"os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"id": "ca899e29",
"metadata": {},
"source": [
"We'll need to install the `langsmith` SDK. In this example we'll also make use of `langchain`, `langchain-openai`, and `langchain-benchmarks`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b4fa7810",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU \"langsmith>=0.1.101\" \"langchain-core>=0.2.34\" langchain langchain-openai langchain-benchmarks"
]
},
{
"cell_type": "markdown",
"id": "fc716e12",
"metadata": {},
"source": [
"Now we'll clone a public dataset and turn on indexing for the dataset. We can also turn on indexing via the [LangSmith UI](https://docs.smith.langchain.com/how_to_guides/datasets/index_datasets_for_dynamic_few_shot_example_selection).\n",
"\n",
"We'll clone the [Multiverse math few shot example dataset](https://blog.langchain.dev/few-shot-prompting-to-improve-tool-calling-performance/).\n",
"\n",
"This enables searching over the dataset and will make sure that anytime we update/add examples they are also indexed."
]
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "cf53d280",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langsmith import Client as LangSmith\n",
|
||||
"\n",
|
||||
"ls_client = LangSmith()\n",
|
||||
"\n",
|
||||
"dataset_name = \"multiverse-math-few-shot-examples-v2\"\n",
|
||||
"dataset_public_url = (\n",
|
||||
" \"https://smith.langchain.com/public/620596ee-570b-4d2b-8c8f-f828adbe5242/d\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"ls_client.clone_public_dataset(dataset_public_url)\n",
|
||||
"\n",
|
||||
"dataset_id = ls_client.read_dataset(dataset_name=dataset_name).id\n",
|
||||
"\n",
|
||||
"ls_client.index_dataset(dataset_id=dataset_id)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5767d171",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Querying dataset\n",
|
||||
"\n",
|
||||
"Indexing can take a few seconds. Once the dataset is indexed, we can search for similar examples. Note that the input to the `similar_examples` method must have the same schema as the examples inputs. In this case our example inputs are a dictionary with a \"question\" key:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "5013a56f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"3"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"examples = ls_client.similar_examples(\n",
|
||||
" {\"question\": \"whats the negation of the negation of the negation of 3\"},\n",
|
||||
" limit=3,\n",
|
||||
" dataset_id=dataset_id,\n",
|
||||
")\n",
|
||||
"len(examples)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "a142db06",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'evaluate the negation of -100'"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"examples[0].inputs[\"question\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d2627125",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For this dataset, the outputs are the conversation that followed the question in OpenAI message format:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "af5b9191",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'role': 'assistant',\n",
|
||||
" 'content': None,\n",
|
||||
" 'tool_calls': [{'id': 'toolu_01HTpq4cYNUac6F7omUc2Wz3',\n",
|
||||
" 'type': 'function',\n",
|
||||
" 'function': {'name': 'negate', 'arguments': '{\"a\": -100}'}}]},\n",
|
||||
" {'role': 'tool',\n",
|
||||
" 'content': '-100.0',\n",
|
||||
" 'tool_call_id': 'toolu_01HTpq4cYNUac6F7omUc2Wz3'},\n",
|
||||
" {'role': 'assistant', 'content': 'So the answer is 100.'},\n",
|
||||
" {'role': 'user',\n",
|
||||
" 'content': '100 is incorrect. Please refer to the output of your tool call.'},\n",
|
||||
" {'role': 'assistant',\n",
|
||||
" 'content': [{'text': \"You're right, my previous answer was incorrect. Let me re-evaluate using the tool output:\",\n",
|
||||
" 'type': 'text'}],\n",
|
||||
" 'tool_calls': [{'id': 'toolu_01XsJQboYghGDygQpPjJkeRq',\n",
|
||||
" 'type': 'function',\n",
|
||||
" 'function': {'name': 'negate', 'arguments': '{\"a\": -100}'}}]},\n",
|
||||
" {'role': 'tool',\n",
|
||||
" 'content': '-100.0',\n",
|
||||
" 'tool_call_id': 'toolu_01XsJQboYghGDygQpPjJkeRq'},\n",
|
||||
" {'role': 'assistant', 'content': 'The answer is -100.0'},\n",
|
||||
" {'role': 'user',\n",
|
||||
" 'content': 'You have the correct numerical answer but are returning additional text. Please only respond with the numerical answer.'},\n",
|
||||
" {'role': 'assistant', 'content': '-100.0'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"examples[0].outputs[\"conversation\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e852c8ef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Creating dynamic few-shot prompts\n",
|
||||
"\n",
|
||||
"The search returns the examples whose inputs are most similar to the query input. We can use this for few-shot prompting a model like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "12cba1e1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import init_chat_model\n",
|
||||
"from langchain_benchmarks.tool_usage.tasks.multiverse_math import (\n",
|
||||
" add,\n",
|
||||
" cos,\n",
|
||||
" divide,\n",
|
||||
" log,\n",
|
||||
" multiply,\n",
|
||||
" negate,\n",
|
||||
" pi,\n",
|
||||
" power,\n",
|
||||
" sin,\n",
|
||||
" subtract,\n",
|
||||
")\n",
|
||||
"from langchain_core.runnables import RunnableLambda\n",
|
||||
"from langsmith import AsyncClient as AsyncLangSmith\n",
|
||||
"\n",
|
||||
"async_ls_client = AsyncLangSmith()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def similar_examples(input_: dict) -> dict:\n",
|
||||
" examples = ls_client.similar_examples(input_, limit=5, dataset_id=dataset_id)\n",
|
||||
" return {**input_, \"examples\": examples}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def asimilar_examples(input_: dict) -> dict:\n",
|
||||
" examples = await async_ls_client.similar_examples(\n",
|
||||
" input_, limit=5, dataset_id=dataset_id\n",
|
||||
" )\n",
|
||||
" return {**input_, \"examples\": examples}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def construct_prompt(input_: dict) -> list:\n",
|
||||
" instructions = \"\"\"You are great at using mathematical tools.\"\"\"\n",
|
||||
" examples = []\n",
|
||||
" for ex in input_[\"examples\"]:\n",
|
||||
" examples.append({\"role\": \"user\", \"content\": ex.inputs[\"question\"]})\n",
|
||||
" for msg in ex.outputs[\"conversation\"]:\n",
|
||||
" if msg[\"role\"] == \"assistant\":\n",
|
||||
" msg[\"name\"] = \"example_assistant\"\n",
|
||||
" if msg[\"role\"] == \"user\":\n",
|
||||
" msg[\"name\"] = \"example_user\"\n",
|
||||
" examples.append(msg)\n",
|
||||
" return [\n",
|
||||
" {\"role\": \"system\", \"content\": instructions},\n",
|
||||
" *examples,\n",
|
||||
" {\"role\": \"user\", \"content\": input_[\"question\"]},\n",
|
||||
" ]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [add, cos, divide, log, multiply, negate, pi, power, sin, subtract]\n",
|
||||
"llm = init_chat_model(\"gpt-4o-2024-08-06\")\n",
|
||||
"llm_with_tools = llm.bind_tools(tools)\n",
|
||||
"\n",
|
||||
"example_selector = RunnableLambda(func=similar_examples, afunc=asimilar_examples)\n",
|
||||
"\n",
|
||||
"chain = example_selector | construct_prompt | llm_with_tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "c423b367",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'negate',\n",
|
||||
" 'args': {'a': 3},\n",
|
||||
" 'id': 'call_uMSdoTl6ehfHh5a6JQUb2NoZ',\n",
|
||||
" 'type': 'tool_call'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ai_msg = await chain.ainvoke({\"question\": \"whats the negation of the negation of 3\"})\n",
|
||||
"ai_msg.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "94489b4a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Looking at the LangSmith trace, we can see that relevant examples were pulled in in the `similar_examples` step and passed as messages to ChatOpenAI: https://smith.langchain.com/public/9585e30f-765a-4ed9-b964-2211420cd2f8/r/fdea98d6-e90f-49d4-ac22-dfd012e9e0d9."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-311",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-311"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -16,11 +16,11 @@
"also with JSON mode or prompt based techniques.\n",
":::\n",
"\n",
"LangChain implements a [tool-call attribute](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.tool_calls) on messages from LLMs that include tool calls. See our [how-to guide on tool calling](/docs/how_to/tool_calling) for more detail. To build reference examples for data extraction, we build a chat history containing a sequence of: \n",
"LangChain implements a [tool-call attribute](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.tool_calls) on messages from LLMs that include tool calls. See our [how-to guide on tool calling](/docs/how_to/tool_calling) for more detail. To build reference examples for data extraction, we build a chat history containing a sequence of: \n",
"\n",
"- [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html) containing example inputs;\n",
"- [AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) containing example tool calls;\n",
"- [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html) containing example tool outputs.\n",
"- [HumanMessage](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.human.HumanMessage.html) containing example inputs;\n",
"- [AIMessage](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html) containing example tool calls;\n",
"- [ToolMessage](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.tool.ToolMessage.html) containing example tool outputs.\n",
"\n",
"LangChain adopts this convention for structuring tool calls into conversation across LLM model providers.\n",
"\n",
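The Human/AI/Tool message sequence listed in the hunk above can be made concrete with a small sketch. This is an illustration of the convention, not the notebook's exact helper: the function name, the `Person` tool name, and its arguments are assumptions, while the `ToolMessage` text matches the outputs shown later in this diff.

```python
import uuid

from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage


def example_to_messages(text: str, tool_name: str, tool_args: dict) -> list[BaseMessage]:
    """Turn one reference example into the Human/AI/Tool message triple."""
    tool_call_id = str(uuid.uuid4())
    return [
        HumanMessage(content=text),  # the example input
        AIMessage(  # the example tool call
            content="",
            tool_calls=[{"id": tool_call_id, "name": tool_name, "args": tool_args}],
        ),
        ToolMessage(  # the example tool output, linked by tool_call_id
            content="You have correctly called this tool.",
            tool_call_id=tool_call_id,
        ),
    ]


messages = example_to_messages(
    "Fiona traveled far from France to Spain.",
    "Person",
    {"name": "Fiona", "hair_color": None, "height_in_meters": None},
)
```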
@@ -29,9 +29,16 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "89579144-bcb3-490a-8036-86a0a6bcd56b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:41.780410Z",
"iopub.status.busy": "2024-09-10T20:26:41.780102Z",
"iopub.status.idle": "2024-09-10T20:26:42.147112Z",
"shell.execute_reply": "2024-09-10T20:26:42.146838Z"
}
},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
@@ -67,17 +74,24 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 2,
"id": "610c3025-ea63-4cd7-88bd-c8cbcb4d8a3f",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.148746Z",
"iopub.status.busy": "2024-09-10T20:26:42.148621Z",
"iopub.status.idle": "2024-09-10T20:26:42.162044Z",
"shell.execute_reply": "2024-09-10T20:26:42.161794Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"ChatPromptValue(messages=[SystemMessage(content=\"You are an expert extraction algorithm. Only extract relevant information from the text. If you do not know the value of an attribute asked to extract, return null for the attribute's value.\"), HumanMessage(content='testing 1 2 3'), HumanMessage(content='this is some text')])"
"ChatPromptValue(messages=[SystemMessage(content=\"You are an expert extraction algorithm. Only extract relevant information from the text. If you do not know the value of an attribute asked to extract, return null for the attribute's value.\", additional_kwargs={}, response_metadata={}), HumanMessage(content='testing 1 2 3', additional_kwargs={}, response_metadata={}), HumanMessage(content='this is some text', additional_kwargs={}, response_metadata={})])"
]
},
"execution_count": 3,
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
@@ -104,15 +118,22 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"id": "d875a49a-d2cb-4b9e-b5bf-41073bc3905c",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.163477Z",
"iopub.status.busy": "2024-09-10T20:26:42.163391Z",
"iopub.status.idle": "2024-09-10T20:26:42.324449Z",
"shell.execute_reply": "2024-09-10T20:26:42.324206Z"
}
},
"outputs": [],
"source": [
"from typing import List, Optional\n",
"\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_openai import ChatOpenAI\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Person(BaseModel):\n",
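A pattern worth noting, visible in this hunk and repeated throughout the rest of the diff: the docs swap the `langchain_core.pydantic_v1` compatibility shim for plain pydantic v2 imports. The snippet below just restates that before/after; it summarizes the diff rather than introducing new API.

```python
# Before: the pydantic v1 shim that ships inside langchain_core.
# from langchain_core.pydantic_v1 import BaseModel, Field

# After: plain pydantic (v2), as the updated notebooks now import it.
from pydantic import BaseModel, Field
```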
@@ -162,9 +183,16 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 4,
"id": "08356810-77ce-4e68-99d9-faa0326f2cee",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.326100Z",
"iopub.status.busy": "2024-09-10T20:26:42.326016Z",
"iopub.status.idle": "2024-09-10T20:26:42.329260Z",
"shell.execute_reply": "2024-09-10T20:26:42.329014Z"
}
},
"outputs": [],
"source": [
"import uuid\n",
@@ -177,7 +205,7 @@
" SystemMessage,\n",
" ToolMessage,\n",
")\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Example(TypedDict):\n",
@@ -238,9 +266,16 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 5,
"id": "7f59a745-5c81-4011-a4c5-a33ec1eca7ef",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.330580Z",
"iopub.status.busy": "2024-09-10T20:26:42.330488Z",
"iopub.status.idle": "2024-09-10T20:26:42.332813Z",
"shell.execute_reply": "2024-09-10T20:26:42.332598Z"
}
},
"outputs": [],
"source": [
"examples = [\n",
@@ -273,22 +308,29 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 6,
"id": "976bb7b8-09c4-4a3e-80df-49a483705c08",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.333955Z",
"iopub.status.busy": "2024-09-10T20:26:42.333876Z",
"iopub.status.idle": "2024-09-10T20:26:42.336841Z",
"shell.execute_reply": "2024-09-10T20:26:42.336635Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"system: content=\"You are an expert extraction algorithm. Only extract relevant information from the text. If you do not know the value of an attribute asked to extract, return null for the attribute's value.\"\n",
"human: content=\"The ocean is vast and blue. It's more than 20,000 feet deep. There are many fish in it.\"\n",
"ai: content='' tool_calls=[{'name': 'Person', 'args': {'name': None, 'hair_color': None, 'height_in_meters': None}, 'id': 'b843ba77-4c9c-48ef-92a4-54e534f24521'}]\n",
"tool: content='You have correctly called this tool.' tool_call_id='b843ba77-4c9c-48ef-92a4-54e534f24521'\n",
"human: content='Fiona traveled far from France to Spain.'\n",
"ai: content='' tool_calls=[{'name': 'Person', 'args': {'name': 'Fiona', 'hair_color': None, 'height_in_meters': None}, 'id': '46f00d6b-50e5-4482-9406-b07bb10340f6'}]\n",
"tool: content='You have correctly called this tool.' tool_call_id='46f00d6b-50e5-4482-9406-b07bb10340f6'\n",
"human: content='this is some text'\n"
"system: content=\"You are an expert extraction algorithm. Only extract relevant information from the text. If you do not know the value of an attribute asked to extract, return null for the attribute's value.\" additional_kwargs={} response_metadata={}\n",
"human: content=\"The ocean is vast and blue. It's more than 20,000 feet deep. There are many fish in it.\" additional_kwargs={} response_metadata={}\n",
"ai: content='' additional_kwargs={} response_metadata={} tool_calls=[{'name': 'Data', 'args': {'people': []}, 'id': '240159b1-1405-4107-a07c-3c6b91b3d5b7', 'type': 'tool_call'}]\n",
"tool: content='You have correctly called this tool.' tool_call_id='240159b1-1405-4107-a07c-3c6b91b3d5b7'\n",
"human: content='Fiona traveled far from France to Spain.' additional_kwargs={} response_metadata={}\n",
"ai: content='' additional_kwargs={} response_metadata={} tool_calls=[{'name': 'Data', 'args': {'people': [{'name': 'Fiona', 'hair_color': None, 'height_in_meters': None}]}, 'id': '3fc521e4-d1d2-4c20-bf40-e3d72f1068da', 'type': 'tool_call'}]\n",
"tool: content='You have correctly called this tool.' tool_call_id='3fc521e4-d1d2-4c20-bf40-e3d72f1068da'\n",
"human: content='this is some text' additional_kwargs={} response_metadata={}\n"
]
}
],
@@ -320,9 +362,16 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 7,
"id": "df2e1ee1-69e8-4c4d-b349-95f2e320317b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.338001Z",
"iopub.status.busy": "2024-09-10T20:26:42.337915Z",
"iopub.status.idle": "2024-09-10T20:26:42.349121Z",
"shell.execute_reply": "2024-09-10T20:26:42.348908Z"
}
},
"outputs": [],
"source": [
"# | output: false\n",
@@ -343,9 +392,16 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 8,
"id": "dbfea43d-769b-42e9-a76f-ce722f7d6f93",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.350335Z",
"iopub.status.busy": "2024-09-10T20:26:42.350264Z",
"iopub.status.idle": "2024-09-10T20:26:42.424894Z",
"shell.execute_reply": "2024-09-10T20:26:42.424623Z"
}
},
"outputs": [],
"source": [
"runnable = prompt | llm.with_structured_output(\n",
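The hunk above is truncated mid-call. A representative completion, under the assumption that `Data` is the pydantic schema defined earlier in this guide; a sketch of the common pattern, not necessarily the notebook's exact cell:

```python
# Bind the schema to the model so the chain returns a validated Data object.
runnable = prompt | llm.with_structured_output(
    schema=Data,
    method="function_calling",  # use the provider's tool/function-calling path
    include_raw=False,          # return only the parsed object, not the raw message
)
```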
@@ -367,18 +423,49 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 9,
"id": "66545cab-af2a-40a4-9dc9-b4110458b7d3",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.426258Z",
"iopub.status.busy": "2024-09-10T20:26:42.426187Z",
"iopub.status.idle": "2024-09-10T20:26:46.151633Z",
"shell.execute_reply": "2024-09-10T20:26:46.150690Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[Person(name='earth', hair_color='null', height_in_meters='null')]\n",
"people=[Person(name='earth', hair_color='null', height_in_meters='null')]\n",
"people=[]\n",
"people=[Person(name='earth', hair_color='null', height_in_meters='null')]\n",
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
}
@@ -401,18 +488,49 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 10,
"id": "1c09d805-ec16-4123-aef9-6a5b59499b5c",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:46.155346Z",
"iopub.status.busy": "2024-09-10T20:26:46.155110Z",
"iopub.status.idle": "2024-09-10T20:26:51.810359Z",
"shell.execute_reply": "2024-09-10T20:26:51.809636Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n",
"people=[]\n",
"people=[]\n",
"people=[]\n",
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
}
@@ -435,9 +553,16 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 11,
"id": "a9b7a762-1b75-4f9f-b9d9-6732dd05802c",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:51.813309Z",
"iopub.status.busy": "2024-09-10T20:26:51.813150Z",
"iopub.status.idle": "2024-09-10T20:26:53.474153Z",
"shell.execute_reply": "2024-09-10T20:26:53.473522Z"
}
},
"outputs": [
{
"data": {
@@ -445,7 +570,7 @@
"Data(people=[Person(name='Harrison', hair_color='black', height_in_meters=None)])"
]
},
"execution_count": 12,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
@@ -476,7 +601,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.9"
}
},
"nbformat": 4,
@@ -23,16 +23,56 @@
"id": "57969139-ad0a-487e-97d8-cb30e2af9742",
"metadata": {},
"source": [
"## Set up\n",
"## Setup\n",
"\n",
"We need some example data! Let's download an article about [cars from wikipedia](https://en.wikipedia.org/wiki/Car) and load it as a LangChain [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html)."
"First we'll install the dependencies needed for this guide:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "84460db2-36e1-4037-bfa6-2a11883c2ba5",
"id": "a3b4d838-5be4-4207-8a4a-9ef5624c48f2",
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:19.850767Z",
"iopub.status.busy": "2024-09-10T20:35:19.850427Z",
"iopub.status.idle": "2024-09-10T20:35:21.432233Z",
"shell.execute_reply": "2024-09-10T20:35:21.431606Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install -qU langchain-community lxml faiss-cpu langchain-openai"
]
},
{
"cell_type": "markdown",
"id": "ac000b03-33fc-414f-8f2c-3850df621a35",
"metadata": {},
"source": [
"Now we need some example data! Let's download an article about [cars from wikipedia](https://en.wikipedia.org/wiki/Car) and load it as a LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html)."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "84460db2-36e1-4037-bfa6-2a11883c2ba5",
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:21.434882Z",
"iopub.status.busy": "2024-09-10T20:35:21.434571Z",
"iopub.status.idle": "2024-09-10T20:35:22.214545Z",
"shell.execute_reply": "2024-09-10T20:35:22.214253Z"
}
},
"outputs": [],
"source": [
"import re\n",
@@ -55,15 +95,22 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 3,
"id": "fcb6917b-123d-4630-a0ce-ed8b293d482d",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:22.216143Z",
"iopub.status.busy": "2024-09-10T20:35:22.216039Z",
"iopub.status.idle": "2024-09-10T20:35:22.218117Z",
"shell.execute_reply": "2024-09-10T20:35:22.217854Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"79174\n"
"80427\n"
]
}
],
@@ -87,13 +134,20 @@
"cell_type": "code",
"execution_count": 4,
"id": "a3b288ed-87a6-4af0-aac8-20921dc370d4",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:22.219468Z",
"iopub.status.busy": "2024-09-10T20:35:22.219395Z",
"iopub.status.idle": "2024-09-10T20:35:22.340594Z",
"shell.execute_reply": "2024-09-10T20:35:22.340319Z"
}
},
"outputs": [],
"source": [
"from typing import List, Optional\n",
"\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class KeyDevelopment(BaseModel):\n",
@@ -156,7 +210,14 @@
"cell_type": "code",
"execution_count": 5,
"id": "109f4f05-d0ff-431d-93d9-8f5aa34979a6",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:22.342277Z",
"iopub.status.busy": "2024-09-10T20:35:22.342171Z",
"iopub.status.idle": "2024-09-10T20:35:22.532302Z",
"shell.execute_reply": "2024-09-10T20:35:22.532034Z"
}
},
"outputs": [],
"source": [
"# | output: false\n",
@@ -171,7 +232,14 @@
"cell_type": "code",
"execution_count": 6,
"id": "aa4ae224-6d3d-4fe2-b210-7db19a9fe580",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:22.533795Z",
"iopub.status.busy": "2024-09-10T20:35:22.533708Z",
"iopub.status.idle": "2024-09-10T20:35:22.610573Z",
"shell.execute_reply": "2024-09-10T20:35:22.610307Z"
}
},
"outputs": [],
"source": [
"extractor = prompt | llm.with_structured_output(\n",
@@ -194,7 +262,14 @@
"cell_type": "code",
"execution_count": 7,
"id": "27b8a373-14b3-45ea-8bf5-9749122ad927",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:22.612123Z",
"iopub.status.busy": "2024-09-10T20:35:22.612052Z",
"iopub.status.idle": "2024-09-10T20:35:22.753493Z",
"shell.execute_reply": "2024-09-10T20:35:22.753179Z"
}
},
"outputs": [],
"source": [
"from langchain_text_splitters import TokenTextSplitter\n",
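The hunk cuts off right after the import. A typical use of `TokenTextSplitter` in the spirit of this guide looks like the sketch below; the `document` variable name and the chunk sizes are illustrative assumptions, not the notebook's exact values.

```python
from langchain_text_splitters import TokenTextSplitter

# Split the long article into overlapping, token-sized chunks so each one
# fits comfortably in the model's context window.
text_splitter = TokenTextSplitter(chunk_size=2000, chunk_overlap=20)
texts = text_splitter.split_text(document.page_content)
```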
@@ -214,7 +289,7 @@
"id": "5b43d7e0-3c85-4d97-86c7-e8c984b60b0a",
"metadata": {},
"source": [
"Use [batch](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) functionality to run the extraction in **parallel** across each chunk! \n",
"Use [batch](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html) functionality to run the extraction in **parallel** across each chunk! \n",
"\n",
":::{.callout-tip}\n",
"You can often use .batch() to parallelize the extractions! `.batch` uses a threadpool under the hood to help you parallelize workloads.\n",
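A minimal sketch of the parallel-extraction pattern the tip describes, assuming `extractor` and `texts` from the earlier cells. Because `.batch` runs on a threadpool, `max_concurrency` is the knob that caps how many model calls run at once.

```python
# Run the extractor over every chunk in parallel; the threadpool size is
# bounded by max_concurrency to avoid hammering the model API.
extractions = extractor.batch(
    [{"text": chunk} for chunk in texts],
    {"max_concurrency": 5},  # illustrative cap on simultaneous requests
)
```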
@@ -227,7 +302,14 @@
"cell_type": "code",
"execution_count": 8,
"id": "6ba766b5-8d6c-48e6-8d69-f391a66b65d2",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:22.755067Z",
"iopub.status.busy": "2024-09-10T20:35:22.754987Z",
"iopub.status.idle": "2024-09-10T20:35:36.691130Z",
"shell.execute_reply": "2024-09-10T20:35:36.690500Z"
}
},
"outputs": [],
"source": [
"# Limit just to the first 3 chunks\n",
@@ -254,21 +336,27 @@
"cell_type": "code",
"execution_count": 9,
"id": "c3f77470-ce6c-477f-8957-650913218632",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:36.694799Z",
"iopub.status.busy": "2024-09-10T20:35:36.694458Z",
"iopub.status.idle": "2024-09-10T20:35:36.701416Z",
"shell.execute_reply": "2024-09-10T20:35:36.700993Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"[KeyDevelopment(year=1966, description='The Toyota Corolla began production, becoming the best-selling series of automobile in history.', evidence='The Toyota Corolla, which has been in production since 1966, is the best-selling series of automobile in history.'),\n",
" KeyDevelopment(year=1769, description='Nicolas-Joseph Cugnot built the first steam-powered road vehicle.', evidence='The French inventor Nicolas-Joseph Cugnot built the first steam-powered road vehicle in 1769.'),\n",
" KeyDevelopment(year=1808, description='François Isaac de Rivaz designed and constructed the first internal combustion-powered automobile.', evidence='the Swiss inventor François Isaac de Rivaz designed and constructed the first internal combustion-powered automobile in 1808.'),\n",
" KeyDevelopment(year=1886, description='Carl Benz patented his Benz Patent-Motorwagen, inventing the modern car.', evidence='The modern car—a practical, marketable automobile for everyday use—was invented in 1886, when the German inventor Carl Benz patented his Benz Patent-Motorwagen.'),\n",
" KeyDevelopment(year=1908, description='Ford Model T, one of the first cars affordable by the masses, began production.', evidence='One of the first cars affordable by the masses was the Ford Model T, begun in 1908, an American car manufactured by the Ford Motor Company.'),\n",
" KeyDevelopment(year=1888, description=\"Bertha Benz undertook the first road trip by car to prove the road-worthiness of her husband's invention.\", evidence=\"In August 1888, Bertha Benz, the wife of Carl Benz, undertook the first road trip by car, to prove the road-worthiness of her husband's invention.\"),\n",
"[KeyDevelopment(year=1769, description='Nicolas-Joseph Cugnot built the first full-scale, self-propelled mechanical vehicle, a steam-powered tricycle.', evidence='Nicolas-Joseph Cugnot is widely credited with building the first full-scale, self-propelled mechanical vehicle in about 1769; he created a steam-powered tricycle.'),\n",
" KeyDevelopment(year=1807, description=\"Nicéphore Niépce and his brother Claude created what was probably the world's first internal combustion engine.\", evidence=\"In 1807, Nicéphore Niépce and his brother Claude created what was probably the world's first internal combustion engine (which they called a Pyréolophore), but installed it in a boat on the river Saone in France.\"),\n",
" KeyDevelopment(year=1886, description='Carl Benz patented the Benz Patent-Motorwagen, marking the birth of the modern car.', evidence='In November 1881, French inventor Gustave Trouvé demonstrated a three-wheeled car powered by electricity at the International Exposition of Electricity. Although several other German engineers (including Gottlieb Daimler, Wilhelm Maybach, and Siegfried Marcus) were working on cars at about the same time, the year 1886 is regarded as the birth year of the modern car—a practical, marketable automobile for everyday use—when the German Carl Benz patented his Benz Patent-Motorwagen; he is generally acknowledged as the inventor of the car.'),\n",
" KeyDevelopment(year=1886, description='Carl Benz began promotion of his vehicle, marking the introduction of the first commercially available automobile.', evidence='Benz began promotion of the vehicle on 3 July 1886.'),\n",
" KeyDevelopment(year=1888, description=\"Bertha Benz undertook the first road trip by car to prove the road-worthiness of her husband's invention.\", evidence=\"In August 1888, Bertha Benz, the wife and business partner of Carl Benz, undertook the first road trip by car, to prove the road-worthiness of her husband's invention.\"),\n",
" KeyDevelopment(year=1896, description='Benz designed and patented the first internal-combustion flat engine, called boxermotor.', evidence='In 1896, Benz designed and patented the first internal-combustion flat engine, called boxermotor.'),\n",
" KeyDevelopment(year=1897, description='Nesselsdorfer Wagenbau produced the Präsident automobil, one of the first factory-made cars in the world.', evidence='The first motor car in central Europe and one of the first factory-made cars in the world, was produced by Czech company Nesselsdorfer Wagenbau (later renamed to Tatra) in 1897, the Präsident automobil.'),\n",
" KeyDevelopment(year=1890, description='Daimler Motoren Gesellschaft (DMG) was founded by Daimler and Maybach in Cannstatt.', evidence='Daimler and Maybach founded Daimler Motoren Gesellschaft (DMG) in Cannstatt in 1890.'),\n",
" KeyDevelopment(year=1891, description='Auguste Doriot and Louis Rigoulot completed the longest trip by a petrol-driven vehicle with a Daimler powered Peugeot Type 3.', evidence='In 1891, Auguste Doriot and his Peugeot colleague Louis Rigoulot completed the longest trip by a petrol-driven vehicle when their self-designed and built Daimler powered Peugeot Type 3 completed 2,100 kilometres (1,300 mi) from Valentigney to Paris and Brest and back again.')]"
" KeyDevelopment(year=1897, description='The first motor car in central Europe and one of the first factory-made cars in the world, the Präsident automobil, was produced by Nesselsdorfer Wagenbau.', evidence='The first motor car in central Europe and one of the first factory-made cars in the world, was produced by Czech company Nesselsdorfer Wagenbau (later renamed to Tatra) in 1897, the Präsident automobil.'),\n",
" KeyDevelopment(year=1901, description='Ransom Olds started large-scale, production-line manufacturing of affordable cars at his Oldsmobile factory in Lansing, Michigan.', evidence='Large-scale, production-line manufacturing of affordable cars was started by Ransom Olds in 1901 at his Oldsmobile factory in Lansing, Michigan.'),\n",
" KeyDevelopment(year=1913, description=\"Henry Ford introduced the world's first moving assembly line for cars at the Highland Park Ford Plant.\", evidence=\"This concept was greatly expanded by Henry Ford, beginning in 1913 with the world's first moving assembly line for cars at the Highland Park Ford Plant.\")]"
]
},
"execution_count": 9,
@@ -315,7 +403,14 @@
"cell_type": "code",
"execution_count": 10,
"id": "aaf37c82-625b-4fa1-8e88-73303f08ac16",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:36.703897Z",
"iopub.status.busy": "2024-09-10T20:35:36.703718Z",
"iopub.status.idle": "2024-09-10T20:35:38.451523Z",
"shell.execute_reply": "2024-09-10T20:35:38.450925Z"
}
},
"outputs": [],
"source": [
"from langchain_community.vectorstores import FAISS\n",
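This cell is also truncated after its first import. The sketch below shows the usual way a FAISS retriever is built over the chunks for the RAG approach that follows; the embedding model, the `texts` variable, and `k=1` are illustrative assumptions.

```python
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings

# Build an in-memory FAISS index over the chunks and expose it as a retriever.
vectorstore = FAISS.from_documents(
    [Document(page_content=chunk) for chunk in texts],
    OpenAIEmbeddings(),
)
retriever = vectorstore.as_retriever(search_kwargs={"k": 1})
```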
@@ -344,7 +439,14 @@
"cell_type": "code",
"execution_count": 11,
"id": "47aad00b-7013-4f7f-a1b0-02ef269093bf",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:38.455094Z",
"iopub.status.busy": "2024-09-10T20:35:38.454851Z",
"iopub.status.idle": "2024-09-10T20:35:38.458315Z",
"shell.execute_reply": "2024-09-10T20:35:38.457940Z"
}
},
"outputs": [],
"source": [
"rag_extractor = {\n",
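The `rag_extractor` cell is cut off above. A plausible completion, offered purely as a sketch under the assumption that `retriever` and `extractor` are the objects built earlier: feed the retriever's top hit to the extractor under the "text" key the prompt expects. The notebook's exact body may differ.

```python
# Hypothetical completion: retrieve the most relevant chunk, then extract.
rag_extractor = {
    "text": retriever | (lambda docs: docs[0].page_content)  # fetch top doc
} | extractor
```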
@@ -356,7 +458,14 @@
"cell_type": "code",
"execution_count": 12,
"id": "68f2de01-0cd8-456e-a959-db236189d41b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:38.460115Z",
"iopub.status.busy": "2024-09-10T20:35:38.459949Z",
"iopub.status.idle": "2024-09-10T20:35:43.195532Z",
"shell.execute_reply": "2024-09-10T20:35:43.194254Z"
}
},
"outputs": [],
"source": [
"results = rag_extractor.invoke(\"Key developments associated with cars\")"
@@ -366,15 +475,21 @@
"cell_type": "code",
"execution_count": 13,
"id": "1788e2d6-77bb-417f-827c-eb96c035164e",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:43.200497Z",
"iopub.status.busy": "2024-09-10T20:35:43.200037Z",
"iopub.status.idle": "2024-09-10T20:35:43.206773Z",
"shell.execute_reply": "2024-09-10T20:35:43.205426Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"year=1869 description='Mary Ward became one of the first documented car fatalities in Parsonstown, Ireland.' evidence='Mary Ward became one of the first documented car fatalities in 1869 in Parsonstown, Ireland,'\n",
"year=1899 description=\"Henry Bliss one of the US's first pedestrian car casualties in New York City.\" evidence=\"Henry Bliss one of the US's first pedestrian car casualties in 1899 in New York City.\"\n",
"year=2030 description='All fossil fuel vehicles will be banned in Amsterdam.' evidence='all fossil fuel vehicles will be banned in Amsterdam from 2030.'\n"
"year=2006 description='Car-sharing services in the US experienced double-digit growth in revenue and membership.' evidence='in the US, some car-sharing services have experienced double-digit growth in revenue and membership growth between 2006 and 2007.'\n",
"year=2020 description='56 million cars were manufactured worldwide, with China producing the most.' evidence='In 2020, there were 56 million cars manufactured worldwide, down from 67 million the previous year. The automotive industry in China produces by far the most (20 million in 2020).'\n"
]
}
],
@@ -416,7 +531,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.9"
}
},
"nbformat": 4,
@@ -27,9 +27,16 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "25487939-8713-4ec7-b774-e4a761ac8298",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:44.442501Z",
"iopub.status.busy": "2024-09-10T20:35:44.442044Z",
"iopub.status.idle": "2024-09-10T20:35:44.872217Z",
"shell.execute_reply": "2024-09-10T20:35:44.871897Z"
}
},
"outputs": [],
"source": [
"# | output: false\n",
@@ -62,16 +69,23 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 2,
"id": "497eb023-c043-443d-ac62-2d4ea85fe1b0",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:44.873979Z",
"iopub.status.busy": "2024-09-10T20:35:44.873840Z",
"iopub.status.idle": "2024-09-10T20:35:44.878966Z",
"shell.execute_reply": "2024-09-10T20:35:44.878718Z"
}
},
"outputs": [],
"source": [
"from typing import List, Optional\n",
"\n",
"from langchain_core.output_parsers import PydanticOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field, validator\n",
"from pydantic import BaseModel, Field, validator\n",
"\n",
"\n",
"class Person(BaseModel):\n",
@@ -114,9 +128,16 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"id": "20b99ffb-a114-49a9-a7be-154c525f8ada",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:44.880355Z",
"iopub.status.busy": "2024-09-10T20:35:44.880277Z",
"iopub.status.idle": "2024-09-10T20:35:44.881834Z",
"shell.execute_reply": "2024-09-10T20:35:44.881601Z"
}
},
"outputs": [],
"source": [
"query = \"Anna is 23 years old and she is 6 feet tall\""
@@ -124,9 +145,16 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 4,
"id": "4f3a66ce-de19-4571-9e54-67504ae3fba7",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:44.883138Z",
"iopub.status.busy": "2024-09-10T20:35:44.883049Z",
"iopub.status.idle": "2024-09-10T20:35:44.885139Z",
"shell.execute_reply": "2024-09-10T20:35:44.884801Z"
}
},
"outputs": [
{
"name": "stdout",
@@ -140,7 +168,7 @@
"\n",
"Here is the output schema:\n",
"```\n",
"{\"description\": \"Identifying information about all people in a text.\", \"properties\": {\"people\": {\"title\": \"People\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/Person\"}}}, \"required\": [\"people\"], \"definitions\": {\"Person\": {\"title\": \"Person\", \"description\": \"Information about a person.\", \"type\": \"object\", \"properties\": {\"name\": {\"title\": \"Name\", \"description\": \"The name of the person\", \"type\": \"string\"}, \"height_in_meters\": {\"title\": \"Height In Meters\", \"description\": \"The height of the person expressed in meters.\", \"type\": \"number\"}}, \"required\": [\"name\", \"height_in_meters\"]}}}\n",
"{\"$defs\": {\"Person\": {\"description\": \"Information about a person.\", \"properties\": {\"name\": {\"description\": \"The name of the person\", \"title\": \"Name\", \"type\": \"string\"}, \"height_in_meters\": {\"description\": \"The height of the person expressed in meters.\", \"title\": \"Height In Meters\", \"type\": \"number\"}}, \"required\": [\"name\", \"height_in_meters\"], \"title\": \"Person\", \"type\": \"object\"}}, \"description\": \"Identifying information about all people in a text.\", \"properties\": {\"people\": {\"items\": {\"$ref\": \"#/$defs/Person\"}, \"title\": \"People\", \"type\": \"array\"}}, \"required\": [\"people\"]}\n",
"```\n",
"Human: Anna is 23 years old and she is 6 feet tall\n"
]
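The format-instructions output above comes from the `PydanticOutputParser` pattern these hunks exercise. A compact restatement of that pattern, as a sketch: the trimmed-down `Person` schema and the prompt wording here are assumptions, not the notebook's exact cell.

```python
from langchain_core.output_parsers import PydanticOutputParser
from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel, Field


class Person(BaseModel):
    """Information about a person."""

    name: str = Field(..., description="The name of the person")
    height_in_meters: float = Field(
        ..., description="The height of the person expressed in meters."
    )


parser = PydanticOutputParser(pydantic_object=Person)

# Inject the parser's format instructions into the system prompt; the model's
# reply can then be parsed back into a Person instance with `parser`.
prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "Answer the user query. Wrap the output in the given format.\n"
            "{format_instructions}",
        ),
        ("human", "{query}"),
    ]
).partial(format_instructions=parser.get_format_instructions())
```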
@@ -160,9 +188,16 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 5,
"id": "7e0041eb-37dc-4384-9fe3-6dd8c356371e",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:44.886765Z",
"iopub.status.busy": "2024-09-10T20:35:44.886675Z",
"iopub.status.idle": "2024-09-10T20:35:46.835960Z",
"shell.execute_reply": "2024-09-10T20:35:46.835282Z"
}
},
"outputs": [
{
"data": {
@@ -170,7 +205,7 @@
"People(people=[Person(name='Anna', height_in_meters=1.83)])"
]
},
"execution_count": 6,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -202,16 +237,23 @@
"\n",
"If desired, it's easy to create a custom prompt and parser with `LangChain` and `LCEL`.\n",
"\n",
"To create a custom parser, define a function to parse the output from the model (typically an [AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html)) into an object of your choice.\n",
"To create a custom parser, define a function to parse the output from the model (typically an [AIMessage](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html)) into an object of your choice.\n",
"\n",
"See below for a simple implementation of a JSON parser."
]
},
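The diff only shows the imports of the custom-parser cell that the markdown above refers to, so here is a sketch of what such a "simple implementation of a JSON parser" typically looks like: pull every ```json fenced block out of the model's reply and `json.loads` each one. It is a simplified illustration, not necessarily the notebook's exact function.

```python
import json
import re
from typing import List

from langchain_core.messages import AIMessage


def extract_json(message: AIMessage) -> List[dict]:
    """Parse every ```json ... ``` block in the model's reply into a dict."""
    text = message.content
    pattern = r"```json(.*?)```"  # non-greedy match across lines
    matches = re.findall(pattern, text, re.DOTALL)
    try:
        return [json.loads(match.strip()) for match in matches]
    except json.JSONDecodeError:
        raise ValueError(f"Failed to parse: {message}")
```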
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 6,
"id": "b1f11912-c1bb-4a2a-a482-79bf3996961f",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:46.839577Z",
"iopub.status.busy": "2024-09-10T20:35:46.839233Z",
"iopub.status.idle": "2024-09-10T20:35:46.849663Z",
"shell.execute_reply": "2024-09-10T20:35:46.849177Z"
}
},
"outputs": [],
"source": [
"import json\n",
@@ -221,7 +263,7 @@
"from langchain_anthropic.chat_models import ChatAnthropic\n",
"from langchain_core.messages import AIMessage\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field, validator\n",
"from pydantic import BaseModel, Field, validator\n",
"\n",
"\n",
"class Person(BaseModel):\n",
@@ -279,16 +321,23 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 7,
"id": "9260d5e8-3b6c-4639-9f3b-fb2f90239e4b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:46.851870Z",
"iopub.status.busy": "2024-09-10T20:35:46.851698Z",
"iopub.status.idle": "2024-09-10T20:35:46.854786Z",
"shell.execute_reply": "2024-09-10T20:35:46.854424Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"System: Answer the user query. Output your answer as JSON that matches the given schema: ```json\n",
"{'title': 'People', 'description': 'Identifying information about all people in a text.', 'type': 'object', 'properties': {'people': {'title': 'People', 'type': 'array', 'items': {'$ref': '#/definitions/Person'}}}, 'required': ['people'], 'definitions': {'Person': {'title': 'Person', 'description': 'Information about a person.', 'type': 'object', 'properties': {'name': {'title': 'Name', 'description': 'The name of the person', 'type': 'string'}, 'height_in_meters': {'title': 'Height In Meters', 'description': 'The height of the person expressed in meters.', 'type': 'number'}}, 'required': ['name', 'height_in_meters']}}}\n",
"{'$defs': {'Person': {'description': 'Information about a person.', 'properties': {'name': {'description': 'The name of the person', 'title': 'Name', 'type': 'string'}, 'height_in_meters': {'description': 'The height of the person expressed in meters.', 'title': 'Height In Meters', 'type': 'number'}}, 'required': ['name', 'height_in_meters'], 'title': 'Person', 'type': 'object'}}, 'description': 'Identifying information about all people in a text.', 'properties': {'people': {'items': {'$ref': '#/$defs/Person'}, 'title': 'People', 'type': 'array'}}, 'required': ['people'], 'title': 'People', 'type': 'object'}\n",
"```. Make sure to wrap the answer in ```json and ``` tags\n",
"Human: Anna is 23 years old and she is 6 feet tall\n"
]
@@ -301,17 +350,32 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 8,
"id": "c523301d-ae0e-45e3-b195-7fd28c67a5c4",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:46.856945Z",
"iopub.status.busy": "2024-09-10T20:35:46.856769Z",
"iopub.status.idle": "2024-09-10T20:35:48.373728Z",
"shell.execute_reply": "2024-09-10T20:35:48.373079Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/bagatur/langchain/.venv/lib/python3.11/site-packages/pydantic/_internal/_fields.py:201: UserWarning: Field name \"schema\" in \"PromptInput\" shadows an attribute in parent \"BaseModel\"\n",
" warnings.warn(\n"
]
},
{
"data": {
"text/plain": [
"[{'people': [{'name': 'Anna', 'height_in_meters': 1.83}]}]"
]
},
"execution_count": 9,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
@@ -349,7 +413,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.9"
}
},
"nbformat": 4,
Some files were not shown because too many files have changed in this diff.